net/ice/base: support MPLS ethertype switch filter
[dpdk.git] / drivers / net / ice / base / ice_switch.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2021 Intel Corporation
3  */
4
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
7 #include "ice_flow.h"
8
/* Byte offsets into the dummy Ethernet header below, plus well-known
 * ethertype / IP-protocol identifiers used when building switch filters.
 */
9 #define ICE_ETH_DA_OFFSET               0       /* destination MAC */
10 #define ICE_ETH_ETHTYPE_OFFSET          12      /* ethertype field */
11 #define ICE_ETH_VLAN_TCI_OFFSET         14      /* VLAN TCI (when 0x8100 present) */
12 #define ICE_MAX_VLAN_ID                 0xFFF   /* 12-bit VLAN ID maximum */
13 #define ICE_IPV4_NVGRE_PROTO_ID         0x002F  /* IP protocol: GRE */
14 #define ICE_PPP_IPV6_PROTO_ID           0x0057  /* PPP protocol: IPv6 */
15 #define ICE_IPV6_ETHER_ID               0x86DD  /* ethertype: IPv6 */
16 #define ICE_TCP_PROTO_ID                0x06    /* IP protocol: TCP */
17 #define ICE_GTPU_PROFILE                24      /* HW profile index for GTP-U */
18 #define ICE_ETH_P_8021Q                 0x8100  /* ethertype: 802.1Q VLAN tag */
19 #define ICE_MPLS_ETHER_ID               0x8847  /* ethertype: MPLS unicast */
20
21 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
22  * struct to configure any switch filter rules.
23  * {DA (6 bytes), SA(6 bytes),
24  * Ether type (2 bytes for header without VLAN tag) OR
25  * VLAN tag (4 bytes for header with VLAN tag) }
26  *
27  * Word on Hardcoded values
28  * byte 0 = 0x2: to identify it as locally administered DA MAC
29  * byte 6 = 0x2: to identify it as locally administered SA MAC
30  * byte 12 = 0x81 & byte 13 = 0x00:
31  *      In case of VLAN filter first two bytes defines ether type (0x8100)
32  *      and remaining two bytes are placeholder for programming a given VLAN ID
33  *      In case of Ether type filter it is treated as header without VLAN tag
34  *      and byte 12 and 13 is used to program a given Ether type instead
35  */
36 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
37                                                         0x2, 0, 0, 0, 0, 0,
38                                                         0x81, 0, 0, 0};
39
/* Describes one protocol header within a dummy packet: the header's type
 * and its byte offset from the start of the packet.  A table of these is
 * paired with each dummy packet below; the entry { ICE_PROTOCOL_LAST, 0 }
 * terminates the list.
 */
40 struct ice_dummy_pkt_offsets {
41         enum ice_protocol_type type;
42         u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
43 };
44
/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv4 + TCP dummy packet */
45 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
46         { ICE_MAC_OFOS,         0 },
47         { ICE_ETYPE_OL,         12 },
48         { ICE_IPV4_OFOS,        14 },
49         { ICE_NVGRE,            34 },
50         { ICE_MAC_IL,           42 },
51         { ICE_IPV4_IL,          56 },
52         { ICE_TCP_IL,           76 },
53         { ICE_PROTOCOL_LAST,    0 },
54 };
55
/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + IPv4 + TCP */
56 static const u8 dummy_gre_tcp_packet[] = {
57         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
58         0x00, 0x00, 0x00, 0x00,
59         0x00, 0x00, 0x00, 0x00,
60
61         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
62
63         0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
64         0x00, 0x00, 0x00, 0x00,
65         0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
66         0x00, 0x00, 0x00, 0x00,
67         0x00, 0x00, 0x00, 0x00,
68
69         0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
70         0x00, 0x00, 0x00, 0x00,
71
72         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
73         0x00, 0x00, 0x00, 0x00,
74         0x00, 0x00, 0x00, 0x00,
75         0x08, 0x00,
76
77         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
78         0x00, 0x00, 0x00, 0x00,
79         0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
80         0x00, 0x00, 0x00, 0x00,
81         0x00, 0x00, 0x00, 0x00,
82
83         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
84         0x00, 0x00, 0x00, 0x00,
85         0x00, 0x00, 0x00, 0x00,
86         0x50, 0x02, 0x20, 0x00,
87         0x00, 0x00, 0x00, 0x00
88 };
89
/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv4 + UDP dummy packet */
90 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
91         { ICE_MAC_OFOS,         0 },
92         { ICE_ETYPE_OL,         12 },
93         { ICE_IPV4_OFOS,        14 },
94         { ICE_NVGRE,            34 },
95         { ICE_MAC_IL,           42 },
96         { ICE_IPV4_IL,          56 },
97         { ICE_UDP_ILOS,         76 },
98         { ICE_PROTOCOL_LAST,    0 },
99 };
100
/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + IPv4 + UDP */
101 static const u8 dummy_gre_udp_packet[] = {
102         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
103         0x00, 0x00, 0x00, 0x00,
104         0x00, 0x00, 0x00, 0x00,
105
106         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
107
108         0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
109         0x00, 0x00, 0x00, 0x00,
110         0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
111         0x00, 0x00, 0x00, 0x00,
112         0x00, 0x00, 0x00, 0x00,
113
114         0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
115         0x00, 0x00, 0x00, 0x00,
116
117         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
118         0x00, 0x00, 0x00, 0x00,
119         0x00, 0x00, 0x00, 0x00,
120         0x08, 0x00,
121
122         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
123         0x00, 0x00, 0x00, 0x00,
124         0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
125         0x00, 0x00, 0x00, 0x00,
126         0x00, 0x00, 0x00, 0x00,
127
128         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
129         0x00, 0x08, 0x00, 0x00,
130 };
131
/* offset info for MAC + IPv4 + UDP + VXLAN/GENEVE/VXLAN-GPE + inner
 * MAC + IPv4 + TCP dummy packet (the three tunnel types share offset 42)
 */
132 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
133         { ICE_MAC_OFOS,         0 },
134         { ICE_ETYPE_OL,         12 },
135         { ICE_IPV4_OFOS,        14 },
136         { ICE_UDP_OF,           34 },
137         { ICE_VXLAN,            42 },
138         { ICE_GENEVE,           42 },
139         { ICE_VXLAN_GPE,        42 },
140         { ICE_MAC_IL,           50 },
141         { ICE_IPV4_IL,          64 },
142         { ICE_TCP_IL,           84 },
143         { ICE_PROTOCOL_LAST,    0 },
144 };
145
/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + IPv4 + TCP */
146 static const u8 dummy_udp_tun_tcp_packet[] = {
147         0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
148         0x00, 0x00, 0x00, 0x00,
149         0x00, 0x00, 0x00, 0x00,
150
151         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
152
153         0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
154         0x00, 0x01, 0x00, 0x00,
155         0x40, 0x11, 0x00, 0x00,
156         0x00, 0x00, 0x00, 0x00,
157         0x00, 0x00, 0x00, 0x00,
158
159         0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
160         0x00, 0x46, 0x00, 0x00,
161
162         0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
163         0x00, 0x00, 0x00, 0x00,
164
165         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
166         0x00, 0x00, 0x00, 0x00,
167         0x00, 0x00, 0x00, 0x00,
168         0x08, 0x00,
169
170         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
171         0x00, 0x01, 0x00, 0x00,
172         0x40, 0x06, 0x00, 0x00,
173         0x00, 0x00, 0x00, 0x00,
174         0x00, 0x00, 0x00, 0x00,
175
176         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
177         0x00, 0x00, 0x00, 0x00,
178         0x00, 0x00, 0x00, 0x00,
179         0x50, 0x02, 0x20, 0x00,
180         0x00, 0x00, 0x00, 0x00
181 };
182
/* offset info for MAC + IPv4 + UDP + VXLAN/GENEVE/VXLAN-GPE + inner
 * MAC + IPv4 + UDP dummy packet (the three tunnel types share offset 42)
 */
183 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
184         { ICE_MAC_OFOS,         0 },
185         { ICE_ETYPE_OL,         12 },
186         { ICE_IPV4_OFOS,        14 },
187         { ICE_UDP_OF,           34 },
188         { ICE_VXLAN,            42 },
189         { ICE_GENEVE,           42 },
190         { ICE_VXLAN_GPE,        42 },
191         { ICE_MAC_IL,           50 },
192         { ICE_IPV4_IL,          64 },
193         { ICE_UDP_ILOS,         84 },
194         { ICE_PROTOCOL_LAST,    0 },
195 };
196
/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + IPv4 + UDP */
197 static const u8 dummy_udp_tun_udp_packet[] = {
198         0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
199         0x00, 0x00, 0x00, 0x00,
200         0x00, 0x00, 0x00, 0x00,
201
202         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
203
204         0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
205         0x00, 0x01, 0x00, 0x00,
206         0x00, 0x11, 0x00, 0x00,
207         0x00, 0x00, 0x00, 0x00,
208         0x00, 0x00, 0x00, 0x00,
209
210         0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
211         0x00, 0x3a, 0x00, 0x00,
212
213         0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
214         0x00, 0x00, 0x00, 0x00,
215
216         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
217         0x00, 0x00, 0x00, 0x00,
218         0x00, 0x00, 0x00, 0x00,
219         0x08, 0x00,
220
221         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
222         0x00, 0x01, 0x00, 0x00,
223         0x00, 0x11, 0x00, 0x00,
224         0x00, 0x00, 0x00, 0x00,
225         0x00, 0x00, 0x00, 0x00,
226
227         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
228         0x00, 0x08, 0x00, 0x00,
229 };
230
231 /* offset info for MAC + IPv4 + UDP dummy packet */
232 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
233         { ICE_MAC_OFOS,         0 },
234         { ICE_ETYPE_OL,         12 },
235         { ICE_IPV4_OFOS,        14 },
236         { ICE_UDP_ILOS,         34 },
237         { ICE_PROTOCOL_LAST,    0 },
238 };
239
240 /* Dummy packet for MAC + IPv4 + UDP */
241 static const u8 dummy_udp_packet[] = {
242         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
243         0x00, 0x00, 0x00, 0x00,
244         0x00, 0x00, 0x00, 0x00,
245
246         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
247
248         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
249         0x00, 0x01, 0x00, 0x00,
250         0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
251         0x00, 0x00, 0x00, 0x00,
252         0x00, 0x00, 0x00, 0x00,
253
254         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
255         0x00, 0x08, 0x00, 0x00,
256
257         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
258 };
259
260 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
261 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
262         { ICE_MAC_OFOS,         0 },
263         { ICE_ETYPE_OL,         12 },
264         { ICE_VLAN_OFOS,        14 },
265         { ICE_IPV4_OFOS,        18 },
266         { ICE_UDP_ILOS,         38 },
267         { ICE_PROTOCOL_LAST,    0 },
268 };
269
270 /* C-tag (802.1Q), IPv4:UDP dummy packet */
271 static const u8 dummy_vlan_udp_packet[] = {
272         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
273         0x00, 0x00, 0x00, 0x00,
274         0x00, 0x00, 0x00, 0x00,
275
276         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
277
278         0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
279
280         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
281         0x00, 0x01, 0x00, 0x00,
282         0x00, 0x11, 0x00, 0x00,
283         0x00, 0x00, 0x00, 0x00,
284         0x00, 0x00, 0x00, 0x00,
285
286         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
287         0x00, 0x08, 0x00, 0x00,
288
289         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
290 };
291
292 /* offset info for MAC + IPv4 + TCP dummy packet */
293 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
294         { ICE_MAC_OFOS,         0 },
295         { ICE_ETYPE_OL,         12 },
296         { ICE_IPV4_OFOS,        14 },
297         { ICE_TCP_IL,           34 },
298         { ICE_PROTOCOL_LAST,    0 },
299 };
300
301 /* Dummy packet for MAC + IPv4 + TCP */
302 static const u8 dummy_tcp_packet[] = {
303         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
304         0x00, 0x00, 0x00, 0x00,
305         0x00, 0x00, 0x00, 0x00,
306
307         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
308
309         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
310         0x00, 0x01, 0x00, 0x00,
311         0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
312         0x00, 0x00, 0x00, 0x00,
313         0x00, 0x00, 0x00, 0x00,
314
315         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
316         0x00, 0x00, 0x00, 0x00,
317         0x00, 0x00, 0x00, 0x00,
318         0x50, 0x00, 0x00, 0x00,
319         0x00, 0x00, 0x00, 0x00,
320
321         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
322 };
323
324 /* offset info for MAC + MPLS dummy packet */
325 static const struct ice_dummy_pkt_offsets dummy_mpls_packet_offsets[] = {
326         { ICE_MAC_OFOS,         0 },
327         { ICE_ETYPE_OL,         12 },
328         { ICE_PROTOCOL_LAST,    0 },
329 };
330
331 /* Dummy packet for MAC + MPLS */
332 static const u8 dummy_mpls_packet[] = {
333         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334         0x00, 0x00, 0x00, 0x00,
335         0x00, 0x00, 0x00, 0x00,
336
337         0x88, 0x47,             /* ICE_ETYPE_OL 12 */
338         0x00, 0x00, 0x01, 0x00, /* one MPLS label entry; 0x01 sets bottom-of-stack */
339
340         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
341 };
342
343 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
344 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
345         { ICE_MAC_OFOS,         0 },
346         { ICE_ETYPE_OL,         12 },
347         { ICE_VLAN_OFOS,        14 },
348         { ICE_IPV4_OFOS,        18 },
349         { ICE_TCP_IL,           38 },
350         { ICE_PROTOCOL_LAST,    0 },
351 };
352
353 /* C-tag (802.1Q), IPv4:TCP dummy packet */
354 static const u8 dummy_vlan_tcp_packet[] = {
355         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
356         0x00, 0x00, 0x00, 0x00,
357         0x00, 0x00, 0x00, 0x00,
358
359         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
360
361         0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
362
363         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
364         0x00, 0x01, 0x00, 0x00,
365         0x00, 0x06, 0x00, 0x00,
366         0x00, 0x00, 0x00, 0x00,
367         0x00, 0x00, 0x00, 0x00,
368
369         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
370         0x00, 0x00, 0x00, 0x00,
371         0x00, 0x00, 0x00, 0x00,
372         0x50, 0x00, 0x00, 0x00,
373         0x00, 0x00, 0x00, 0x00,
374
375         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
376 };
377
/* offset info for MAC + IPv6 + TCP dummy packet */
378 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
379         { ICE_MAC_OFOS,         0 },
380         { ICE_ETYPE_OL,         12 },
381         { ICE_IPV6_OFOS,        14 },
382         { ICE_TCP_IL,           54 },
383         { ICE_PROTOCOL_LAST,    0 },
384 };
385
/* Dummy packet for MAC + IPv6 + TCP */
386 static const u8 dummy_tcp_ipv6_packet[] = {
387         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
388         0x00, 0x00, 0x00, 0x00,
389         0x00, 0x00, 0x00, 0x00,
390
391         0x86, 0xDD,             /* ICE_ETYPE_OL 12 */
392
393         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
394         0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
395         0x00, 0x00, 0x00, 0x00,
396         0x00, 0x00, 0x00, 0x00,
397         0x00, 0x00, 0x00, 0x00,
398         0x00, 0x00, 0x00, 0x00,
399         0x00, 0x00, 0x00, 0x00,
400         0x00, 0x00, 0x00, 0x00,
401         0x00, 0x00, 0x00, 0x00,
402         0x00, 0x00, 0x00, 0x00,
403
404         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
405         0x00, 0x00, 0x00, 0x00,
406         0x00, 0x00, 0x00, 0x00,
407         0x50, 0x00, 0x00, 0x00,
408         0x00, 0x00, 0x00, 0x00,
409
410         0x00, 0x00, /* 2 bytes for 4 byte alignment */
411 };
412
413 /* C-tag (802.1Q): IPv6 + TCP */
414 static const struct ice_dummy_pkt_offsets
415 dummy_vlan_tcp_ipv6_packet_offsets[] = {
416         { ICE_MAC_OFOS,         0 },
417         { ICE_ETYPE_OL,         12 },
418         { ICE_VLAN_OFOS,        14 },
419         { ICE_IPV6_OFOS,        18 },
420         { ICE_TCP_IL,           58 },
421         { ICE_PROTOCOL_LAST,    0 },
422 };
423
424 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
425 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
426         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
427         0x00, 0x00, 0x00, 0x00,
428         0x00, 0x00, 0x00, 0x00,
429
430         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
431
432         0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
433
434         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
435         0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
436         0x00, 0x00, 0x00, 0x00,
437         0x00, 0x00, 0x00, 0x00,
438         0x00, 0x00, 0x00, 0x00,
439         0x00, 0x00, 0x00, 0x00,
440         0x00, 0x00, 0x00, 0x00,
441         0x00, 0x00, 0x00, 0x00,
442         0x00, 0x00, 0x00, 0x00,
443         0x00, 0x00, 0x00, 0x00,
444
445         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
446         0x00, 0x00, 0x00, 0x00,
447         0x00, 0x00, 0x00, 0x00,
448         0x50, 0x00, 0x00, 0x00,
449         0x00, 0x00, 0x00, 0x00,
450
451         0x00, 0x00, /* 2 bytes for 4 byte alignment */
452 };
453
454 /* IPv6 + UDP */
455 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
456         { ICE_MAC_OFOS,         0 },
457         { ICE_ETYPE_OL,         12 },
458         { ICE_IPV6_OFOS,        14 },
459         { ICE_UDP_ILOS,         54 },
460         { ICE_PROTOCOL_LAST,    0 },
461 };
462
463 /* IPv6 + UDP dummy packet */
464 static const u8 dummy_udp_ipv6_packet[] = {
465         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
466         0x00, 0x00, 0x00, 0x00,
467         0x00, 0x00, 0x00, 0x00,
468
469         0x86, 0xDD,             /* ICE_ETYPE_OL 12 */
470
471         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
472         0x00, 0x10, 0x11, 0x00, /* Next header UDP */
473         0x00, 0x00, 0x00, 0x00,
474         0x00, 0x00, 0x00, 0x00,
475         0x00, 0x00, 0x00, 0x00,
476         0x00, 0x00, 0x00, 0x00,
477         0x00, 0x00, 0x00, 0x00,
478         0x00, 0x00, 0x00, 0x00,
479         0x00, 0x00, 0x00, 0x00,
480         0x00, 0x00, 0x00, 0x00,
481
482         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
483         0x00, 0x10, 0x00, 0x00,
484
485         0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
486         0x00, 0x00, 0x00, 0x00,
487
488         0x00, 0x00, /* 2 bytes for 4 byte alignment */
489 };
490
491 /* C-tag (802.1Q): IPv6 + UDP */
492 static const struct ice_dummy_pkt_offsets
493 dummy_vlan_udp_ipv6_packet_offsets[] = {
494         { ICE_MAC_OFOS,         0 },
495         { ICE_ETYPE_OL,         12 },
496         { ICE_VLAN_OFOS,        14 },
497         { ICE_IPV6_OFOS,        18 },
498         { ICE_UDP_ILOS,         58 },
499         { ICE_PROTOCOL_LAST,    0 },
500 };
501
502 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
503 static const u8 dummy_vlan_udp_ipv6_packet[] = {
504         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
505         0x00, 0x00, 0x00, 0x00,
506         0x00, 0x00, 0x00, 0x00,
507
508         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
509
510         0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
511
512         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
513         0x00, 0x08, 0x11, 0x00, /* Next header UDP */
514         0x00, 0x00, 0x00, 0x00,
515         0x00, 0x00, 0x00, 0x00,
516         0x00, 0x00, 0x00, 0x00,
517         0x00, 0x00, 0x00, 0x00,
518         0x00, 0x00, 0x00, 0x00,
519         0x00, 0x00, 0x00, 0x00,
520         0x00, 0x00, 0x00, 0x00,
521         0x00, 0x00, 0x00, 0x00,
522
523         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
524         0x00, 0x08, 0x00, 0x00,
525
526         0x00, 0x00, /* 2 bytes for 4 byte alignment */
527 };
528
529 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
530 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
531         { ICE_MAC_OFOS,         0 },
532         { ICE_IPV4_OFOS,        14 },
533         { ICE_UDP_OF,           34 },
534         { ICE_GTP,              42 },
535         { ICE_IPV4_IL,          62 },
536         { ICE_TCP_IL,           82 },
537         { ICE_PROTOCOL_LAST,    0 },
538 };
539
/* Dummy packet: MAC + outer IPv4 + UDP + GTP-U (with PDU session ext) +
 * inner IPv4 + TCP
 */
540 static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
541         0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
542         0x00, 0x00, 0x00, 0x00,
543         0x00, 0x00, 0x00, 0x00,
544         0x08, 0x00,
545
546         0x45, 0x00, 0x00, 0x58, /* IP 14 */
547         0x00, 0x00, 0x00, 0x00,
548         0x00, 0x11, 0x00, 0x00,
549         0x00, 0x00, 0x00, 0x00,
550         0x00, 0x00, 0x00, 0x00,
551
552         0x00, 0x00, 0x08, 0x68, /* UDP 34 */
553         0x00, 0x44, 0x00, 0x00,
554
555         0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
556         0x00, 0x00, 0x00, 0x00,
557         0x00, 0x00, 0x00, 0x85,
558
559         0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
560         0x00, 0x00, 0x00, 0x00,
561
562         0x45, 0x00, 0x00, 0x28, /* IP 62 */
563         0x00, 0x00, 0x00, 0x00,
564         0x00, 0x06, 0x00, 0x00,
565         0x00, 0x00, 0x00, 0x00,
566         0x00, 0x00, 0x00, 0x00,
567
568         0x00, 0x00, 0x00, 0x00, /* TCP 82 */
569         0x00, 0x00, 0x00, 0x00,
570         0x00, 0x00, 0x00, 0x00,
571         0x50, 0x00, 0x00, 0x00,
572         0x00, 0x00, 0x00, 0x00,
573
574         0x00, 0x00, /* 2 bytes for 4 byte alignment */
575 };
576
577 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
578 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
579         { ICE_MAC_OFOS,         0 },
580         { ICE_IPV4_OFOS,        14 },
581         { ICE_UDP_OF,           34 },
582         { ICE_GTP,              42 },
583         { ICE_IPV4_IL,          62 },
584         { ICE_UDP_ILOS,         82 },
585         { ICE_PROTOCOL_LAST,    0 },
586 };
587
/* Dummy packet: MAC + outer IPv4 + UDP + GTP-U (with PDU session ext) +
 * inner IPv4 + UDP
 */
588 static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
589         0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
590         0x00, 0x00, 0x00, 0x00,
591         0x00, 0x00, 0x00, 0x00,
592         0x08, 0x00,
593
594         0x45, 0x00, 0x00, 0x4c, /* IP 14 */
595         0x00, 0x00, 0x00, 0x00,
596         0x00, 0x11, 0x00, 0x00,
597         0x00, 0x00, 0x00, 0x00,
598         0x00, 0x00, 0x00, 0x00,
599
600         0x00, 0x00, 0x08, 0x68, /* UDP 34 */
601         0x00, 0x38, 0x00, 0x00,
602
603         0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
604         0x00, 0x00, 0x00, 0x00,
605         0x00, 0x00, 0x00, 0x85,
606
607         0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
608         0x00, 0x00, 0x00, 0x00,
609
610         0x45, 0x00, 0x00, 0x1c, /* IP 62 */
611         0x00, 0x00, 0x00, 0x00,
612         0x00, 0x11, 0x00, 0x00,
613         0x00, 0x00, 0x00, 0x00,
614         0x00, 0x00, 0x00, 0x00,
615
616         0x00, 0x00, 0x00, 0x00, /* UDP 82 */
617         0x00, 0x08, 0x00, 0x00,
618
619         0x00, 0x00, /* 2 bytes for 4 byte alignment */
620 };
621
622 /* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
623 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
624         { ICE_MAC_OFOS,         0 },
625         { ICE_IPV4_OFOS,        14 },
626         { ICE_UDP_OF,           34 },
627         { ICE_GTP,              42 },
628         { ICE_IPV6_IL,          62 },
629         { ICE_TCP_IL,           102 },
630         { ICE_PROTOCOL_LAST,    0 },
631 };
632
/* Dummy packet: MAC + outer IPv4 + UDP + GTP-U (with PDU session ext) +
 * inner IPv6 + TCP
 */
633 static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
634         0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
635         0x00, 0x00, 0x00, 0x00,
636         0x00, 0x00, 0x00, 0x00,
637         0x08, 0x00,
638
639         0x45, 0x00, 0x00, 0x6c, /* IP 14 */
640         0x00, 0x00, 0x00, 0x00,
641         0x00, 0x11, 0x00, 0x00,
642         0x00, 0x00, 0x00, 0x00,
643         0x00, 0x00, 0x00, 0x00,
644
645         0x00, 0x00, 0x08, 0x68, /* UDP 34 */
646         0x00, 0x58, 0x00, 0x00,
647
648         0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
649         0x00, 0x00, 0x00, 0x00,
650         0x00, 0x00, 0x00, 0x85,
651
652         0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
653         0x00, 0x00, 0x00, 0x00,
654
655         0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
656         0x00, 0x14, 0x06, 0x00,
657         0x00, 0x00, 0x00, 0x00,
658         0x00, 0x00, 0x00, 0x00,
659         0x00, 0x00, 0x00, 0x00,
660         0x00, 0x00, 0x00, 0x00,
661         0x00, 0x00, 0x00, 0x00,
662         0x00, 0x00, 0x00, 0x00,
663         0x00, 0x00, 0x00, 0x00,
664         0x00, 0x00, 0x00, 0x00,
665
666         0x00, 0x00, 0x00, 0x00, /* TCP 102 */
667         0x00, 0x00, 0x00, 0x00,
668         0x00, 0x00, 0x00, 0x00,
669         0x50, 0x00, 0x00, 0x00,
670         0x00, 0x00, 0x00, 0x00,
671
672         0x00, 0x00, /* 2 bytes for 4 byte alignment */
673 };
674
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
675 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
676         { ICE_MAC_OFOS,         0 },
677         { ICE_IPV4_OFOS,        14 },
678         { ICE_UDP_OF,           34 },
679         { ICE_GTP,              42 },
680         { ICE_IPV6_IL,          62 },
681         { ICE_UDP_ILOS,         102 },
682         { ICE_PROTOCOL_LAST,    0 },
683 };
684
/* Dummy packet: MAC + outer IPv4 + UDP + GTP-U (with PDU session ext) +
 * inner IPv6 + UDP
 */
685 static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
686         0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
687         0x00, 0x00, 0x00, 0x00,
688         0x00, 0x00, 0x00, 0x00,
689         0x08, 0x00,
690
691         0x45, 0x00, 0x00, 0x60, /* IP 14 */
692         0x00, 0x00, 0x00, 0x00,
693         0x00, 0x11, 0x00, 0x00,
694         0x00, 0x00, 0x00, 0x00,
695         0x00, 0x00, 0x00, 0x00,
696
697         0x00, 0x00, 0x08, 0x68, /* UDP 34 */
698         0x00, 0x4c, 0x00, 0x00,
699
700         0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
701         0x00, 0x00, 0x00, 0x00,
702         0x00, 0x00, 0x00, 0x85,
703
704         0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
705         0x00, 0x00, 0x00, 0x00,
706
707         0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
708         0x00, 0x08, 0x11, 0x00,
709         0x00, 0x00, 0x00, 0x00,
710         0x00, 0x00, 0x00, 0x00,
711         0x00, 0x00, 0x00, 0x00,
712         0x00, 0x00, 0x00, 0x00,
713         0x00, 0x00, 0x00, 0x00,
714         0x00, 0x00, 0x00, 0x00,
715         0x00, 0x00, 0x00, 0x00,
716         0x00, 0x00, 0x00, 0x00,
717
718         0x00, 0x00, 0x00, 0x00, /* UDP 102 */
719         0x00, 0x08, 0x00, 0x00,
720
721         0x00, 0x00, /* 2 bytes for 4 byte alignment */
722 };
723
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
724 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
725         { ICE_MAC_OFOS,         0 },
726         { ICE_IPV6_OFOS,        14 },
727         { ICE_UDP_OF,           54 },
728         { ICE_GTP,              62 },
729         { ICE_IPV4_IL,          82 },
730         { ICE_TCP_IL,           102 },
731         { ICE_PROTOCOL_LAST,    0 },
732 };
733
/* Dummy packet: MAC + outer IPv6 + UDP + GTP-U (with PDU session ext) +
 * inner IPv4 + TCP
 */
734 static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
735         0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
736         0x00, 0x00, 0x00, 0x00,
737         0x00, 0x00, 0x00, 0x00,
738         0x86, 0xdd,
739
740         0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
741         0x00, 0x44, 0x11, 0x00,
742         0x00, 0x00, 0x00, 0x00,
743         0x00, 0x00, 0x00, 0x00,
744         0x00, 0x00, 0x00, 0x00,
745         0x00, 0x00, 0x00, 0x00,
746         0x00, 0x00, 0x00, 0x00,
747         0x00, 0x00, 0x00, 0x00,
748         0x00, 0x00, 0x00, 0x00,
749         0x00, 0x00, 0x00, 0x00,
750
751         0x00, 0x00, 0x08, 0x68, /* UDP 54 */
752         0x00, 0x44, 0x00, 0x00,
753
754         0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
755         0x00, 0x00, 0x00, 0x00,
756         0x00, 0x00, 0x00, 0x85,
757
758         0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
759         0x00, 0x00, 0x00, 0x00,
760
761         0x45, 0x00, 0x00, 0x28, /* IP 82 */
762         0x00, 0x00, 0x00, 0x00,
763         0x00, 0x06, 0x00, 0x00,
764         0x00, 0x00, 0x00, 0x00,
765         0x00, 0x00, 0x00, 0x00,
766
767         0x00, 0x00, 0x00, 0x00, /* TCP 102 */
768         0x00, 0x00, 0x00, 0x00,
769         0x00, 0x00, 0x00, 0x00,
770         0x50, 0x00, 0x00, 0x00,
771         0x00, 0x00, 0x00, 0x00,
772
773         0x00, 0x00, /* 2 bytes for 4 byte alignment */
774 };
775
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
776 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
777         { ICE_MAC_OFOS,         0 },
778         { ICE_IPV6_OFOS,        14 },
779         { ICE_UDP_OF,           54 },
780         { ICE_GTP,              62 },
781         { ICE_IPV4_IL,          82 },
782         { ICE_UDP_ILOS,         102 },
783         { ICE_PROTOCOL_LAST,    0 },
784 };
785
/* Dummy packet: MAC + outer IPv6 + UDP + GTP-U (with PDU session ext) +
 * inner IPv4 + UDP
 */
786 static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
787         0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
788         0x00, 0x00, 0x00, 0x00,
789         0x00, 0x00, 0x00, 0x00,
790         0x86, 0xdd,
791
792         0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
793         0x00, 0x38, 0x11, 0x00,
794         0x00, 0x00, 0x00, 0x00,
795         0x00, 0x00, 0x00, 0x00,
796         0x00, 0x00, 0x00, 0x00,
797         0x00, 0x00, 0x00, 0x00,
798         0x00, 0x00, 0x00, 0x00,
799         0x00, 0x00, 0x00, 0x00,
800         0x00, 0x00, 0x00, 0x00,
801         0x00, 0x00, 0x00, 0x00,
802
803         0x00, 0x00, 0x08, 0x68, /* UDP 54 */
804         0x00, 0x38, 0x00, 0x00,
805
806         0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
807         0x00, 0x00, 0x00, 0x00,
808         0x00, 0x00, 0x00, 0x85,
809
810         0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
811         0x00, 0x00, 0x00, 0x00,
812
813         0x45, 0x00, 0x00, 0x1c, /* IP 82 */
814         0x00, 0x00, 0x00, 0x00,
815         0x00, 0x11, 0x00, 0x00,
816         0x00, 0x00, 0x00, 0x00,
817         0x00, 0x00, 0x00, 0x00,
818
819         0x00, 0x00, 0x00, 0x00, /* UDP 102 */
820         0x00, 0x08, 0x00, 0x00,
821
822         0x00, 0x00, /* 2 bytes for 4 byte alignment */
823 };
824
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
825 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
826         { ICE_MAC_OFOS,         0 },
827         { ICE_IPV6_OFOS,        14 },
828         { ICE_UDP_OF,           54 },
829         { ICE_GTP,              62 },
830         { ICE_IPV6_IL,          82 },
831         { ICE_TCP_IL,           122 },
832         { ICE_PROTOCOL_LAST,    0 },
833 };
834
/* Dummy packet: MAC + outer IPv6 + UDP + GTP-U (with PDU session ext) +
 * inner IPv6 + TCP
 */
835 static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
836         0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
837         0x00, 0x00, 0x00, 0x00,
838         0x00, 0x00, 0x00, 0x00,
839         0x86, 0xdd,
840
841         0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
842         0x00, 0x58, 0x11, 0x00,
843         0x00, 0x00, 0x00, 0x00,
844         0x00, 0x00, 0x00, 0x00,
845         0x00, 0x00, 0x00, 0x00,
846         0x00, 0x00, 0x00, 0x00,
847         0x00, 0x00, 0x00, 0x00,
848         0x00, 0x00, 0x00, 0x00,
849         0x00, 0x00, 0x00, 0x00,
850         0x00, 0x00, 0x00, 0x00,
851
852         0x00, 0x00, 0x08, 0x68, /* UDP 54 */
853         0x00, 0x58, 0x00, 0x00,
854
855         0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
856         0x00, 0x00, 0x00, 0x00,
857         0x00, 0x00, 0x00, 0x85,
858
859         0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
860         0x00, 0x00, 0x00, 0x00,
861
862         0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
863         0x00, 0x14, 0x06, 0x00,
864         0x00, 0x00, 0x00, 0x00,
865         0x00, 0x00, 0x00, 0x00,
866         0x00, 0x00, 0x00, 0x00,
867         0x00, 0x00, 0x00, 0x00,
868         0x00, 0x00, 0x00, 0x00,
869         0x00, 0x00, 0x00, 0x00,
870         0x00, 0x00, 0x00, 0x00,
871         0x00, 0x00, 0x00, 0x00,
872
873         0x00, 0x00, 0x00, 0x00, /* TCP 122 */
874         0x00, 0x00, 0x00, 0x00,
875         0x00, 0x00, 0x00, 0x00,
876         0x50, 0x00, 0x00, 0x00,
877         0x00, 0x00, 0x00, 0x00,
878
879         0x00, 0x00, /* 2 bytes for 4 byte alignment */
880 };
881
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
882 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
883         { ICE_MAC_OFOS,         0 },
884         { ICE_IPV6_OFOS,        14 },
885         { ICE_UDP_OF,           54 },
886         { ICE_GTP,              62 },
887         { ICE_IPV6_IL,          82 },
888         { ICE_UDP_ILOS,         122 },
889         { ICE_PROTOCOL_LAST,    0 },
890 };
891
/* Dummy packet: MAC + outer IPv6 + UDP + GTP-U (with PDU session ext) +
 * inner IPv6 + UDP
 */
892 static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
893         0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
894         0x00, 0x00, 0x00, 0x00,
895         0x00, 0x00, 0x00, 0x00,
896         0x86, 0xdd,
897
898         0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
899         0x00, 0x4c, 0x11, 0x00,
900         0x00, 0x00, 0x00, 0x00,
901         0x00, 0x00, 0x00, 0x00,
902         0x00, 0x00, 0x00, 0x00,
903         0x00, 0x00, 0x00, 0x00,
904         0x00, 0x00, 0x00, 0x00,
905         0x00, 0x00, 0x00, 0x00,
906         0x00, 0x00, 0x00, 0x00,
907         0x00, 0x00, 0x00, 0x00,
908
909         0x00, 0x00, 0x08, 0x68, /* UDP 54 */
910         0x00, 0x4c, 0x00, 0x00,
911
912         0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
913         0x00, 0x00, 0x00, 0x00,
914         0x00, 0x00, 0x00, 0x85,
915
916         0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
917         0x00, 0x00, 0x00, 0x00,
918
919         0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
920         0x00, 0x08, 0x11, 0x00,
921         0x00, 0x00, 0x00, 0x00,
922         0x00, 0x00, 0x00, 0x00,
923         0x00, 0x00, 0x00, 0x00,
924         0x00, 0x00, 0x00, 0x00,
925         0x00, 0x00, 0x00, 0x00,
926         0x00, 0x00, 0x00, 0x00,
927         0x00, 0x00, 0x00, 0x00,
928         0x00, 0x00, 0x00, 0x00,
929
930         0x00, 0x00, 0x00, 0x00, /* UDP 122 */
931         0x00, 0x08, 0x00, 0x00,
932
933         0x00, 0x00, /* 2 bytes for 4 byte alignment */
934 };
935
/* Offsets for the outer-IPv4 GTP-U / inner-IPv4 dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_GTP,              42 },
        { ICE_IPV4_IL,          62 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / IPv4 / UDP(2152) / GTP-U + PDU session extension
 * header / inner IPv4.
 */
static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,             /* Ethertype IPv4 */

        0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x40, 0x00,
        0x40, 0x11, 0x00, 0x00, /* protocol UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34, src/dst port 2152 (GTP-U) */
        0x00, 0x00, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
        0x00, 0x00, 0x00, 0x00,

        0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
        0x00, 0x00, 0x40, 0x00,
        0x40, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 byte alignment */
};

/* Offsets for the outer-IPv4 GTP-U / inner-IPv6 dummy packet below */
static const
struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_GTP,              42 },
        { ICE_IPV6_IL,          62 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / IPv4 / UDP(2152) / GTP-U + PDU session extension
 * header / inner IPv6.
 */
static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,             /* Ethertype IPv4 */

        0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x40, 0x00,
        0x40, 0x11, 0x00, 0x00, /* protocol UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34, src/dst port 2152 (GTP-U) */
        0x00, 0x00, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
        0x00, 0x00, 0x00, 0x00,

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
        0x00, 0x00, 0x3b, 0x00, /* next header: no next header */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 byte alignment */
};
1020
/* Offsets for the outer-IPv6 GTP-U / inner-IPv4 dummy packet below */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_OF,           54 },
        { ICE_GTP,              62 },
        { ICE_IPV4_IL,          82 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / IPv6 / UDP(2152) / GTP-U + PDU session extension
 * header / inner IPv4.
 */
static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xdd,             /* Ethertype IPv6 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54, src/dst port 2152 (GTP-U) */
        0x00, 0x00, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
        0x00, 0x00, 0x00, 0x00,

        0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
        0x00, 0x00, 0x40, 0x00,
        0x40, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 byte alignment */
};

/* Offsets for the outer-IPv6 GTP-U / inner-IPv6 dummy packet below */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_OF,           54 },
        { ICE_GTP,              62 },
        { ICE_IPV6_IL,          82 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / IPv6 / UDP(2152) / GTP-U + PDU session extension
 * header / inner IPv6.
 */
static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xdd,             /* Ethertype IPv6 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54, src/dst port 2152 (GTP-U) */
        0x00, 0x00, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
        0x00, 0x00, 0x00, 0x00,

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 82 */
        0x00, 0x00, 0x3b, 0x00, /* next header: no next header */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 byte alignment */
};
1117
/* Offsets for the IPv4 / UDP / GTP-U dummy packet (no inner payload) */
static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_GTP,              42 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / IPv4 / UDP(2152) / GTP-U + PDU session extension
 * header, no inner payload.
 */
static const u8 dummy_udp_gtp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,             /* Ethertype IPv4 */

        0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34, dst port 2152 (GTP-U) */
        0x00, 0x1c, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
        0x00, 0x00, 0x00, 0x00,

};

/* Offsets for GTP-U over IPv4 without an inner payload header */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_GTP_NO_PAY,       42 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Offsets for GTP-U over IPv6 without an inner payload header */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_OF,           54 },
        { ICE_GTP_NO_PAY,       62 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / IPv6 / UDP(2152) / minimal GTP-U header,
 * used with dummy_ipv6_gtp_no_pay_packet_offsets.
 */
static const u8 dummy_ipv6_gtp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xdd,             /* Ethertype IPv6 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54, src/dst port 2152 (GTP-U) */
        0x00, 0x00, 0x00, 0x00,

        0x30, 0x00, 0x00, 0x28,  /* ICE_GTP 62 */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 byte alignment */
};
1192
/* Offsets for the bare VLAN + PPPoE session dummy packet */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14},
        { ICE_PPPOE,            18 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Offsets for the VLAN + PPPoE / IPv4 dummy packet */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14},
        { ICE_PPPOE,            18 },
        { ICE_IPV4_OFOS,        26 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / 802.1Q VLAN / PPPoE session (0x8864) /
 * PPP proto IPv4 (0x0021) / IPv4.
 */
static const u8 dummy_pppoe_ipv4_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00,             /* ICE_ETYPE_OL 12 */

        0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

        0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
        0x00, 0x16,

        0x00, 0x21,             /* PPP Link Layer 24 */

        0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 26 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};

/* Offsets for the VLAN + PPPoE / IPv4 / TCP dummy packet */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14},
        { ICE_PPPOE,            18 },
        { ICE_IPV4_OFOS,        26 },
        { ICE_TCP_IL,           46 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / 802.1Q VLAN / PPPoE session / IPv4 / TCP */
static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00,             /* ICE_ETYPE_OL 12 */

        0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

        0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
        0x00, 0x16,

        0x00, 0x21,             /* PPP Link Layer 24 */

        0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x06, 0x00, 0x00, /* protocol TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};

/* Offsets for the VLAN + PPPoE / IPv4 / UDP dummy packet */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14},
        { ICE_PPPOE,            18 },
        { ICE_IPV4_OFOS,        26 },
        { ICE_UDP_ILOS,         46 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / 802.1Q VLAN / PPPoE session / IPv4 / UDP */
static const u8 dummy_pppoe_ipv4_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00,             /* ICE_ETYPE_OL 12 */

        0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

        0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
        0x00, 0x16,

        0x00, 0x21,             /* PPP Link Layer 24 */

        0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
        0x00, 0x08, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1309
/* Offsets for the VLAN + PPPoE / IPv6 dummy packet */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14},
        { ICE_PPPOE,            18 },
        { ICE_IPV6_OFOS,        26 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / 802.1Q VLAN / PPPoE session (0x8864) /
 * PPP proto IPv6 (0x0057) / IPv6.
 */
static const u8 dummy_pppoe_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00,             /* ICE_ETYPE_OL 12 */

        0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

        0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
        0x00, 0x2a,

        0x00, 0x57,             /* PPP Link Layer 24 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
        0x00, 0x00, 0x3b, 0x00, /* next header: no next header */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};

/* Offsets for the VLAN + PPPoE / IPv6 / TCP dummy packet */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14},
        { ICE_PPPOE,            18 },
        { ICE_IPV6_OFOS,        26 },
        { ICE_TCP_IL,           66 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / 802.1Q VLAN / PPPoE session / IPv6 / TCP */
static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00,             /* ICE_ETYPE_OL 12 */

        0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

        0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
        0x00, 0x2a,

        0x00, 0x57,             /* PPP Link Layer 24 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
        0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};

/* Offsets for the VLAN + PPPoE / IPv6 / UDP dummy packet */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14},
        { ICE_PPPOE,            18 },
        { ICE_IPV6_OFOS,        26 },
        { ICE_UDP_ILOS,         66 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / 802.1Q VLAN / PPPoE session / IPv6 / UDP */
static const u8 dummy_pppoe_ipv6_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00,             /* ICE_ETYPE_OL 12 */

        0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

        0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
        0x00, 0x2a,

        0x00, 0x57,             /* PPP Link Layer 24 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
        0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
        0x00, 0x08, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1433
/* Offsets for the IPv4 / ESP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_ESP,                      34 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / IPv4 (proto 50) / ESP */
static const u8 dummy_ipv4_esp_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,             /* Ethertype IPv4 */

        0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x40, 0x00,
        0x40, 0x32, 0x00, 0x00, /* protocol 0x32 = ESP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};

/* Offsets for the IPv6 / ESP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_ESP,                      54 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / IPv6 (next header 50) / ESP */
static const u8 dummy_ipv6_esp_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xDD,             /* Ethertype IPv6 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x08, 0x32, 0x00, /* Next header ESP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};

/* Offsets for the IPv4 / AH dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_AH,                       34 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / IPv4 (proto 51) / AH */
static const u8 dummy_ipv4_ah_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,             /* Ethertype IPv4 */

        0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x40, 0x00,
        0x40, 0x33, 0x00, 0x00, /* protocol 0x33 = AH */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};

/* Offsets for the IPv6 / AH dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_AH,                       54 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / IPv6 (next header 51) / AH */
static const u8 dummy_ipv6_ah_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xDD,             /* Ethertype IPv6 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x0c, 0x33, 0x00, /* Next header AH */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1541
/* Offsets for the IPv4 / UDP(4500) NAT-T (ESP-in-UDP) dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_ILOS,         34 },
        { ICE_NAT_T,            42 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / IPv4 / UDP dst port 4500 / NAT-T (UDP-encapsulated
 * ESP, RFC 3948).
 */
static const u8 dummy_ipv4_nat_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,             /* Ethertype IPv4 */

        0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x40, 0x00,
        0x40, 0x11, 0x00, 0x00, /* protocol UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x11, 0x94, /* ICE_UDP_ILOS 34, dst port 4500 (NAT-T) */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_NAT_T 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};

/* Offsets for the IPv6 / UDP(4500) NAT-T (ESP-in-UDP) dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_ILOS,         54 },
        { ICE_NAT_T,            62 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / IPv6 / UDP dst port 4500 / NAT-T (UDP-encapsulated
 * ESP, RFC 3948).
 */
static const u8 dummy_ipv6_nat_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xDD,             /* Ethertype IPv6 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x10, 0x11, 0x00, /* Next header UDP (carrying NAT-T) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x11, 0x94, /* ICE_UDP_ILOS 54, dst port 4500 (NAT-T) */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_NAT_T 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */

};
1603
/* Offsets for the IPv4 / L2TPv3-over-IP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_L2TPV3,           34 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / IPv4 (proto 115) / L2TPv3 */
static const u8 dummy_ipv4_l2tpv3_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,             /* Ethertype IPv4 */

        0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x40, 0x00,
        0x40, 0x73, 0x00, 0x00, /* protocol 0x73 = L2TPv3 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};

/* Offsets for the IPv6 / L2TPv3-over-IP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_L2TPV3,           54 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / IPv6 (next header 115) / L2TPv3 */
static const u8 dummy_ipv6_l2tpv3_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xDD,             /* Ethertype IPv6 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x0c, 0x73, 0x40, /* next header 0x73 = L2TPv3 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1658
/* Offsets for the QinQ (double VLAN) / IPv4 dummy packet */
static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_EX,          14 },
        { ICE_VLAN_IN,          18 },
        { ICE_IPV4_OFOS,        22 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / 0x9100 outer tag / 0x8100 inner tag / IPv4 / UDP */
static const u8 dummy_qinq_ipv4_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x91, 0x00,             /* ICE_ETYPE_OL 12 */

        0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
        0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_IN 18 */

        0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
        0x00, 0x08, 0x00, 0x00,

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};

/* Offsets for the QinQ (double VLAN) / IPv6 dummy packet */
static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_EX,          14 },
        { ICE_VLAN_IN,          18 },
        { ICE_IPV6_OFOS,        22 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / 0x9100 outer tag / 0x8100 inner tag / IPv6 / UDP */
static const u8 dummy_qinq_ipv6_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x91, 0x00,             /* ICE_ETYPE_OL 12 */

        0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
        0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_IN 18 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
        0x00, 0x10, 0x11, 0x00, /* Next header UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
        0x00, 0x10, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
1728
/* Offsets for the QinQ / PPPoE session dummy packet (no IP payload) */
static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_EX,          14 },
        { ICE_VLAN_IN,          18 },
        { ICE_PPPOE,            22 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Offsets for the QinQ / PPPoE / IPv4 dummy packet */
static const
struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_EX,          14 },
        { ICE_VLAN_IN,          18 },
        { ICE_PPPOE,            22 },
        { ICE_IPV4_OFOS,        30 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / 0x9100 outer tag / 0x8100 inner tag / PPPoE
 * session (0x8864) / PPP proto IPv4 (0x0021) / IPv4.
 */
static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x91, 0x00,             /* ICE_ETYPE_OL 12 */

        0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
        0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_IN 18 */

        0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
        0x00, 0x16,

        0x00, 0x21,             /* PPP Link Layer 28 */

        0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 30 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};

/* Offsets for the QinQ / PPPoE / IPv6 dummy packet */
static const
struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_EX,          14},
        { ICE_VLAN_IN,          18 },
        { ICE_PPPOE,            22 },
        { ICE_IPV6_OFOS,        30 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet: Ethernet / 0x9100 outer tag / 0x8100 inner tag / PPPoE
 * session (0x8864) / PPP proto IPv6 (0x0057) / IPv6.
 */
static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x91, 0x00,             /* ICE_ETYPE_OL 12 */

        0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
        0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_IN 18 */

        0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
        0x00, 0x2a,

        0x00, 0x57,             /* PPP Link Layer 28*/

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
        0x00, 0x00, 0x3b, 0x00, /* next header: no next header */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1812
/* Bitmap per recipe: which field-vector profiles each recipe is
 * associated with (recipe -> profile map).
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
                          ICE_MAX_NUM_PROFILES);

/* Bitmap per profile: which recipes each field-vector profile is
 * associated with (profile -> recipe map).
 */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
                          ICE_MAX_NUM_RECIPES);

/* Forward declaration; defined later in this file */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1822
1823 /**
1824  * ice_collect_result_idx - copy result index values
1825  * @buf: buffer that contains the result index
1826  * @recp: the recipe struct to copy data into
1827  */
1828 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1829                                    struct ice_sw_recipe *recp)
1830 {
1831         if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1832                 ice_set_bit(buf->content.result_indx &
1833                             ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1834 }
1835
/* Mapping of GTPU profile IDs to their switch tunnel types; scanned in order
 * by ice_get_tun_type_for_recipe() to refine a generic ICE_SW_TUN_GTP result.
 */
static struct ice_prof_type_entry ice_prof_type_tbl[ICE_GTPU_PROFILE] = {
	{ ICE_PROFID_IPV4_GTPU_IPV4_OTHER,    ICE_SW_TUN_IPV4_GTPU_IPV4},
	{ ICE_PROFID_IPV4_GTPU_IPV4_UDP,      ICE_SW_TUN_IPV4_GTPU_IPV4_UDP},
	{ ICE_PROFID_IPV4_GTPU_IPV4_TCP,      ICE_SW_TUN_IPV4_GTPU_IPV4_TCP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV4},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP},
	{ ICE_PROFID_IPV4_GTPU_IPV6_OTHER,    ICE_SW_TUN_IPV4_GTPU_IPV6},
	{ ICE_PROFID_IPV4_GTPU_IPV6_UDP,      ICE_SW_TUN_IPV4_GTPU_IPV6_UDP},
	{ ICE_PROFID_IPV4_GTPU_IPV6_TCP,      ICE_SW_TUN_IPV4_GTPU_IPV6_TCP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV6},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP},
	{ ICE_PROFID_IPV6_GTPU_IPV4_OTHER,    ICE_SW_TUN_IPV6_GTPU_IPV4},
	{ ICE_PROFID_IPV6_GTPU_IPV4_UDP,      ICE_SW_TUN_IPV6_GTPU_IPV4_UDP},
	{ ICE_PROFID_IPV6_GTPU_IPV4_TCP,      ICE_SW_TUN_IPV6_GTPU_IPV4_TCP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV4},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP},
	{ ICE_PROFID_IPV6_GTPU_IPV6_OTHER,    ICE_SW_TUN_IPV6_GTPU_IPV6},
	{ ICE_PROFID_IPV6_GTPU_IPV6_UDP,      ICE_SW_TUN_IPV6_GTPU_IPV6_UDP},
	{ ICE_PROFID_IPV6_GTPU_IPV6_TCP,      ICE_SW_TUN_IPV6_GTPU_IPV6_TCP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV6},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP},
};
1862
1863 /**
1864  * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1865  * @rid: recipe ID that we are populating
1866  */
1867 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
1868 {
1869         u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1870         u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1871         u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1872         u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1873         enum ice_sw_tunnel_type tun_type;
1874         u16 i, j, k, profile_num = 0;
1875         bool non_tun_valid = false;
1876         bool pppoe_valid = false;
1877         bool vxlan_valid = false;
1878         bool gre_valid = false;
1879         bool gtp_valid = false;
1880         bool flag_valid = false;
1881
1882         for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1883                 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1884                         continue;
1885                 else
1886                         profile_num++;
1887
1888                 for (i = 0; i < 12; i++) {
1889                         if (gre_profile[i] == j)
1890                                 gre_valid = true;
1891                 }
1892
1893                 for (i = 0; i < 12; i++) {
1894                         if (vxlan_profile[i] == j)
1895                                 vxlan_valid = true;
1896                 }
1897
1898                 for (i = 0; i < 7; i++) {
1899                         if (pppoe_profile[i] == j)
1900                                 pppoe_valid = true;
1901                 }
1902
1903                 for (i = 0; i < 6; i++) {
1904                         if (non_tun_profile[i] == j)
1905                                 non_tun_valid = true;
1906                 }
1907
1908                 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1909                     j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
1910                         gtp_valid = true;
1911
1912                 if ((j >= ICE_PROFID_IPV4_ESP &&
1913                      j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1914                     (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1915                      j <= ICE_PROFID_IPV6_GTPU_TEID))
1916                         flag_valid = true;
1917         }
1918
1919         if (!non_tun_valid && vxlan_valid)
1920                 tun_type = ICE_SW_TUN_VXLAN;
1921         else if (!non_tun_valid && gre_valid)
1922                 tun_type = ICE_SW_TUN_NVGRE;
1923         else if (!non_tun_valid && pppoe_valid)
1924                 tun_type = ICE_SW_TUN_PPPOE;
1925         else if (!non_tun_valid && gtp_valid)
1926                 tun_type = ICE_SW_TUN_GTP;
1927         else if (non_tun_valid &&
1928                  (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1929                 tun_type = ICE_SW_TUN_AND_NON_TUN;
1930         else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1931                  !pppoe_valid)
1932                 tun_type = ICE_NON_TUN;
1933         else
1934                 tun_type = ICE_NON_TUN;
1935
1936         if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1937                 i = ice_is_bit_set(recipe_to_profile[rid],
1938                                    ICE_PROFID_PPPOE_IPV4_OTHER);
1939                 j = ice_is_bit_set(recipe_to_profile[rid],
1940                                    ICE_PROFID_PPPOE_IPV6_OTHER);
1941                 if (i && !j)
1942                         tun_type = ICE_SW_TUN_PPPOE_IPV4;
1943                 else if (!i && j)
1944                         tun_type = ICE_SW_TUN_PPPOE_IPV6;
1945         }
1946
1947         if (tun_type == ICE_SW_TUN_GTP) {
1948                 for (k = 0; k < ARRAY_SIZE(ice_prof_type_tbl); k++)
1949                         if (ice_is_bit_set(recipe_to_profile[rid],
1950                                            ice_prof_type_tbl[k].prof_id)) {
1951                                 tun_type = ice_prof_type_tbl[k].type;
1952                                 break;
1953                         }
1954         }
1955
1956         if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1957                 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1958                         if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1959                                 switch (j) {
1960                                 case ICE_PROFID_IPV4_TCP:
1961                                         tun_type = ICE_SW_IPV4_TCP;
1962                                         break;
1963                                 case ICE_PROFID_IPV4_UDP:
1964                                         tun_type = ICE_SW_IPV4_UDP;
1965                                         break;
1966                                 case ICE_PROFID_IPV6_TCP:
1967                                         tun_type = ICE_SW_IPV6_TCP;
1968                                         break;
1969                                 case ICE_PROFID_IPV6_UDP:
1970                                         tun_type = ICE_SW_IPV6_UDP;
1971                                         break;
1972                                 case ICE_PROFID_PPPOE_PAY:
1973                                         tun_type = ICE_SW_TUN_PPPOE_PAY;
1974                                         break;
1975                                 case ICE_PROFID_PPPOE_IPV4_TCP:
1976                                         tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1977                                         break;
1978                                 case ICE_PROFID_PPPOE_IPV4_UDP:
1979                                         tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1980                                         break;
1981                                 case ICE_PROFID_PPPOE_IPV4_OTHER:
1982                                         tun_type = ICE_SW_TUN_PPPOE_IPV4;
1983                                         break;
1984                                 case ICE_PROFID_PPPOE_IPV6_TCP:
1985                                         tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1986                                         break;
1987                                 case ICE_PROFID_PPPOE_IPV6_UDP:
1988                                         tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1989                                         break;
1990                                 case ICE_PROFID_PPPOE_IPV6_OTHER:
1991                                         tun_type = ICE_SW_TUN_PPPOE_IPV6;
1992                                         break;
1993                                 case ICE_PROFID_IPV4_ESP:
1994                                         tun_type = ICE_SW_TUN_IPV4_ESP;
1995                                         break;
1996                                 case ICE_PROFID_IPV6_ESP:
1997                                         tun_type = ICE_SW_TUN_IPV6_ESP;
1998                                         break;
1999                                 case ICE_PROFID_IPV4_AH:
2000                                         tun_type = ICE_SW_TUN_IPV4_AH;
2001                                         break;
2002                                 case ICE_PROFID_IPV6_AH:
2003                                         tun_type = ICE_SW_TUN_IPV6_AH;
2004                                         break;
2005                                 case ICE_PROFID_IPV4_NAT_T:
2006                                         tun_type = ICE_SW_TUN_IPV4_NAT_T;
2007                                         break;
2008                                 case ICE_PROFID_IPV6_NAT_T:
2009                                         tun_type = ICE_SW_TUN_IPV6_NAT_T;
2010                                         break;
2011                                 case ICE_PROFID_IPV4_PFCP_NODE:
2012                                         tun_type =
2013                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
2014                                         break;
2015                                 case ICE_PROFID_IPV6_PFCP_NODE:
2016                                         tun_type =
2017                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
2018                                         break;
2019                                 case ICE_PROFID_IPV4_PFCP_SESSION:
2020                                         tun_type =
2021                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
2022                                         break;
2023                                 case ICE_PROFID_IPV6_PFCP_SESSION:
2024                                         tun_type =
2025                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
2026                                         break;
2027                                 case ICE_PROFID_MAC_IPV4_L2TPV3:
2028                                         tun_type = ICE_SW_TUN_IPV4_L2TPV3;
2029                                         break;
2030                                 case ICE_PROFID_MAC_IPV6_L2TPV3:
2031                                         tun_type = ICE_SW_TUN_IPV6_L2TPV3;
2032                                         break;
2033                                 case ICE_PROFID_IPV4_GTPU_TEID:
2034                                         tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
2035                                         break;
2036                                 case ICE_PROFID_IPV6_GTPU_TEID:
2037                                         tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
2038                                         break;
2039                                 default:
2040                                         break;
2041                                 }
2042
2043                                 return tun_type;
2044                         }
2045                 }
2046         }
2047
2048         if (vlan && tun_type == ICE_SW_TUN_PPPOE)
2049                 tun_type = ICE_SW_TUN_PPPOE_QINQ;
2050         else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
2051                 tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
2052         else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
2053                 tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
2054         else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
2055                 tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
2056         else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
2057                 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
2058         else if (vlan && tun_type == ICE_NON_TUN)
2059                 tun_type = ICE_NON_TUN_QINQ;
2060
2061         return tun_type;
2062 }
2063
2064 /**
2065  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
2066  * @hw: pointer to hardware structure
2067  * @recps: struct that we need to populate
2068  * @rid: recipe ID that we are populating
2069  * @refresh_required: true if we should get recipe to profile mapping from FW
2070  *
2071  * This function is used to populate all the necessary entries into our
2072  * bookkeeping so that we have a current list of all the recipes that are
2073  * programmed in the firmware.
2074  */
2075 static enum ice_status
2076 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2077                     bool *refresh_required)
2078 {
2079         ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
2080         struct ice_aqc_recipe_data_elem *tmp;
2081         u16 num_recps = ICE_MAX_NUM_RECIPES;
2082         struct ice_prot_lkup_ext *lkup_exts;
2083         enum ice_status status;
2084         u8 fv_word_idx = 0;
2085         bool vlan = false;
2086         u16 sub_recps;
2087
2088         ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
2089
2090         /* we need a buffer big enough to accommodate all the recipes */
2091         tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
2092                 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
2093         if (!tmp)
2094                 return ICE_ERR_NO_MEMORY;
2095
2096         tmp[0].recipe_indx = rid;
2097         status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2098         /* non-zero status meaning recipe doesn't exist */
2099         if (status)
2100                 goto err_unroll;
2101
2102         /* Get recipe to profile map so that we can get the fv from lkups that
2103          * we read for a recipe from FW. Since we want to minimize the number of
2104          * times we make this FW call, just make one call and cache the copy
2105          * until a new recipe is added. This operation is only required the
2106          * first time to get the changes from FW. Then to search existing
2107          * entries we don't need to update the cache again until another recipe
2108          * gets added.
2109          */
2110         if (*refresh_required) {
2111                 ice_get_recp_to_prof_map(hw);
2112                 *refresh_required = false;
2113         }
2114
2115         /* Start populating all the entries for recps[rid] based on lkups from
2116          * firmware. Note that we are only creating the root recipe in our
2117          * database.
2118          */
2119         lkup_exts = &recps[rid].lkup_exts;
2120
2121         for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2122                 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2123                 struct ice_recp_grp_entry *rg_entry;
2124                 u8 i, prof, idx, prot = 0;
2125                 bool is_root;
2126                 u16 off = 0;
2127
2128                 rg_entry = (struct ice_recp_grp_entry *)
2129                         ice_malloc(hw, sizeof(*rg_entry));
2130                 if (!rg_entry) {
2131                         status = ICE_ERR_NO_MEMORY;
2132                         goto err_unroll;
2133                 }
2134
2135                 idx = root_bufs.recipe_indx;
2136                 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2137
2138                 /* Mark all result indices in this chain */
2139                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2140                         ice_set_bit(root_bufs.content.result_indx &
2141                                     ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
2142
2143                 /* get the first profile that is associated with rid */
2144                 prof = ice_find_first_bit(recipe_to_profile[idx],
2145                                           ICE_MAX_NUM_PROFILES);
2146                 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2147                         u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2148
2149                         rg_entry->fv_idx[i] = lkup_indx;
2150                         rg_entry->fv_mask[i] =
2151                                 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
2152
2153                         /* If the recipe is a chained recipe then all its
2154                          * child recipe's result will have a result index.
2155                          * To fill fv_words we should not use those result
2156                          * index, we only need the protocol ids and offsets.
2157                          * We will skip all the fv_idx which stores result
2158                          * index in them. We also need to skip any fv_idx which
2159                          * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2160                          * valid offset value.
2161                          */
2162                         if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
2163                                            rg_entry->fv_idx[i]) ||
2164                             rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2165                             rg_entry->fv_idx[i] == 0)
2166                                 continue;
2167
2168                         ice_find_prot_off(hw, ICE_BLK_SW, prof,
2169                                           rg_entry->fv_idx[i], &prot, &off);
2170                         lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2171                         lkup_exts->fv_words[fv_word_idx].off = off;
2172                         lkup_exts->field_mask[fv_word_idx] =
2173                                 rg_entry->fv_mask[i];
2174                         if (prot == ICE_META_DATA_ID_HW &&
2175                             off == ICE_TUN_FLAG_MDID_OFF)
2176                                 vlan = true;
2177                         fv_word_idx++;
2178                 }
2179                 /* populate rg_list with the data from the child entry of this
2180                  * recipe
2181                  */
2182                 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
2183
2184                 /* Propagate some data to the recipe database */
2185                 recps[idx].is_root = !!is_root;
2186                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2187                 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2188                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2189                         recps[idx].chain_idx = root_bufs.content.result_indx &
2190                                 ~ICE_AQ_RECIPE_RESULT_EN;
2191                         ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2192                 } else {
2193                         recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2194                 }
2195
2196                 if (!is_root)
2197                         continue;
2198
2199                 /* Only do the following for root recipes entries */
2200                 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2201                            sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
2202                 recps[idx].root_rid = root_bufs.content.rid &
2203                         ~ICE_AQ_RECIPE_ID_IS_ROOT;
2204                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2205         }
2206
2207         /* Complete initialization of the root recipe entry */
2208         lkup_exts->n_val_words = fv_word_idx;
2209         recps[rid].big_recp = (num_recps > 1);
2210         recps[rid].n_grp_count = (u8)num_recps;
2211         recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
2212         recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
2213                 ice_memdup(hw, tmp, recps[rid].n_grp_count *
2214                            sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
2215         if (!recps[rid].root_buf)
2216                 goto err_unroll;
2217
2218         /* Copy result indexes */
2219         ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2220         recps[rid].recp_created = true;
2221
2222 err_unroll:
2223         ice_free(hw, tmp);
2224         return status;
2225 }
2226
2227 /**
2228  * ice_get_recp_to_prof_map - updates recipe to profile mapping
2229  * @hw: pointer to hardware structure
2230  *
2231  * This function is used to populate recipe_to_profile matrix where index to
2232  * this array is the recipe ID and the element is the mapping of which profiles
2233  * is this recipe mapped to.
2234  */
2235 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2236 {
2237         ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2238         u16 i;
2239
2240         for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2241                 u16 j;
2242
2243                 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2244                 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2245                 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2246                         continue;
2247                 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
2248                               ICE_MAX_NUM_RECIPES);
2249                 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2250                         ice_set_bit(i, recipe_to_profile[j]);
2251         }
2252 }
2253
2254 /**
2255  * ice_init_def_sw_recp - initialize the recipe book keeping tables
2256  * @hw: pointer to the HW struct
2257  * @recp_list: pointer to sw recipe list
2258  *
2259  * Allocate memory for the entire recipe table and initialize the structures/
2260  * entries corresponding to basic recipes.
2261  */
2262 enum ice_status
2263 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
2264 {
2265         struct ice_sw_recipe *recps;
2266         u8 i;
2267
2268         recps = (struct ice_sw_recipe *)
2269                 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
2270         if (!recps)
2271                 return ICE_ERR_NO_MEMORY;
2272
2273         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2274                 recps[i].root_rid = i;
2275                 INIT_LIST_HEAD(&recps[i].filt_rules);
2276                 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
2277                 INIT_LIST_HEAD(&recps[i].rg_list);
2278                 ice_init_lock(&recps[i].filt_rule_lock);
2279         }
2280
2281         *recp_list = recps;
2282
2283         return ICE_SUCCESS;
2284 }
2285
2286 /**
2287  * ice_aq_get_sw_cfg - get switch configuration
2288  * @hw: pointer to the hardware structure
2289  * @buf: pointer to the result buffer
2290  * @buf_size: length of the buffer available for response
2291  * @req_desc: pointer to requested descriptor
2292  * @num_elems: pointer to number of elements
2293  * @cd: pointer to command details structure or NULL
2294  *
2295  * Get switch configuration (0x0200) to be placed in buf.
2296  * This admin command returns information such as initial VSI/port number
2297  * and switch ID it belongs to.
2298  *
2299  * NOTE: *req_desc is both an input/output parameter.
2300  * The caller of this function first calls this function with *request_desc set
2301  * to 0. If the response from f/w has *req_desc set to 0, all the switch
2302  * configuration information has been returned; if non-zero (meaning not all
2303  * the information was returned), the caller should call this function again
2304  * with *req_desc set to the previous value returned by f/w to get the
2305  * next block of switch configuration information.
2306  *
2307  * *num_elems is output only parameter. This reflects the number of elements
2308  * in response buffer. The caller of this function to use *num_elems while
2309  * parsing the response buffer.
2310  */
2311 static enum ice_status
2312 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
2313                   u16 buf_size, u16 *req_desc, u16 *num_elems,
2314                   struct ice_sq_cd *cd)
2315 {
2316         struct ice_aqc_get_sw_cfg *cmd;
2317         struct ice_aq_desc desc;
2318         enum ice_status status;
2319
2320         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
2321         cmd = &desc.params.get_sw_conf;
2322         cmd->element = CPU_TO_LE16(*req_desc);
2323
2324         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2325         if (!status) {
2326                 *req_desc = LE16_TO_CPU(cmd->element);
2327                 *num_elems = LE16_TO_CPU(cmd->num_elems);
2328         }
2329
2330         return status;
2331 }
2332
2333 /**
2334  * ice_alloc_rss_global_lut - allocate a RSS global LUT
2335  * @hw: pointer to the HW struct
2336  * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2337  * @global_lut_id: output parameter for the RSS global LUT's ID
2338  */
2339 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
2340 {
2341         struct ice_aqc_alloc_free_res_elem *sw_buf;
2342         enum ice_status status;
2343         u16 buf_len;
2344
2345         buf_len = ice_struct_size(sw_buf, elem, 1);
2346         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2347         if (!sw_buf)
2348                 return ICE_ERR_NO_MEMORY;
2349
2350         sw_buf->num_elems = CPU_TO_LE16(1);
2351         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
2352                                        (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2353                                        ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2354
2355         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
2356         if (status) {
2357                 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
2358                           shared_res ? "shared" : "dedicated", status);
2359                 goto ice_alloc_global_lut_exit;
2360         }
2361
2362         *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2363
2364 ice_alloc_global_lut_exit:
2365         ice_free(hw, sw_buf);
2366         return status;
2367 }
2368
2369 /**
2370  * ice_free_rss_global_lut - free a RSS global LUT
2371  * @hw: pointer to the HW struct
2372  * @global_lut_id: ID of the RSS global LUT to free
2373  */
2374 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
2375 {
2376         struct ice_aqc_alloc_free_res_elem *sw_buf;
2377         u16 buf_len, num_elems = 1;
2378         enum ice_status status;
2379
2380         buf_len = ice_struct_size(sw_buf, elem, num_elems);
2381         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2382         if (!sw_buf)
2383                 return ICE_ERR_NO_MEMORY;
2384
2385         sw_buf->num_elems = CPU_TO_LE16(num_elems);
2386         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
2387         sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
2388
2389         status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
2390         if (status)
2391                 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
2392                           global_lut_id, status);
2393
2394         ice_free(hw, sw_buf);
2395         return status;
2396 }
2397
2398 /**
2399  * ice_alloc_sw - allocate resources specific to switch
2400  * @hw: pointer to the HW struct
2401  * @ena_stats: true to turn on VEB stats
2402  * @shared_res: true for shared resource, false for dedicated resource
2403  * @sw_id: switch ID returned
2404  * @counter_id: VEB counter ID returned
2405  *
2406  * allocates switch resources (SWID and VEB counter) (0x0208)
2407  */
2408 enum ice_status
2409 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
2410              u16 *counter_id)
2411 {
2412         struct ice_aqc_alloc_free_res_elem *sw_buf;
2413         struct ice_aqc_res_elem *sw_ele;
2414         enum ice_status status;
2415         u16 buf_len;
2416
2417         buf_len = ice_struct_size(sw_buf, elem, 1);
2418         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2419         if (!sw_buf)
2420                 return ICE_ERR_NO_MEMORY;
2421
2422         /* Prepare buffer for switch ID.
2423          * The number of resource entries in buffer is passed as 1 since only a
2424          * single switch/VEB instance is allocated, and hence a single sw_id
2425          * is requested.
2426          */
2427         sw_buf->num_elems = CPU_TO_LE16(1);
2428         sw_buf->res_type =
2429                 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
2430                             (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2431                             ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2432
2433         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2434                                        ice_aqc_opc_alloc_res, NULL);
2435
2436         if (status)
2437                 goto ice_alloc_sw_exit;
2438
2439         sw_ele = &sw_buf->elem[0];
2440         *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
2441
2442         if (ena_stats) {
2443                 /* Prepare buffer for VEB Counter */
2444                 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
2445                 struct ice_aqc_alloc_free_res_elem *counter_buf;
2446                 struct ice_aqc_res_elem *counter_ele;
2447
2448                 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2449                                 ice_malloc(hw, buf_len);
2450                 if (!counter_buf) {
2451                         status = ICE_ERR_NO_MEMORY;
2452                         goto ice_alloc_sw_exit;
2453                 }
2454
2455                 /* The number of resource entries in buffer is passed as 1 since
2456                  * only a single switch/VEB instance is allocated, and hence a
2457                  * single VEB counter is requested.
2458                  */
2459                 counter_buf->num_elems = CPU_TO_LE16(1);
2460                 counter_buf->res_type =
2461                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
2462                                     ICE_AQC_RES_TYPE_FLAG_DEDICATED);
2463                 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2464                                                opc, NULL);
2465
2466                 if (status) {
2467                         ice_free(hw, counter_buf);
2468                         goto ice_alloc_sw_exit;
2469                 }
2470                 counter_ele = &counter_buf->elem[0];
2471                 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
2472                 ice_free(hw, counter_buf);
2473         }
2474
2475 ice_alloc_sw_exit:
2476         ice_free(hw, sw_buf);
2477         return status;
2478 }
2479
2480 /**
2481  * ice_free_sw - free resources specific to switch
2482  * @hw: pointer to the HW struct
2483  * @sw_id: switch ID returned
2484  * @counter_id: VEB counter ID returned
2485  *
2486  * free switch resources (SWID and VEB counter) (0x0209)
2487  *
2488  * NOTE: This function frees multiple resources. It continues
2489  * releasing other resources even after it encounters error.
2490  * The error code returned is the last error it encountered.
2491  */
2492 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2493 {
2494         struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2495         enum ice_status status, ret_status;
2496         u16 buf_len;
2497
2498         buf_len = ice_struct_size(sw_buf, elem, 1);
2499         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2500         if (!sw_buf)
2501                 return ICE_ERR_NO_MEMORY;
2502
2503         /* Prepare buffer to free for switch ID res.
2504          * The number of resource entries in buffer is passed as 1 since only a
2505          * single switch/VEB instance is freed, and hence a single sw_id
2506          * is released.
2507          */
2508         sw_buf->num_elems = CPU_TO_LE16(1);
2509         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2510         sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
2511
2512         ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2513                                            ice_aqc_opc_free_res, NULL);
2514
2515         if (ret_status)
2516                 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2517
2518         /* Prepare buffer to free for VEB Counter resource */
2519         counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2520                         ice_malloc(hw, buf_len);
2521         if (!counter_buf) {
2522                 ice_free(hw, sw_buf);
2523                 return ICE_ERR_NO_MEMORY;
2524         }
2525
2526         /* The number of resource entries in buffer is passed as 1 since only a
2527          * single switch/VEB instance is freed, and hence a single VEB counter
2528          * is released
2529          */
2530         counter_buf->num_elems = CPU_TO_LE16(1);
2531         counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2532         counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2533
2534         status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2535                                        ice_aqc_opc_free_res, NULL);
2536         if (status) {
2537                 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2538                 ret_status = status;
2539         }
2540
2541         ice_free(hw, counter_buf);
2542         ice_free(hw, sw_buf);
2543         return ret_status;
2544 }
2545
2546 /**
2547  * ice_aq_add_vsi
2548  * @hw: pointer to the HW struct
2549  * @vsi_ctx: pointer to a VSI context struct
2550  * @cd: pointer to command details structure or NULL
2551  *
2552  * Add a VSI context to the hardware (0x0210)
2553  */
2554 enum ice_status
2555 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2556                struct ice_sq_cd *cd)
2557 {
2558         struct ice_aqc_add_update_free_vsi_resp *res;
2559         struct ice_aqc_add_get_update_free_vsi *cmd;
2560         struct ice_aq_desc desc;
2561         enum ice_status status;
2562
2563         cmd = &desc.params.vsi_cmd;
2564         res = &desc.params.add_update_free_vsi_res;
2565
2566         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2567
2568         if (!vsi_ctx->alloc_from_pool)
2569                 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2570                                            ICE_AQ_VSI_IS_VALID);
2571
2572         cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2573
2574         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2575
2576         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2577                                  sizeof(vsi_ctx->info), cd);
2578
2579         if (!status) {
2580                 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2581                 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2582                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2583         }
2584
2585         return status;
2586 }
2587
2588 /**
2589  * ice_aq_free_vsi
2590  * @hw: pointer to the HW struct
2591  * @vsi_ctx: pointer to a VSI context struct
2592  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2593  * @cd: pointer to command details structure or NULL
2594  *
2595  * Free VSI context info from hardware (0x0213)
2596  */
2597 enum ice_status
2598 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2599                 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2600 {
2601         struct ice_aqc_add_update_free_vsi_resp *resp;
2602         struct ice_aqc_add_get_update_free_vsi *cmd;
2603         struct ice_aq_desc desc;
2604         enum ice_status status;
2605
2606         cmd = &desc.params.vsi_cmd;
2607         resp = &desc.params.add_update_free_vsi_res;
2608
2609         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2610
2611         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2612         if (keep_vsi_alloc)
2613                 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2614
2615         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2616         if (!status) {
2617                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2618                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2619         }
2620
2621         return status;
2622 }
2623
2624 /**
2625  * ice_aq_update_vsi
2626  * @hw: pointer to the HW struct
2627  * @vsi_ctx: pointer to a VSI context struct
2628  * @cd: pointer to command details structure or NULL
2629  *
2630  * Update VSI context in the hardware (0x0211)
2631  */
2632 enum ice_status
2633 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2634                   struct ice_sq_cd *cd)
2635 {
2636         struct ice_aqc_add_update_free_vsi_resp *resp;
2637         struct ice_aqc_add_get_update_free_vsi *cmd;
2638         struct ice_aq_desc desc;
2639         enum ice_status status;
2640
2641         cmd = &desc.params.vsi_cmd;
2642         resp = &desc.params.add_update_free_vsi_res;
2643
2644         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2645
2646         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2647
2648         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2649
2650         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2651                                  sizeof(vsi_ctx->info), cd);
2652
2653         if (!status) {
2654                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2655                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2656         }
2657
2658         return status;
2659 }
2660
2661 /**
2662  * ice_is_vsi_valid - check whether the VSI is valid or not
2663  * @hw: pointer to the HW struct
2664  * @vsi_handle: VSI handle
2665  *
2666  * check whether the VSI is valid or not
2667  */
2668 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2669 {
2670         return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2671 }
2672
2673 /**
2674  * ice_get_hw_vsi_num - return the HW VSI number
2675  * @hw: pointer to the HW struct
2676  * @vsi_handle: VSI handle
2677  *
2678  * return the HW VSI number
2679  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2680  */
2681 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2682 {
2683         return hw->vsi_ctx[vsi_handle]->vsi_num;
2684 }
2685
2686 /**
2687  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2688  * @hw: pointer to the HW struct
2689  * @vsi_handle: VSI handle
2690  *
2691  * return the VSI context entry for a given VSI handle
2692  */
2693 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2694 {
2695         return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2696 }
2697
2698 /**
2699  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2700  * @hw: pointer to the HW struct
2701  * @vsi_handle: VSI handle
2702  * @vsi: VSI context pointer
2703  *
2704  * save the VSI context entry for a given VSI handle
2705  */
2706 static void
2707 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2708 {
2709         hw->vsi_ctx[vsi_handle] = vsi;
2710 }
2711
2712 /**
2713  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2714  * @hw: pointer to the HW struct
2715  * @vsi_handle: VSI handle
2716  */
2717 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2718 {
2719         struct ice_vsi_ctx *vsi;
2720         u8 i;
2721
2722         vsi = ice_get_vsi_ctx(hw, vsi_handle);
2723         if (!vsi)
2724                 return;
2725         ice_for_each_traffic_class(i) {
2726                 if (vsi->lan_q_ctx[i]) {
2727                         ice_free(hw, vsi->lan_q_ctx[i]);
2728                         vsi->lan_q_ctx[i] = NULL;
2729                 }
2730         }
2731 }
2732
2733 /**
2734  * ice_clear_vsi_ctx - clear the VSI context entry
2735  * @hw: pointer to the HW struct
2736  * @vsi_handle: VSI handle
2737  *
2738  * clear the VSI context entry
2739  */
2740 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2741 {
2742         struct ice_vsi_ctx *vsi;
2743
2744         vsi = ice_get_vsi_ctx(hw, vsi_handle);
2745         if (vsi) {
2746                 ice_clear_vsi_q_ctx(hw, vsi_handle);
2747                 ice_free(hw, vsi);
2748                 hw->vsi_ctx[vsi_handle] = NULL;
2749         }
2750 }
2751
2752 /**
2753  * ice_clear_all_vsi_ctx - clear all the VSI context entries
2754  * @hw: pointer to the HW struct
2755  */
2756 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2757 {
2758         u16 i;
2759
2760         for (i = 0; i < ICE_MAX_VSI; i++)
2761                 ice_clear_vsi_ctx(hw, i);
2762 }
2763
2764 /**
2765  * ice_add_vsi - add VSI context to the hardware and VSI handle list
2766  * @hw: pointer to the HW struct
2767  * @vsi_handle: unique VSI handle provided by drivers
2768  * @vsi_ctx: pointer to a VSI context struct
2769  * @cd: pointer to command details structure or NULL
2770  *
2771  * Add a VSI context to the hardware also add it into the VSI handle list.
2772  * If this function gets called after reset for existing VSIs then update
2773  * with the new HW VSI number in the corresponding VSI handle list entry.
2774  */
2775 enum ice_status
2776 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2777             struct ice_sq_cd *cd)
2778 {
2779         struct ice_vsi_ctx *tmp_vsi_ctx;
2780         enum ice_status status;
2781
2782         if (vsi_handle >= ICE_MAX_VSI)
2783                 return ICE_ERR_PARAM;
2784         status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2785         if (status)
2786                 return status;
2787         tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2788         if (!tmp_vsi_ctx) {
2789                 /* Create a new VSI context */
2790                 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2791                         ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2792                 if (!tmp_vsi_ctx) {
2793                         ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2794                         return ICE_ERR_NO_MEMORY;
2795                 }
2796                 *tmp_vsi_ctx = *vsi_ctx;
2797
2798                 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2799         } else {
2800                 /* update with new HW VSI num */
2801                 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2802         }
2803
2804         return ICE_SUCCESS;
2805 }
2806
2807 /**
2808  * ice_free_vsi- free VSI context from hardware and VSI handle list
2809  * @hw: pointer to the HW struct
2810  * @vsi_handle: unique VSI handle
2811  * @vsi_ctx: pointer to a VSI context struct
2812  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2813  * @cd: pointer to command details structure or NULL
2814  *
2815  * Free VSI context info from hardware as well as from VSI handle list
2816  */
2817 enum ice_status
2818 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2819              bool keep_vsi_alloc, struct ice_sq_cd *cd)
2820 {
2821         enum ice_status status;
2822
2823         if (!ice_is_vsi_valid(hw, vsi_handle))
2824                 return ICE_ERR_PARAM;
2825         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2826         status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2827         if (!status)
2828                 ice_clear_vsi_ctx(hw, vsi_handle);
2829         return status;
2830 }
2831
2832 /**
2833  * ice_update_vsi
2834  * @hw: pointer to the HW struct
2835  * @vsi_handle: unique VSI handle
2836  * @vsi_ctx: pointer to a VSI context struct
2837  * @cd: pointer to command details structure or NULL
2838  *
2839  * Update VSI context in the hardware
2840  */
2841 enum ice_status
2842 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2843                struct ice_sq_cd *cd)
2844 {
2845         if (!ice_is_vsi_valid(hw, vsi_handle))
2846                 return ICE_ERR_PARAM;
2847         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2848         return ice_aq_update_vsi(hw, vsi_ctx, cd);
2849 }
2850
2851 /**
2852  * ice_aq_get_vsi_params
2853  * @hw: pointer to the HW struct
2854  * @vsi_ctx: pointer to a VSI context struct
2855  * @cd: pointer to command details structure or NULL
2856  *
2857  * Get VSI context info from hardware (0x0212)
2858  */
2859 enum ice_status
2860 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2861                       struct ice_sq_cd *cd)
2862 {
2863         struct ice_aqc_add_get_update_free_vsi *cmd;
2864         struct ice_aqc_get_vsi_resp *resp;
2865         struct ice_aq_desc desc;
2866         enum ice_status status;
2867
2868         cmd = &desc.params.vsi_cmd;
2869         resp = &desc.params.get_vsi_resp;
2870
2871         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2872
2873         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2874
2875         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2876                                  sizeof(vsi_ctx->info), cd);
2877         if (!status) {
2878                 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2879                                         ICE_AQ_VSI_NUM_M;
2880                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2881                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2882         }
2883
2884         return status;
2885 }
2886
2887 /**
2888  * ice_aq_add_update_mir_rule - add/update a mirror rule
2889  * @hw: pointer to the HW struct
2890  * @rule_type: Rule Type
2891  * @dest_vsi: VSI number to which packets will be mirrored
2892  * @count: length of the list
2893  * @mr_buf: buffer for list of mirrored VSI numbers
2894  * @cd: pointer to command details structure or NULL
2895  * @rule_id: Rule ID
2896  *
2897  * Add/Update Mirror Rule (0x260).
2898  */
2899 enum ice_status
2900 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2901                            u16 count, struct ice_mir_rule_buf *mr_buf,
2902                            struct ice_sq_cd *cd, u16 *rule_id)
2903 {
2904         struct ice_aqc_add_update_mir_rule *cmd;
2905         struct ice_aq_desc desc;
2906         enum ice_status status;
2907         __le16 *mr_list = NULL;
2908         u16 buf_size = 0;
2909
2910         switch (rule_type) {
2911         case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2912         case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2913                 /* Make sure count and mr_buf are set for these rule_types */
2914                 if (!(count && mr_buf))
2915                         return ICE_ERR_PARAM;
2916
2917                 buf_size = count * sizeof(__le16);
2918                 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2919                 if (!mr_list)
2920                         return ICE_ERR_NO_MEMORY;
2921                 break;
2922         case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2923         case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2924                 /* Make sure count and mr_buf are not set for these
2925                  * rule_types
2926                  */
2927                 if (count || mr_buf)
2928                         return ICE_ERR_PARAM;
2929                 break;
2930         default:
2931                 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2932                 return ICE_ERR_OUT_OF_RANGE;
2933         }
2934
2935         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2936
2937         /* Pre-process 'mr_buf' items for add/update of virtual port
2938          * ingress/egress mirroring (but not physical port ingress/egress
2939          * mirroring)
2940          */
2941         if (mr_buf) {
2942                 int i;
2943
2944                 for (i = 0; i < count; i++) {
2945                         u16 id;
2946
2947                         id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2948
2949                         /* Validate specified VSI number, make sure it is less
2950                          * than ICE_MAX_VSI, if not return with error.
2951                          */
2952                         if (id >= ICE_MAX_VSI) {
2953                                 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
2954                                           id);
2955                                 ice_free(hw, mr_list);
2956                                 return ICE_ERR_OUT_OF_RANGE;
2957                         }
2958
2959                         /* add VSI to mirror rule */
2960                         if (mr_buf[i].add)
2961                                 mr_list[i] =
2962                                         CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2963                         else /* remove VSI from mirror rule */
2964                                 mr_list[i] = CPU_TO_LE16(id);
2965                 }
2966         }
2967
2968         cmd = &desc.params.add_update_rule;
2969         if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2970                 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2971                                            ICE_AQC_RULE_ID_VALID_M);
2972         cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2973         cmd->num_entries = CPU_TO_LE16(count);
2974         cmd->dest = CPU_TO_LE16(dest_vsi);
2975
2976         status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2977         if (!status)
2978                 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2979
2980         ice_free(hw, mr_list);
2981
2982         return status;
2983 }
2984
2985 /**
2986  * ice_aq_delete_mir_rule - delete a mirror rule
2987  * @hw: pointer to the HW struct
2988  * @rule_id: Mirror rule ID (to be deleted)
2989  * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2990  *               otherwise it is returned to the shared pool
2991  * @cd: pointer to command details structure or NULL
2992  *
2993  * Delete Mirror Rule (0x261).
2994  */
2995 enum ice_status
2996 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2997                        struct ice_sq_cd *cd)
2998 {
2999         struct ice_aqc_delete_mir_rule *cmd;
3000         struct ice_aq_desc desc;
3001
3002         /* rule_id should be in the range 0...63 */
3003         if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
3004                 return ICE_ERR_OUT_OF_RANGE;
3005
3006         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
3007
3008         cmd = &desc.params.del_rule;
3009         rule_id |= ICE_AQC_RULE_ID_VALID_M;
3010         cmd->rule_id = CPU_TO_LE16(rule_id);
3011
3012         if (keep_allocd)
3013                 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
3014
3015         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3016 }
3017
3018 /**
3019  * ice_aq_alloc_free_vsi_list
3020  * @hw: pointer to the HW struct
3021  * @vsi_list_id: VSI list ID returned or used for lookup
3022  * @lkup_type: switch rule filter lookup type
3023  * @opc: switch rules population command type - pass in the command opcode
3024  *
3025  * allocates or free a VSI list resource
3026  */
3027 static enum ice_status
3028 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
3029                            enum ice_sw_lkup_type lkup_type,
3030                            enum ice_adminq_opc opc)
3031 {
3032         struct ice_aqc_alloc_free_res_elem *sw_buf;
3033         struct ice_aqc_res_elem *vsi_ele;
3034         enum ice_status status;
3035         u16 buf_len;
3036
3037         buf_len = ice_struct_size(sw_buf, elem, 1);
3038         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3039         if (!sw_buf)
3040                 return ICE_ERR_NO_MEMORY;
3041         sw_buf->num_elems = CPU_TO_LE16(1);
3042
3043         if (lkup_type == ICE_SW_LKUP_MAC ||
3044             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3045             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3046             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3047             lkup_type == ICE_SW_LKUP_PROMISC ||
3048             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3049             lkup_type == ICE_SW_LKUP_LAST) {
3050                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
3051         } else if (lkup_type == ICE_SW_LKUP_VLAN) {
3052                 sw_buf->res_type =
3053                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
3054         } else {
3055                 status = ICE_ERR_PARAM;
3056                 goto ice_aq_alloc_free_vsi_list_exit;
3057         }
3058
3059         if (opc == ice_aqc_opc_free_res)
3060                 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
3061
3062         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
3063         if (status)
3064                 goto ice_aq_alloc_free_vsi_list_exit;
3065
3066         if (opc == ice_aqc_opc_alloc_res) {
3067                 vsi_ele = &sw_buf->elem[0];
3068                 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
3069         }
3070
3071 ice_aq_alloc_free_vsi_list_exit:
3072         ice_free(hw, sw_buf);
3073         return status;
3074 }
3075
3076 /**
3077  * ice_aq_set_storm_ctrl - Sets storm control configuration
3078  * @hw: pointer to the HW struct
3079  * @bcast_thresh: represents the upper threshold for broadcast storm control
3080  * @mcast_thresh: represents the upper threshold for multicast storm control
3081  * @ctl_bitmask: storm control knobs
3082  *
3083  * Sets the storm control configuration (0x0280)
3084  */
3085 enum ice_status
3086 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3087                       u32 ctl_bitmask)
3088 {
3089         struct ice_aqc_storm_cfg *cmd;
3090         struct ice_aq_desc desc;
3091
3092         cmd = &desc.params.storm_conf;
3093
3094         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
3095
3096         cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3097         cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3098         cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3099
3100         return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3101 }
3102
3103 /**
3104  * ice_aq_get_storm_ctrl - gets storm control configuration
3105  * @hw: pointer to the HW struct
3106  * @bcast_thresh: represents the upper threshold for broadcast storm control
3107  * @mcast_thresh: represents the upper threshold for multicast storm control
3108  * @ctl_bitmask: storm control knobs
3109  *
3110  * Gets the storm control configuration (0x0281)
3111  */
3112 enum ice_status
3113 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3114                       u32 *ctl_bitmask)
3115 {
3116         enum ice_status status;
3117         struct ice_aq_desc desc;
3118
3119         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3120
3121         status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3122         if (!status) {
3123                 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
3124
3125                 if (bcast_thresh)
3126                         *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3127                                 ICE_AQ_THRESHOLD_M;
3128                 if (mcast_thresh)
3129                         *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3130                                 ICE_AQ_THRESHOLD_M;
3131                 if (ctl_bitmask)
3132                         *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3133         }
3134
3135         return status;
3136 }
3137
3138 /**
3139  * ice_aq_sw_rules - add/update/remove switch rules
3140  * @hw: pointer to the HW struct
3141  * @rule_list: pointer to switch rule population list
3142  * @rule_list_sz: total size of the rule list in bytes
3143  * @num_rules: number of switch rules in the rule_list
3144  * @opc: switch rules population command type - pass in the command opcode
3145  * @cd: pointer to command details structure or NULL
3146  *
3147  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
3148  */
3149 static enum ice_status
3150 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
3151                 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3152 {
3153         struct ice_aq_desc desc;
3154         enum ice_status status;
3155
3156         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3157
3158         if (opc != ice_aqc_opc_add_sw_rules &&
3159             opc != ice_aqc_opc_update_sw_rules &&
3160             opc != ice_aqc_opc_remove_sw_rules)
3161                 return ICE_ERR_PARAM;
3162
3163         ice_fill_dflt_direct_cmd_desc(&desc, opc);
3164
3165         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3166         desc.params.sw_rules.num_rules_fltr_entry_index =
3167                 CPU_TO_LE16(num_rules);
3168         status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
3169         if (opc != ice_aqc_opc_add_sw_rules &&
3170             hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
3171                 status = ICE_ERR_DOES_NOT_EXIST;
3172
3173         return status;
3174 }
3175
3176 /**
3177  * ice_aq_add_recipe - add switch recipe
3178  * @hw: pointer to the HW struct
3179  * @s_recipe_list: pointer to switch rule population list
3180  * @num_recipes: number of switch recipes in the list
3181  * @cd: pointer to command details structure or NULL
3182  *
3183  * Add(0x0290)
3184  */
3185 enum ice_status
3186 ice_aq_add_recipe(struct ice_hw *hw,
3187                   struct ice_aqc_recipe_data_elem *s_recipe_list,
3188                   u16 num_recipes, struct ice_sq_cd *cd)
3189 {
3190         struct ice_aqc_add_get_recipe *cmd;
3191         struct ice_aq_desc desc;
3192         u16 buf_size;
3193
3194         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3195         cmd = &desc.params.add_get_recipe;
3196         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3197
3198         cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
3199         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3200
3201         buf_size = num_recipes * sizeof(*s_recipe_list);
3202
3203         return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3204 }
3205
3206 /**
3207  * ice_aq_get_recipe - get switch recipe
3208  * @hw: pointer to the HW struct
3209  * @s_recipe_list: pointer to switch rule population list
3210  * @num_recipes: pointer to the number of recipes (input and output)
3211  * @recipe_root: root recipe number of recipe(s) to retrieve
3212  * @cd: pointer to command details structure or NULL
3213  *
3214  * Get(0x0292)
3215  *
3216  * On input, *num_recipes should equal the number of entries in s_recipe_list.
3217  * On output, *num_recipes will equal the number of entries returned in
3218  * s_recipe_list.
3219  *
3220  * The caller must supply enough space in s_recipe_list to hold all possible
3221  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3222  */
3223 enum ice_status
3224 ice_aq_get_recipe(struct ice_hw *hw,
3225                   struct ice_aqc_recipe_data_elem *s_recipe_list,
3226                   u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3227 {
3228         struct ice_aqc_add_get_recipe *cmd;
3229         struct ice_aq_desc desc;
3230         enum ice_status status;
3231         u16 buf_size;
3232
3233         if (*num_recipes != ICE_MAX_NUM_RECIPES)
3234                 return ICE_ERR_PARAM;
3235
3236         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3237         cmd = &desc.params.add_get_recipe;
3238         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3239
3240         cmd->return_index = CPU_TO_LE16(recipe_root);
3241         cmd->num_sub_recipes = 0;
3242
3243         buf_size = *num_recipes * sizeof(*s_recipe_list);
3244
3245         status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3246         *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3247
3248         return status;
3249 }
3250
3251 /**
3252  * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
3253  * @hw: pointer to the HW struct
3254  * @params: parameters used to update the default recipe
3255  *
3256  * This function only supports updating default recipes and it only supports
3257  * updating a single recipe based on the lkup_idx at a time.
3258  *
3259  * This is done as a read-modify-write operation. First, get the current recipe
3260  * contents based on the recipe's ID. Then modify the field vector index and
3261  * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
3262  * the pre-existing recipe with the modifications.
3263  */
3264 enum ice_status
3265 ice_update_recipe_lkup_idx(struct ice_hw *hw,
3266                            struct ice_update_recipe_lkup_idx_params *params)
3267 {
3268         struct ice_aqc_recipe_data_elem *rcp_list;
3269         u16 num_recps = ICE_MAX_NUM_RECIPES;
3270         enum ice_status status;
3271
3272         rcp_list = (struct ice_aqc_recipe_data_elem *)ice_malloc(hw, num_recps * sizeof(*rcp_list));
3273         if (!rcp_list)
3274                 return ICE_ERR_NO_MEMORY;
3275
3276         /* read current recipe list from firmware */
3277         rcp_list->recipe_indx = params->rid;
3278         status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
3279         if (status) {
3280                 ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
3281                           params->rid, status);
3282                 goto error_out;
3283         }
3284
3285         /* only modify existing recipe's lkup_idx and mask if valid, while
3286          * leaving all other fields the same, then update the recipe firmware
3287          */
3288         rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
3289         if (params->mask_valid)
3290                 rcp_list->content.mask[params->lkup_idx] =
3291                         CPU_TO_LE16(params->mask);
3292
3293         if (params->ignore_valid)
3294                 rcp_list->content.lkup_indx[params->lkup_idx] |=
3295                         ICE_AQ_RECIPE_LKUP_IGNORE;
3296
3297         status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
3298         if (status)
3299                 ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
3300                           params->rid, params->lkup_idx, params->fv_idx,
3301                           params->mask, params->mask_valid ? "true" : "false",
3302                           status);
3303
3304 error_out:
3305         ice_free(hw, rcp_list);
3306         return status;
3307 }
3308
3309 /**
3310  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3311  * @hw: pointer to the HW struct
3312  * @profile_id: package profile ID to associate the recipe with
3313  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3314  * @cd: pointer to command details structure or NULL
3315  * Recipe to profile association (0x0291)
3316  */
3317 enum ice_status
3318 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3319                              struct ice_sq_cd *cd)
3320 {
3321         struct ice_aqc_recipe_to_profile *cmd;
3322         struct ice_aq_desc desc;
3323
3324         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3325         cmd = &desc.params.recipe_to_profile;
3326         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
3327         cmd->profile_id = CPU_TO_LE16(profile_id);
3328         /* Set the recipe ID bit in the bitmask to let the device know which
3329          * profile we are associating the recipe to
3330          */
3331         ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3332                    ICE_NONDMA_TO_NONDMA);
3333
3334         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3335 }
3336
3337 /**
3338  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3339  * @hw: pointer to the HW struct
3340  * @profile_id: package profile ID to associate the recipe with
3341  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3342  * @cd: pointer to command details structure or NULL
3343  * Associate profile ID with given recipe (0x0293)
3344  */
3345 enum ice_status
3346 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3347                              struct ice_sq_cd *cd)
3348 {
3349         struct ice_aqc_recipe_to_profile *cmd;
3350         struct ice_aq_desc desc;
3351         enum ice_status status;
3352
3353         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3354         cmd = &desc.params.recipe_to_profile;
3355         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3356         cmd->profile_id = CPU_TO_LE16(profile_id);
3357
3358         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3359         if (!status)
3360                 ice_memcpy(r_bitmap, cmd->recipe_assoc,
3361                            sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3362
3363         return status;
3364 }
3365
3366 /**
3367  * ice_alloc_recipe - add recipe resource
3368  * @hw: pointer to the hardware structure
3369  * @rid: recipe ID returned as response to AQ call
3370  */
3371 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3372 {
3373         struct ice_aqc_alloc_free_res_elem *sw_buf;
3374         enum ice_status status;
3375         u16 buf_len;
3376
3377         buf_len = ice_struct_size(sw_buf, elem, 1);
3378         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3379         if (!sw_buf)
3380                 return ICE_ERR_NO_MEMORY;
3381
3382         sw_buf->num_elems = CPU_TO_LE16(1);
3383         sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3384                                         ICE_AQC_RES_TYPE_S) |
3385                                         ICE_AQC_RES_TYPE_FLAG_SHARED);
3386         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3387                                        ice_aqc_opc_alloc_res, NULL);
3388         if (!status)
3389                 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3390         ice_free(hw, sw_buf);
3391
3392         return status;
3393 }
3394
3395 /* ice_init_port_info - Initialize port_info with switch configuration data
3396  * @pi: pointer to port_info
3397  * @vsi_port_num: VSI number or port number
3398  * @type: Type of switch element (port or VSI)
3399  * @swid: switch ID of the switch the element is attached to
3400  * @pf_vf_num: PF or VF number
3401  * @is_vf: true if the element is a VF, false otherwise
3402  */
3403 static void
3404 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3405                    u16 swid, u16 pf_vf_num, bool is_vf)
3406 {
3407         switch (type) {
3408         case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3409                 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3410                 pi->sw_id = swid;
3411                 pi->pf_vf_num = pf_vf_num;
3412                 pi->is_vf = is_vf;
3413                 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3414                 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3415                 break;
3416         default:
3417                 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3418                 break;
3419         }
3420 }
3421
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 *
 * Queries the switch configuration from firmware, paging through the
 * response as needed, and initializes hw->port_info from each physical or
 * virtual port element found. Fails with ICE_ERR_CFG if firmware reports
 * more ports than expected (one).
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
	enum ice_status status;
	u8 num_total_ports;
	u16 req_desc = 0;
	u16 num_elems;
	u8 j = 0;	/* number of port elements consumed so far */
	u16 i;

	num_total_ports = 1;

	rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
		ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);

	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		struct ice_aqc_get_sw_cfg_resp_elem *ele;

		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 res_type;

			/* VSI/port number is in the low bits of the element;
			 * the element type occupies the high bits (extracted
			 * below into res_type)
			 */
			vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = LE16_TO_CPU(ele->swid);

			if (LE16_TO_CPU(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);

			switch (res_type) {
			case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
			case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
				if (j == num_total_ports) {
					ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
					status = ICE_ERR_CFG;
					goto out;
				}
				ice_init_port_info(hw->port_info,
						   vsi_port_num, res_type, swid,
						   pf_vf_num, is_vf);
				j++;
				break;
			default:
				/* non-port elements are ignored here */
				break;
			}
		}
	} while (req_desc && !status);

out:
	ice_free(hw, rbuf);
	return status;
}
3500
3501 /**
3502  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
3503  * @hw: pointer to the hardware structure
3504  * @fi: filter info structure to fill/update
3505  *
3506  * This helper function populates the lb_en and lan_en elements of the provided
3507  * ice_fltr_info struct using the switch's type and characteristics of the
3508  * switch rule being configured.
3509  */
3510 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
3511 {
3512         if ((fi->flag & ICE_FLTR_RX) &&
3513             (fi->fltr_act == ICE_FWD_TO_VSI ||
3514              fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
3515             fi->lkup_type == ICE_SW_LKUP_LAST)
3516                 fi->lan_en = true;
3517         fi->lb_en = false;
3518         fi->lan_en = false;
3519         if ((fi->flag & ICE_FLTR_TX) &&
3520             (fi->fltr_act == ICE_FWD_TO_VSI ||
3521              fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3522              fi->fltr_act == ICE_FWD_TO_Q ||
3523              fi->fltr_act == ICE_FWD_TO_QGRP)) {
3524                 /* Setting LB for prune actions will result in replicated
3525                  * packets to the internal switch that will be dropped.
3526                  */
3527                 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
3528                         fi->lb_en = true;
3529
3530                 /* Set lan_en to TRUE if
3531                  * 1. The switch is a VEB AND
3532                  * 2
3533                  * 2.1 The lookup is a directional lookup like ethertype,
3534                  * promiscuous, ethertype-MAC, promiscuous-VLAN
3535                  * and default-port OR
3536                  * 2.2 The lookup is VLAN, OR
3537                  * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
3538                  * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3539                  *
3540                  * OR
3541                  *
3542                  * The switch is a VEPA.
3543                  *
3544                  * In all other cases, the LAN enable has to be set to false.
3545                  */
3546                 if (hw->evb_veb) {
3547                         if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3548                             fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3549                             fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3550                             fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3551                             fi->lkup_type == ICE_SW_LKUP_DFLT ||
3552                             fi->lkup_type == ICE_SW_LKUP_VLAN ||
3553                             (fi->lkup_type == ICE_SW_LKUP_MAC &&
3554                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3555                             (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3556                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3557                                 fi->lan_en = true;
3558                 } else {
3559                         fi->lan_en = true;
3560                 }
3561         }
3562 }
3563
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 *
 * Builds the dummy Ethernet header, the single-action word and the lookup
 * metadata of @s_rule from @f_info. For a remove opcode only the rule index
 * is filled in; no header or action is needed.
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;	/* sentinel: no VLAN to program */
	u16 vlan_tpid = ICE_ETH_P_8021Q;	/* default TPID unless overridden */
	void *daddr = NULL;			/* DA to copy into the header, if any */
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	/* Removal identifies the rule purely by its index */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
	ice_fill_sw_info(hw, f_info);

	/* Translate the forwarding action into the single-action encoding */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region is encoded as log2 of the group size */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ice_ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		/* unknown action: leave the rule untouched */
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* Select which header fields (DA, VLAN, ethertype) to program
	 * based on the lookup type
	 */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->l_data.vlan.tpid_valid)
			vlan_tpid = f_info->l_data.vlan.tpid;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		/* fall-through */
	case ICE_SW_LKUP_ETHERTYPE:
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		/* fall-through */
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	if (daddr)
		ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
			   ICE_NONDMA_TO_NONDMA);

	/* Program the VLAN TCI and TPID only when a valid VLAN ID was set */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = CPU_TO_BE16(vlan_id);
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = CPU_TO_BE16(vlan_tpid);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
}
3702
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 *
 * Only ICE_SW_LKUP_MAC entries are supported; anything else returns
 * ICE_ERR_PARAM.
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* second rule lives immediately after the large action in the buffer */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* record the association so the marker can be removed later */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	ice_free(hw, lg_act);
	return status;
}
3806
/**
 * ice_add_counter_act - add/update filter rule with counter action
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which counter needs to be added
 * @counter_id: VLAN counter ID returned as part of allocate resource
 * @l_id: large action resource ID
 *
 * Only ICE_SW_LKUP_MAC entries are supported; anything else returns
 * ICE_ERR_PARAM.
 */
static enum ice_status
ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		    u16 counter_id, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act;
	struct ice_aqc_sw_rules_elem *rx_tx;
	enum ice_status status;
	/* 2 actions will be added while adding a large action counter */
	const int num_acts = 2;
	u16 lg_act_size;
	u16 rules_size;
	u16 f_rule_id;
	u32 act;
	u16 id;

	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 * 1. Large Action
	 * 2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* second rule lives immediately after the large action in the buffer */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ?  m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action counter ID */
	act = ICE_LG_ACT_STAT_COUNT;
	act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
		ICE_LG_ACT_STAT_COUNT_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* point the lookup rule's action at the large action just built */
	act = ICE_SINGLE_ACT_PTR;
	act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
	rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	f_rule_id = m_ent->fltr_info.fltr_rule_id;
	rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* record the association so the counter can be removed later */
		m_ent->lg_act_idx = l_id;
		m_ent->counter_index = counter_id;
	}

	ice_free(hw, lg_act);
	return status;
}
3894
3895 /**
3896  * ice_create_vsi_list_map
3897  * @hw: pointer to the hardware structure
3898  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3899  * @num_vsi: number of VSI handles in the array
3900  * @vsi_list_id: VSI list ID generated as part of allocate resource
3901  *
3902  * Helper function to create a new entry of VSI list ID to VSI mapping
3903  * using the given VSI list ID
3904  */
3905 static struct ice_vsi_list_map_info *
3906 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3907                         u16 vsi_list_id)
3908 {
3909         struct ice_switch_info *sw = hw->switch_info;
3910         struct ice_vsi_list_map_info *v_map;
3911         int i;
3912
3913         v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
3914         if (!v_map)
3915                 return NULL;
3916
3917         v_map->vsi_list_id = vsi_list_id;
3918         v_map->ref_cnt = 1;
3919         for (i = 0; i < num_vsi; i++)
3920                 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3921
3922         LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3923         return v_map;
3924 }
3925
3926 /**
3927  * ice_update_vsi_list_rule
3928  * @hw: pointer to the hardware structure
3929  * @vsi_handle_arr: array of VSI handles to form a VSI list
3930  * @num_vsi: number of VSI handles in the array
3931  * @vsi_list_id: VSI list ID generated as part of allocate resource
3932  * @remove: Boolean value to indicate if this is a remove action
3933  * @opc: switch rules population command type - pass in the command opcode
3934  * @lkup_type: lookup type of the filter
3935  *
3936  * Call AQ command to add a new switch rule or update existing switch rule
3937  * using the given VSI list ID
3938  */
3939 static enum ice_status
3940 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3941                          u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3942                          enum ice_sw_lkup_type lkup_type)
3943 {
3944         struct ice_aqc_sw_rules_elem *s_rule;
3945         enum ice_status status;
3946         u16 s_rule_size;
3947         u16 rule_type;
3948         int i;
3949
3950         if (!num_vsi)
3951                 return ICE_ERR_PARAM;
3952
3953         if (lkup_type == ICE_SW_LKUP_MAC ||
3954             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3955             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3956             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3957             lkup_type == ICE_SW_LKUP_PROMISC ||
3958             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3959             lkup_type == ICE_SW_LKUP_LAST)
3960                 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3961                         ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3962         else if (lkup_type == ICE_SW_LKUP_VLAN)
3963                 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3964                         ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3965         else
3966                 return ICE_ERR_PARAM;
3967
3968         s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3969         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3970         if (!s_rule)
3971                 return ICE_ERR_NO_MEMORY;
3972         for (i = 0; i < num_vsi; i++) {
3973                 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3974                         status = ICE_ERR_PARAM;
3975                         goto exit;
3976                 }
3977                 /* AQ call requires hw_vsi_id(s) */
3978                 s_rule->pdata.vsi_list.vsi[i] =
3979                         CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3980         }
3981
3982         s_rule->type = CPU_TO_LE16(rule_type);
3983         s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3984         s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3985
3986         status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3987
3988 exit:
3989         ice_free(hw, s_rule);
3990         return status;
3991 }
3992
3993 /**
3994  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3995  * @hw: pointer to the HW struct
3996  * @vsi_handle_arr: array of VSI handles to form a VSI list
3997  * @num_vsi: number of VSI handles in the array
3998  * @vsi_list_id: stores the ID of the VSI list to be created
3999  * @lkup_type: switch rule filter's lookup type
4000  */
4001 static enum ice_status
4002 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
4003                          u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
4004 {
4005         enum ice_status status;
4006
4007         status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
4008                                             ice_aqc_opc_alloc_res);
4009         if (status)
4010                 return status;
4011
4012         /* Update the newly created VSI list to include the specified VSIs */
4013         return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
4014                                         *vsi_list_id, false,
4015                                         ice_aqc_opc_add_sw_rules, lkup_type);
4016 }
4017
4018 /**
4019  * ice_create_pkt_fwd_rule
4020  * @hw: pointer to the hardware structure
4021  * @recp_list: corresponding filter management list
4022  * @f_entry: entry containing packet forwarding information
4023  *
4024  * Create switch rule with given filter information and add an entry
4025  * to the corresponding filter management list to track this switch rule
4026  * and VSI mapping
4027  */
4028 static enum ice_status
4029 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4030                         struct ice_fltr_list_entry *f_entry)
4031 {
4032         struct ice_fltr_mgmt_list_entry *fm_entry;
4033         struct ice_aqc_sw_rules_elem *s_rule;
4034         enum ice_status status;
4035
4036         s_rule = (struct ice_aqc_sw_rules_elem *)
4037                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4038         if (!s_rule)
4039                 return ICE_ERR_NO_MEMORY;
4040         fm_entry = (struct ice_fltr_mgmt_list_entry *)
4041                    ice_malloc(hw, sizeof(*fm_entry));
4042         if (!fm_entry) {
4043                 status = ICE_ERR_NO_MEMORY;
4044                 goto ice_create_pkt_fwd_rule_exit;
4045         }
4046
4047         fm_entry->fltr_info = f_entry->fltr_info;
4048
4049         /* Initialize all the fields for the management entry */
4050         fm_entry->vsi_count = 1;
4051         fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
4052         fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
4053         fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
4054
4055         ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
4056                          ice_aqc_opc_add_sw_rules);
4057
4058         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4059                                  ice_aqc_opc_add_sw_rules, NULL);
4060         if (status) {
4061                 ice_free(hw, fm_entry);
4062                 goto ice_create_pkt_fwd_rule_exit;
4063         }
4064
4065         f_entry->fltr_info.fltr_rule_id =
4066                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4067         fm_entry->fltr_info.fltr_rule_id =
4068                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4069
4070         /* The book keeping entries will get removed when base driver
4071          * calls remove filter AQ command
4072          */
4073         LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
4074
4075 ice_create_pkt_fwd_rule_exit:
4076         ice_free(hw, s_rule);
4077         return status;
4078 }
4079
4080 /**
4081  * ice_update_pkt_fwd_rule
4082  * @hw: pointer to the hardware structure
4083  * @f_info: filter information for switch rule
4084  *
4085  * Call AQ command to update a previously created switch rule with a
4086  * VSI list ID
4087  */
4088 static enum ice_status
4089 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
4090 {
4091         struct ice_aqc_sw_rules_elem *s_rule;
4092         enum ice_status status;
4093
4094         s_rule = (struct ice_aqc_sw_rules_elem *)
4095                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4096         if (!s_rule)
4097                 return ICE_ERR_NO_MEMORY;
4098
4099         ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
4100
4101         s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
4102
4103         /* Update switch rule with new rule set to forward VSI list */
4104         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4105                                  ice_aqc_opc_update_sw_rules, NULL);
4106
4107         ice_free(hw, s_rule);
4108         return status;
4109 }
4110
4111 /**
4112  * ice_update_sw_rule_bridge_mode
4113  * @hw: pointer to the HW struct
4114  *
4115  * Updates unicast switch filter rules based on VEB/VEPA mode
4116  */
4117 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
4118 {
4119         struct ice_switch_info *sw = hw->switch_info;
4120         struct ice_fltr_mgmt_list_entry *fm_entry;
4121         enum ice_status status = ICE_SUCCESS;
4122         struct LIST_HEAD_TYPE *rule_head;
4123         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4124
4125         rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4126         rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
4127
4128         ice_acquire_lock(rule_lock);
4129         LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
4130                             list_entry) {
4131                 struct ice_fltr_info *fi = &fm_entry->fltr_info;
4132                 u8 *addr = fi->l_data.mac.mac_addr;
4133
4134                 /* Update unicast Tx rules to reflect the selected
4135                  * VEB/VEPA mode
4136                  */
4137                 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4138                     (fi->fltr_act == ICE_FWD_TO_VSI ||
4139                      fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4140                      fi->fltr_act == ICE_FWD_TO_Q ||
4141                      fi->fltr_act == ICE_FWD_TO_QGRP)) {
4142                         status = ice_update_pkt_fwd_rule(hw, fi);
4143                         if (status)
4144                                 break;
4145                 }
4146         }
4147
4148         ice_release_lock(rule_lock);
4149
4150         return status;
4151 }
4152
/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	enum ice_status status = ICE_SUCCESS;
	u16 vsi_list_id = 0;

	/* Queue/queue-group destinations cannot be aggregated into a VSI
	 * list, so refuse any combination that involves them.
	 */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		/* Repoint the existing HW rule from a single-VSI action to
		 * the newly allocated VSI list; the rule ID stays the same.
		 */
		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		/* Keep the book keeping entry in sync with the HW rule */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		if (!m_entry->vsi_list_info)
			return ICE_ERR_NO_MEMORY;

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		/* vsi_count >= 2 implies a VSI list must already exist */
		if (!m_entry->vsi_list_info)
			return ICE_ERR_CFG;

		/* A rule already exists with the new VSI being added */
		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
			return ICE_SUCCESS;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			ice_set_bit(vsi_handle,
				    m_entry->vsi_list_info->vsi_map);
	}
	/* Count the subscriber only if every step above succeeded */
	if (!status)
		m_entry->vsi_count++;
	return status;
}
4270
4271 /**
4272  * ice_find_rule_entry - Search a rule entry
4273  * @list_head: head of rule list
4274  * @f_info: rule information
4275  *
4276  * Helper function to search for a given rule entry
4277  * Returns pointer to entry storing the rule if found
4278  */
4279 static struct ice_fltr_mgmt_list_entry *
4280 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4281                     struct ice_fltr_info *f_info)
4282 {
4283         struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
4284
4285         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4286                             list_entry) {
4287                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4288                             sizeof(f_info->l_data)) &&
4289                     f_info->flag == list_itr->fltr_info.flag) {
4290                         ret = list_itr;
4291                         break;
4292                 }
4293         }
4294         return ret;
4295 }
4296
4297 /**
4298  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4299  * @recp_list: VSI lists needs to be searched
4300  * @vsi_handle: VSI handle to be found in VSI list
4301  * @vsi_list_id: VSI list ID found containing vsi_handle
4302  *
4303  * Helper function to search a VSI list with single entry containing given VSI
4304  * handle element. This can be extended further to search VSI list with more
4305  * than 1 vsi_count. Returns pointer to VSI list entry if found.
4306  */
4307 static struct ice_vsi_list_map_info *
4308 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
4309                         u16 *vsi_list_id)
4310 {
4311         struct ice_vsi_list_map_info *map_info = NULL;
4312         struct LIST_HEAD_TYPE *list_head;
4313
4314         list_head = &recp_list->filt_rules;
4315         if (recp_list->adv_rule) {
4316                 struct ice_adv_fltr_mgmt_list_entry *list_itr;
4317
4318                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4319                                     ice_adv_fltr_mgmt_list_entry,
4320                                     list_entry) {
4321                         if (list_itr->vsi_list_info) {
4322                                 map_info = list_itr->vsi_list_info;
4323                                 if (ice_is_bit_set(map_info->vsi_map,
4324                                                    vsi_handle)) {
4325                                         *vsi_list_id = map_info->vsi_list_id;
4326                                         return map_info;
4327                                 }
4328                         }
4329                 }
4330         } else {
4331                 struct ice_fltr_mgmt_list_entry *list_itr;
4332
4333                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4334                                     ice_fltr_mgmt_list_entry,
4335                                     list_entry) {
4336                         if (list_itr->vsi_count == 1 &&
4337                             list_itr->vsi_list_info) {
4338                                 map_info = list_itr->vsi_list_info;
4339                                 if (ice_is_bit_set(map_info->vsi_map,
4340                                                    vsi_handle)) {
4341                                         *vsi_list_id = map_info->vsi_list_id;
4342                                         return map_info;
4343                                 }
4344                         }
4345                 }
4346         }
4347         return NULL;
4348 }
4349
4350 /**
4351  * ice_add_rule_internal - add rule for a given lookup type
4352  * @hw: pointer to the hardware structure
4353  * @recp_list: recipe list for which rule has to be added
4354  * @lport: logic port number on which function add rule
4355  * @f_entry: structure containing MAC forwarding information
4356  *
4357  * Adds or updates the rule lists for a given recipe
4358  */
4359 static enum ice_status
4360 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4361                       u8 lport, struct ice_fltr_list_entry *f_entry)
4362 {
4363         struct ice_fltr_info *new_fltr, *cur_fltr;
4364         struct ice_fltr_mgmt_list_entry *m_entry;
4365         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4366         enum ice_status status = ICE_SUCCESS;
4367
4368         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4369                 return ICE_ERR_PARAM;
4370
4371         /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4372         if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4373                 f_entry->fltr_info.fwd_id.hw_vsi_id =
4374                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4375
4376         rule_lock = &recp_list->filt_rule_lock;
4377
4378         ice_acquire_lock(rule_lock);
4379         new_fltr = &f_entry->fltr_info;
4380         if (new_fltr->flag & ICE_FLTR_RX)
4381                 new_fltr->src = lport;
4382         else if (new_fltr->flag & ICE_FLTR_TX)
4383                 new_fltr->src =
4384                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4385
4386         m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4387         if (!m_entry) {
4388                 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4389                 goto exit_add_rule_internal;
4390         }
4391
4392         cur_fltr = &m_entry->fltr_info;
4393         status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4394
4395 exit_add_rule_internal:
4396         ice_release_lock(rule_lock);
4397         return status;
4398 }
4399
4400 /**
4401  * ice_remove_vsi_list_rule
4402  * @hw: pointer to the hardware structure
4403  * @vsi_list_id: VSI list ID generated as part of allocate resource
4404  * @lkup_type: switch rule filter lookup type
4405  *
4406  * The VSI list should be emptied before this function is called to remove the
4407  * VSI list.
4408  */
4409 static enum ice_status
4410 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4411                          enum ice_sw_lkup_type lkup_type)
4412 {
4413         /* Free the vsi_list resource that we allocated. It is assumed that the
4414          * list is empty at this point.
4415          */
4416         return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4417                                             ice_aqc_opc_free_res);
4418 }
4419
/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Removes one VSI from the VSI list referenced by @fm_list. When only one
 * VSI remains afterwards (non-VLAN rules), the rule is converted back to a
 * plain FWD_TO_VSI rule and the now-unneeded VSI list is freed.
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = ICE_SUCCESS;
	u16 vsi_list_id;

	/* Only VSI-list rules with at least one subscriber can be updated */
	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	/* "true" = remove this VSI from the list in HW */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	/* Mirror the HW change in the book keeping state */
	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	/* One subscriber left (non-VLAN): demote the rule from
	 * FWD_TO_VSI_LIST back to FWD_TO_VSI for the remaining VSI.
	 */
	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		/* The single remaining VSI is the only bit still set */
		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
						    ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	/* Free the VSI list resource once nothing references it: after the
	 * demotion above (non-VLAN) or when a VLAN list became empty.
	 */
	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
4507
/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 *
 * @hw: pointer to the hardware structure
 * @recp_list: recipe list for which the rule needs to removed
 * @f_entry: rule entry containing filter information
 *
 * Unsubscribes the VSI in @f_entry from the matching rule and deletes the
 * HW rule plus its book keeping entry once no VSI references it anymore.
 */
static enum ice_status
ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	bool remove_rule = false;
	u16 vsi_handle;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &recp_list->filt_rule_lock;
	ice_acquire_lock(rule_lock);
	list_elem = ice_find_rule_entry(&recp_list->filt_rules,
					&f_entry->fltr_info);
	if (!list_elem) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* Single-destination rule: remove it outright */
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		/* FWD_TO_VSI_LIST without a list map is inconsistent state */
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		/* removal only needs the rule header, not the packet data */
		s_rule = (struct ice_aqc_sw_rules_elem *)
			ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
		if (!s_rule) {
			status = ICE_ERR_NO_MEMORY;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		ice_free(hw, s_rule);

		if (status)
			goto exit;

		LIST_DEL(&list_elem->list_entry);
		ice_free(hw, list_elem);
	}
exit:
	ice_release_lock(rule_lock);
	return status;
}
4598
4599 /**
4600  * ice_aq_get_res_alloc - get allocated resources
4601  * @hw: pointer to the HW struct
4602  * @num_entries: pointer to u16 to store the number of resource entries returned
4603  * @buf: pointer to buffer
4604  * @buf_size: size of buf
4605  * @cd: pointer to command details structure or NULL
4606  *
4607  * The caller-supplied buffer must be large enough to store the resource
4608  * information for all resource types. Each resource type is an
4609  * ice_aqc_get_res_resp_elem structure.
4610  */
4611 enum ice_status
4612 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4613                      struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4614                      struct ice_sq_cd *cd)
4615 {
4616         struct ice_aqc_get_res_alloc *resp;
4617         enum ice_status status;
4618         struct ice_aq_desc desc;
4619
4620         if (!buf)
4621                 return ICE_ERR_BAD_PTR;
4622
4623         if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4624                 return ICE_ERR_INVAL_SIZE;
4625
4626         resp = &desc.params.get_res;
4627
4628         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4629         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4630
4631         if (!status && num_entries)
4632                 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4633
4634         return status;
4635 }
4636
4637 /**
4638  * ice_aq_get_res_descs - get allocated resource descriptors
4639  * @hw: pointer to the hardware structure
4640  * @num_entries: number of resource entries in buffer
4641  * @buf: structure to hold response data buffer
4642  * @buf_size: size of buffer
4643  * @res_type: resource type
4644  * @res_shared: is resource shared
4645  * @desc_id: input - first desc ID to start; output - next desc ID
4646  * @cd: pointer to command details structure or NULL
4647  */
4648 enum ice_status
4649 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4650                      struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4651                      bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4652 {
4653         struct ice_aqc_get_allocd_res_desc *cmd;
4654         struct ice_aq_desc desc;
4655         enum ice_status status;
4656
4657         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4658
4659         cmd = &desc.params.get_res_desc;
4660
4661         if (!buf)
4662                 return ICE_ERR_PARAM;
4663
4664         if (buf_size != (num_entries * sizeof(*buf)))
4665                 return ICE_ERR_PARAM;
4666
4667         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4668
4669         cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4670                                          ICE_AQC_RES_TYPE_M) | (res_shared ?
4671                                         ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4672         cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4673
4674         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4675         if (!status)
4676                 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4677
4678         return status;
4679 }
4680
4681 /**
4682  * ice_add_mac_rule - Add a MAC address based filter rule
4683  * @hw: pointer to the hardware structure
4684  * @m_list: list of MAC addresses and forwarding information
4685  * @sw: pointer to switch info struct for which function add rule
4686  * @lport: logic port number on which function add rule
4687  *
4688  * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4689  * multiple unicast addresses, the function assumes that all the
4690  * addresses are unique in a given add_mac call. It doesn't
4691  * check for duplicates in this case, removing duplicates from a given
4692  * list should be taken care of in the caller of this function.
4693  */
4694 static enum ice_status
4695 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4696                  struct ice_switch_info *sw, u8 lport)
4697 {
4698         struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4699         struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4700         struct ice_fltr_list_entry *m_list_itr;
4701         struct LIST_HEAD_TYPE *rule_head;
4702         u16 total_elem_left, s_rule_size;
4703         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4704         enum ice_status status = ICE_SUCCESS;
4705         u16 num_unicast = 0;
4706         u8 elem_sent;
4707
4708         s_rule = NULL;
4709         rule_lock = &recp_list->filt_rule_lock;
4710         rule_head = &recp_list->filt_rules;
4711
4712         LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4713                             list_entry) {
4714                 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4715                 u16 vsi_handle;
4716                 u16 hw_vsi_id;
4717
4718                 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4719                 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4720                 if (!ice_is_vsi_valid(hw, vsi_handle))
4721                         return ICE_ERR_PARAM;
4722                 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4723                 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4724                 /* update the src in case it is VSI num */
4725                 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4726                         return ICE_ERR_PARAM;
4727                 m_list_itr->fltr_info.src = hw_vsi_id;
4728                 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4729                     IS_ZERO_ETHER_ADDR(add))
4730                         return ICE_ERR_PARAM;
4731                 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4732                         /* Don't overwrite the unicast address */
4733                         ice_acquire_lock(rule_lock);
4734                         if (ice_find_rule_entry(rule_head,
4735                                                 &m_list_itr->fltr_info)) {
4736                                 ice_release_lock(rule_lock);
4737                                 return ICE_ERR_ALREADY_EXISTS;
4738                         }
4739                         ice_release_lock(rule_lock);
4740                         num_unicast++;
4741                 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4742                            (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
4743                         m_list_itr->status =
4744                                 ice_add_rule_internal(hw, recp_list, lport,
4745                                                       m_list_itr);
4746                         if (m_list_itr->status)
4747                                 return m_list_itr->status;
4748                 }
4749         }
4750
4751         ice_acquire_lock(rule_lock);
4752         /* Exit if no suitable entries were found for adding bulk switch rule */
4753         if (!num_unicast) {
4754                 status = ICE_SUCCESS;
4755                 goto ice_add_mac_exit;
4756         }
4757
4758         /* Allocate switch rule buffer for the bulk update for unicast */
4759         s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4760         s_rule = (struct ice_aqc_sw_rules_elem *)
4761                 ice_calloc(hw, num_unicast, s_rule_size);
4762         if (!s_rule) {
4763                 status = ICE_ERR_NO_MEMORY;
4764                 goto ice_add_mac_exit;
4765         }
4766
4767         r_iter = s_rule;
4768         LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4769                             list_entry) {
4770                 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4771                 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4772
4773                 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4774                         ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4775                                          ice_aqc_opc_add_sw_rules);
4776                         r_iter = (struct ice_aqc_sw_rules_elem *)
4777                                 ((u8 *)r_iter + s_rule_size);
4778                 }
4779         }
4780
4781         /* Call AQ bulk switch rule update for all unicast addresses */
4782         r_iter = s_rule;
4783         /* Call AQ switch rule in AQ_MAX chunk */
4784         for (total_elem_left = num_unicast; total_elem_left > 0;
4785              total_elem_left -= elem_sent) {
4786                 struct ice_aqc_sw_rules_elem *entry = r_iter;
4787
4788                 elem_sent = MIN_T(u8, total_elem_left,
4789                                   (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4790                 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4791                                          elem_sent, ice_aqc_opc_add_sw_rules,
4792                                          NULL);
4793                 if (status)
4794                         goto ice_add_mac_exit;
4795                 r_iter = (struct ice_aqc_sw_rules_elem *)
4796                         ((u8 *)r_iter + (elem_sent * s_rule_size));
4797         }
4798
4799         /* Fill up rule ID based on the value returned from FW */
4800         r_iter = s_rule;
4801         LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4802                             list_entry) {
4803                 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4804                 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4805                 struct ice_fltr_mgmt_list_entry *fm_entry;
4806
4807                 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4808                         f_info->fltr_rule_id =
4809                                 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4810                         f_info->fltr_act = ICE_FWD_TO_VSI;
4811                         /* Create an entry to track this MAC address */
4812                         fm_entry = (struct ice_fltr_mgmt_list_entry *)
4813                                 ice_malloc(hw, sizeof(*fm_entry));
4814                         if (!fm_entry) {
4815                                 status = ICE_ERR_NO_MEMORY;
4816                                 goto ice_add_mac_exit;
4817                         }
4818                         fm_entry->fltr_info = *f_info;
4819                         fm_entry->vsi_count = 1;
4820                         /* The book keeping entries will get removed when
4821                          * base driver calls remove filter AQ command
4822                          */
4823
4824                         LIST_ADD(&fm_entry->list_entry, rule_head);
4825                         r_iter = (struct ice_aqc_sw_rules_elem *)
4826                                 ((u8 *)r_iter + s_rule_size);
4827                 }
4828         }
4829
4830 ice_add_mac_exit:
4831         ice_release_lock(rule_lock);
4832         if (s_rule)
4833                 ice_free(hw, s_rule);
4834         return status;
4835 }
4836
4837 /**
4838  * ice_add_mac - Add a MAC address based filter rule
4839  * @hw: pointer to the hardware structure
4840  * @m_list: list of MAC addresses and forwarding information
4841  *
4842  * Function add MAC rule for logical port from HW struct
4843  */
4844 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4845 {
4846         if (!m_list || !hw)
4847                 return ICE_ERR_PARAM;
4848
4849         return ice_add_mac_rule(hw, m_list, hw->switch_info,
4850                                 hw->port_info->lport);
4851 }
4852
4853 /**
4854  * ice_add_vlan_internal - Add one VLAN based filter rule
4855  * @hw: pointer to the hardware structure
4856  * @recp_list: recipe list for which rule has to be added
4857  * @f_entry: filter entry containing one VLAN information
4858  */
4859 static enum ice_status
4860 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4861                       struct ice_fltr_list_entry *f_entry)
4862 {
4863         struct ice_fltr_mgmt_list_entry *v_list_itr;
4864         struct ice_fltr_info *new_fltr, *cur_fltr;
4865         enum ice_sw_lkup_type lkup_type;
4866         u16 vsi_list_id = 0, vsi_handle;
4867         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4868         enum ice_status status = ICE_SUCCESS;
4869
4870         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4871                 return ICE_ERR_PARAM;
4872
4873         f_entry->fltr_info.fwd_id.hw_vsi_id =
4874                 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4875         new_fltr = &f_entry->fltr_info;
4876
4877         /* VLAN ID should only be 12 bits */
4878         if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4879                 return ICE_ERR_PARAM;
4880
4881         if (new_fltr->src_id != ICE_SRC_ID_VSI)
4882                 return ICE_ERR_PARAM;
4883
4884         new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4885         lkup_type = new_fltr->lkup_type;
4886         vsi_handle = new_fltr->vsi_handle;
4887         rule_lock = &recp_list->filt_rule_lock;
4888         ice_acquire_lock(rule_lock);
4889         v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4890         if (!v_list_itr) {
4891                 struct ice_vsi_list_map_info *map_info = NULL;
4892
4893                 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4894                         /* All VLAN pruning rules use a VSI list. Check if
4895                          * there is already a VSI list containing VSI that we
4896                          * want to add. If found, use the same vsi_list_id for
4897                          * this new VLAN rule or else create a new list.
4898                          */
4899                         map_info = ice_find_vsi_list_entry(recp_list,
4900                                                            vsi_handle,
4901                                                            &vsi_list_id);
4902                         if (!map_info) {
4903                                 status = ice_create_vsi_list_rule(hw,
4904                                                                   &vsi_handle,
4905                                                                   1,
4906                                                                   &vsi_list_id,
4907                                                                   lkup_type);
4908                                 if (status)
4909                                         goto exit;
4910                         }
4911                         /* Convert the action to forwarding to a VSI list. */
4912                         new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4913                         new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4914                 }
4915
4916                 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4917                 if (!status) {
4918                         v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4919                                                          new_fltr);
4920                         if (!v_list_itr) {
4921                                 status = ICE_ERR_DOES_NOT_EXIST;
4922                                 goto exit;
4923                         }
4924                         /* reuse VSI list for new rule and increment ref_cnt */
4925                         if (map_info) {
4926                                 v_list_itr->vsi_list_info = map_info;
4927                                 map_info->ref_cnt++;
4928                         } else {
4929                                 v_list_itr->vsi_list_info =
4930                                         ice_create_vsi_list_map(hw, &vsi_handle,
4931                                                                 1, vsi_list_id);
4932                         }
4933                 }
4934         } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4935                 /* Update existing VSI list to add new VSI ID only if it used
4936                  * by one VLAN rule.
4937                  */
4938                 cur_fltr = &v_list_itr->fltr_info;
4939                 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4940                                                  new_fltr);
4941         } else {
4942                 /* If VLAN rule exists and VSI list being used by this rule is
4943                  * referenced by more than 1 VLAN rule. Then create a new VSI
4944                  * list appending previous VSI with new VSI and update existing
4945                  * VLAN rule to point to new VSI list ID
4946                  */
4947                 struct ice_fltr_info tmp_fltr;
4948                 u16 vsi_handle_arr[2];
4949                 u16 cur_handle;
4950
4951                 /* Current implementation only supports reusing VSI list with
4952                  * one VSI count. We should never hit below condition
4953                  */
4954                 if (v_list_itr->vsi_count > 1 &&
4955                     v_list_itr->vsi_list_info->ref_cnt > 1) {
4956                         ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4957                         status = ICE_ERR_CFG;
4958                         goto exit;
4959                 }
4960
4961                 cur_handle =
4962                         ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4963                                            ICE_MAX_VSI);
4964
4965                 /* A rule already exists with the new VSI being added */
4966                 if (cur_handle == vsi_handle) {
4967                         status = ICE_ERR_ALREADY_EXISTS;
4968                         goto exit;
4969                 }
4970
4971                 vsi_handle_arr[0] = cur_handle;
4972                 vsi_handle_arr[1] = vsi_handle;
4973                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4974                                                   &vsi_list_id, lkup_type);
4975                 if (status)
4976                         goto exit;
4977
4978                 tmp_fltr = v_list_itr->fltr_info;
4979                 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4980                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4981                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4982                 /* Update the previous switch rule to a new VSI list which
4983                  * includes current VSI that is requested
4984                  */
4985                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4986                 if (status)
4987                         goto exit;
4988
4989                 /* before overriding VSI list map info. decrement ref_cnt of
4990                  * previous VSI list
4991                  */
4992                 v_list_itr->vsi_list_info->ref_cnt--;
4993
4994                 /* now update to newly created list */
4995                 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4996                 v_list_itr->vsi_list_info =
4997                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4998                                                 vsi_list_id);
4999                 v_list_itr->vsi_count++;
5000         }
5001
5002 exit:
5003         ice_release_lock(rule_lock);
5004         return status;
5005 }
5006
5007 /**
5008  * ice_add_vlan_rule - Add VLAN based filter rule
5009  * @hw: pointer to the hardware structure
5010  * @v_list: list of VLAN entries and forwarding information
5011  * @sw: pointer to switch info struct for which function add rule
5012  */
5013 static enum ice_status
5014 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5015                   struct ice_switch_info *sw)
5016 {
5017         struct ice_fltr_list_entry *v_list_itr;
5018         struct ice_sw_recipe *recp_list;
5019
5020         recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
5021         LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
5022                             list_entry) {
5023                 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
5024                         return ICE_ERR_PARAM;
5025                 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
5026                 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
5027                                                            v_list_itr);
5028                 if (v_list_itr->status)
5029                         return v_list_itr->status;
5030         }
5031         return ICE_SUCCESS;
5032 }
5033
5034 /**
5035  * ice_add_vlan - Add a VLAN based filter rule
5036  * @hw: pointer to the hardware structure
5037  * @v_list: list of VLAN and forwarding information
5038  *
5039  * Function add VLAN rule for logical port from HW struct
5040  */
5041 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5042 {
5043         if (!v_list || !hw)
5044                 return ICE_ERR_PARAM;
5045
5046         return ice_add_vlan_rule(hw, v_list, hw->switch_info);
5047 }
5048
5049 /**
5050  * ice_add_mac_vlan_rule - Add MAC and VLAN pair based filter rule
5051  * @hw: pointer to the hardware structure
5052  * @mv_list: list of MAC and VLAN filters
5053  * @sw: pointer to switch info struct for which function add rule
5054  * @lport: logic port number on which function add rule
5055  *
5056  * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
5057  * pruning bits enabled, then it is the responsibility of the caller to make
5058  * sure to add a VLAN only filter on the same VSI. Packets belonging to that
5059  * VLAN won't be received on that VSI otherwise.
5060  */
5061 static enum ice_status
5062 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
5063                       struct ice_switch_info *sw, u8 lport)
5064 {
5065         struct ice_fltr_list_entry *mv_list_itr;
5066         struct ice_sw_recipe *recp_list;
5067
5068         if (!mv_list || !hw)
5069                 return ICE_ERR_PARAM;
5070
5071         recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
5072         LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
5073                             list_entry) {
5074                 enum ice_sw_lkup_type l_type =
5075                         mv_list_itr->fltr_info.lkup_type;
5076
5077                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5078                         return ICE_ERR_PARAM;
5079                 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
5080                 mv_list_itr->status =
5081                         ice_add_rule_internal(hw, recp_list, lport,
5082                                               mv_list_itr);
5083                 if (mv_list_itr->status)
5084                         return mv_list_itr->status;
5085         }
5086         return ICE_SUCCESS;
5087 }
5088
5089 /**
5090  * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
5091  * @hw: pointer to the hardware structure
5092  * @mv_list: list of MAC VLAN addresses and forwarding information
5093  *
5094  * Function add MAC VLAN rule for logical port from HW struct
5095  */
5096 enum ice_status
5097 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5098 {
5099         if (!mv_list || !hw)
5100                 return ICE_ERR_PARAM;
5101
5102         return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
5103                                      hw->port_info->lport);
5104 }
5105
5106 /**
5107  * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
5108  * @hw: pointer to the hardware structure
5109  * @em_list: list of ether type MAC filter, MAC is optional
5110  * @sw: pointer to switch info struct for which function add rule
5111  * @lport: logic port number on which function add rule
5112  *
5113  * This function requires the caller to populate the entries in
5114  * the filter list with the necessary fields (including flags to
5115  * indicate Tx or Rx rules).
5116  */
5117 static enum ice_status
5118 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5119                      struct ice_switch_info *sw, u8 lport)
5120 {
5121         struct ice_fltr_list_entry *em_list_itr;
5122
5123         LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
5124                             list_entry) {
5125                 struct ice_sw_recipe *recp_list;
5126                 enum ice_sw_lkup_type l_type;
5127
5128                 l_type = em_list_itr->fltr_info.lkup_type;
5129                 recp_list = &sw->recp_list[l_type];
5130
5131                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5132                     l_type != ICE_SW_LKUP_ETHERTYPE)
5133                         return ICE_ERR_PARAM;
5134
5135                 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
5136                                                             lport,
5137                                                             em_list_itr);
5138                 if (em_list_itr->status)
5139                         return em_list_itr->status;
5140         }
5141         return ICE_SUCCESS;
5142 }
5143
5144 /**
5145  * ice_add_eth_mac - Add a ethertype based filter rule
5146  * @hw: pointer to the hardware structure
5147  * @em_list: list of ethertype and forwarding information
5148  *
5149  * Function add ethertype rule for logical port from HW struct
5150  */
5151 enum ice_status
5152 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5153 {
5154         if (!em_list || !hw)
5155                 return ICE_ERR_PARAM;
5156
5157         return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5158                                     hw->port_info->lport);
5159 }
5160
5161 /**
5162  * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
5163  * @hw: pointer to the hardware structure
5164  * @em_list: list of ethertype or ethertype MAC entries
5165  * @sw: pointer to switch info struct for which function add rule
5166  */
5167 static enum ice_status
5168 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5169                         struct ice_switch_info *sw)
5170 {
5171         struct ice_fltr_list_entry *em_list_itr, *tmp;
5172
5173         LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
5174                                  list_entry) {
5175                 struct ice_sw_recipe *recp_list;
5176                 enum ice_sw_lkup_type l_type;
5177
5178                 l_type = em_list_itr->fltr_info.lkup_type;
5179
5180                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5181                     l_type != ICE_SW_LKUP_ETHERTYPE)
5182                         return ICE_ERR_PARAM;
5183
5184                 recp_list = &sw->recp_list[l_type];
5185                 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5186                                                                em_list_itr);
5187                 if (em_list_itr->status)
5188                         return em_list_itr->status;
5189         }
5190         return ICE_SUCCESS;
5191 }
5192
5193 /**
5194  * ice_remove_eth_mac - remove a ethertype based filter rule
5195  * @hw: pointer to the hardware structure
5196  * @em_list: list of ethertype and forwarding information
5197  *
5198  */
5199 enum ice_status
5200 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5201 {
5202         if (!em_list || !hw)
5203                 return ICE_ERR_PARAM;
5204
5205         return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
5206 }
5207
5208 /**
5209  * ice_rem_sw_rule_info
5210  * @hw: pointer to the hardware structure
5211  * @rule_head: pointer to the switch list structure that we want to delete
5212  */
5213 static void
5214 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5215 {
5216         if (!LIST_EMPTY(rule_head)) {
5217                 struct ice_fltr_mgmt_list_entry *entry;
5218                 struct ice_fltr_mgmt_list_entry *tmp;
5219
5220                 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
5221                                          ice_fltr_mgmt_list_entry, list_entry) {
5222                         LIST_DEL(&entry->list_entry);
5223                         ice_free(hw, entry);
5224                 }
5225         }
5226 }
5227
5228 /**
5229  * ice_rem_adv_rule_info
5230  * @hw: pointer to the hardware structure
5231  * @rule_head: pointer to the switch list structure that we want to delete
5232  */
5233 static void
5234 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5235 {
5236         struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
5237         struct ice_adv_fltr_mgmt_list_entry *lst_itr;
5238
5239         if (LIST_EMPTY(rule_head))
5240                 return;
5241
5242         LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
5243                                  ice_adv_fltr_mgmt_list_entry, list_entry) {
5244                 LIST_DEL(&lst_itr->list_entry);
5245                 ice_free(hw, lst_itr->lkups);
5246                 ice_free(hw, lst_itr);
5247         }
5248 }
5249
5250 /**
5251  * ice_rem_all_sw_rules_info
5252  * @hw: pointer to the hardware structure
5253  */
5254 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
5255 {
5256         struct ice_switch_info *sw = hw->switch_info;
5257         u8 i;
5258
5259         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5260                 struct LIST_HEAD_TYPE *rule_head;
5261
5262                 rule_head = &sw->recp_list[i].filt_rules;
5263                 if (!sw->recp_list[i].adv_rule)
5264                         ice_rem_sw_rule_info(hw, rule_head);
5265                 else
5266                         ice_rem_adv_rule_info(hw, rule_head);
5267                 if (sw->recp_list[i].adv_rule &&
5268                     LIST_EMPTY(&sw->recp_list[i].filt_rules))
5269                         sw->recp_list[i].adv_rule = false;
5270         }
5271 }
5272
5273 /**
5274  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
5275  * @pi: pointer to the port_info structure
5276  * @vsi_handle: VSI handle to set as default
5277  * @set: true to add the above mentioned switch rule, false to remove it
5278  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
5279  *
5280  * add filter rule to set/unset given VSI as default VSI for the switch
5281  * (represented by swid)
5282  */
5283 enum ice_status
5284 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
5285                  u8 direction)
5286 {
5287         struct ice_aqc_sw_rules_elem *s_rule;
5288         struct ice_fltr_info f_info;
5289         struct ice_hw *hw = pi->hw;
5290         enum ice_adminq_opc opcode;
5291         enum ice_status status;
5292         u16 s_rule_size;
5293         u16 hw_vsi_id;
5294
5295         if (!ice_is_vsi_valid(hw, vsi_handle))
5296                 return ICE_ERR_PARAM;
5297         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5298
5299         s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
5300                 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
5301
5302         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
5303         if (!s_rule)
5304                 return ICE_ERR_NO_MEMORY;
5305
5306         ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
5307
5308         f_info.lkup_type = ICE_SW_LKUP_DFLT;
5309         f_info.flag = direction;
5310         f_info.fltr_act = ICE_FWD_TO_VSI;
5311         f_info.fwd_id.hw_vsi_id = hw_vsi_id;
5312
5313         if (f_info.flag & ICE_FLTR_RX) {
5314                 f_info.src = pi->lport;
5315                 f_info.src_id = ICE_SRC_ID_LPORT;
5316                 if (!set)
5317                         f_info.fltr_rule_id =
5318                                 pi->dflt_rx_vsi_rule_id;
5319         } else if (f_info.flag & ICE_FLTR_TX) {
5320                 f_info.src_id = ICE_SRC_ID_VSI;
5321                 f_info.src = hw_vsi_id;
5322                 if (!set)
5323                         f_info.fltr_rule_id =
5324                                 pi->dflt_tx_vsi_rule_id;
5325         }
5326
5327         if (set)
5328                 opcode = ice_aqc_opc_add_sw_rules;
5329         else
5330                 opcode = ice_aqc_opc_remove_sw_rules;
5331
5332         ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
5333
5334         status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
5335         if (status || !(f_info.flag & ICE_FLTR_TX_RX))
5336                 goto out;
5337         if (set) {
5338                 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5339
5340                 if (f_info.flag & ICE_FLTR_TX) {
5341                         pi->dflt_tx_vsi_num = hw_vsi_id;
5342                         pi->dflt_tx_vsi_rule_id = index;
5343                 } else if (f_info.flag & ICE_FLTR_RX) {
5344                         pi->dflt_rx_vsi_num = hw_vsi_id;
5345                         pi->dflt_rx_vsi_rule_id = index;
5346                 }
5347         } else {
5348                 if (f_info.flag & ICE_FLTR_TX) {
5349                         pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
5350                         pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
5351                 } else if (f_info.flag & ICE_FLTR_RX) {
5352                         pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
5353                         pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
5354                 }
5355         }
5356
5357 out:
5358         ice_free(hw, s_rule);
5359         return status;
5360 }
5361
5362 /**
5363  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
5364  * @list_head: head of rule list
5365  * @f_info: rule information
5366  *
5367  * Helper function to search for a unicast rule entry - this is to be used
5368  * to remove unicast MAC filter that is not shared with other VSIs on the
5369  * PF switch.
5370  *
5371  * Returns pointer to entry storing the rule if found
5372  */
5373 static struct ice_fltr_mgmt_list_entry *
5374 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
5375                           struct ice_fltr_info *f_info)
5376 {
5377         struct ice_fltr_mgmt_list_entry *list_itr;
5378
5379         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
5380                             list_entry) {
5381                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
5382                             sizeof(f_info->l_data)) &&
5383                     f_info->fwd_id.hw_vsi_id ==
5384                     list_itr->fltr_info.fwd_id.hw_vsi_id &&
5385                     f_info->flag == list_itr->fltr_info.flag)
5386                         return list_itr;
5387         }
5388         return NULL;
5389 }
5390
5391 /**
5392  * ice_remove_mac_rule - remove a MAC based filter rule
5393  * @hw: pointer to the hardware structure
5394  * @m_list: list of MAC addresses and forwarding information
5395  * @recp_list: list from which function remove MAC address
5396  *
5397  * This function removes either a MAC filter rule or a specific VSI from a
5398  * VSI list for a multicast MAC address.
5399  *
5400  * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
5401  * ice_add_mac. Caller should be aware that this call will only work if all
5402  * the entries passed into m_list were added previously. It will not attempt to
5403  * do a partial remove of entries that were found.
5404  */
5405 static enum ice_status
5406 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
5407                     struct ice_sw_recipe *recp_list)
5408 {
5409         struct ice_fltr_list_entry *list_itr, *tmp;
5410         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5411
5412         if (!m_list)
5413                 return ICE_ERR_PARAM;
5414
5415         rule_lock = &recp_list->filt_rule_lock;
5416         LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
5417                                  list_entry) {
5418                 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
5419                 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
5420                 u16 vsi_handle;
5421
5422                 if (l_type != ICE_SW_LKUP_MAC)
5423                         return ICE_ERR_PARAM;
5424
5425                 vsi_handle = list_itr->fltr_info.vsi_handle;
5426                 if (!ice_is_vsi_valid(hw, vsi_handle))
5427                         return ICE_ERR_PARAM;
5428
5429                 list_itr->fltr_info.fwd_id.hw_vsi_id =
5430                                         ice_get_hw_vsi_num(hw, vsi_handle);
5431                 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
5432                         /* Don't remove the unicast address that belongs to
5433                          * another VSI on the switch, since it is not being
5434                          * shared...
5435                          */
5436                         ice_acquire_lock(rule_lock);
5437                         if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
5438                                                        &list_itr->fltr_info)) {
5439                                 ice_release_lock(rule_lock);
5440                                 return ICE_ERR_DOES_NOT_EXIST;
5441                         }
5442                         ice_release_lock(rule_lock);
5443                 }
5444                 list_itr->status = ice_remove_rule_internal(hw, recp_list,
5445                                                             list_itr);
5446                 if (list_itr->status)
5447                         return list_itr->status;
5448         }
5449         return ICE_SUCCESS;
5450 }
5451
5452 /**
5453  * ice_remove_mac - remove a MAC address based filter rule
5454  * @hw: pointer to the hardware structure
5455  * @m_list: list of MAC addresses and forwarding information
5456  *
5457  */
5458 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5459 {
5460         struct ice_sw_recipe *recp_list;
5461
5462         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5463         return ice_remove_mac_rule(hw, m_list, recp_list);
5464 }
5465
5466 /**
5467  * ice_remove_vlan_rule - Remove VLAN based filter rule
5468  * @hw: pointer to the hardware structure
5469  * @v_list: list of VLAN entries and forwarding information
5470  * @recp_list: list from which function remove VLAN
5471  */
5472 static enum ice_status
5473 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5474                      struct ice_sw_recipe *recp_list)
5475 {
5476         struct ice_fltr_list_entry *v_list_itr, *tmp;
5477
5478         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5479                                  list_entry) {
5480                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5481
5482                 if (l_type != ICE_SW_LKUP_VLAN)
5483                         return ICE_ERR_PARAM;
5484                 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5485                                                               v_list_itr);
5486                 if (v_list_itr->status)
5487                         return v_list_itr->status;
5488         }
5489         return ICE_SUCCESS;
5490 }
5491
5492 /**
5493  * ice_remove_vlan - remove a VLAN address based filter rule
5494  * @hw: pointer to the hardware structure
5495  * @v_list: list of VLAN and forwarding information
5496  *
5497  */
5498 enum ice_status
5499 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5500 {
5501         struct ice_sw_recipe *recp_list;
5502
5503         if (!v_list || !hw)
5504                 return ICE_ERR_PARAM;
5505
5506         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5507         return ice_remove_vlan_rule(hw, v_list, recp_list);
5508 }
5509
5510 /**
5511  * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5512  * @hw: pointer to the hardware structure
5513  * @v_list: list of MAC VLAN entries and forwarding information
5514  * @recp_list: list from which function remove MAC VLAN
5515  */
5516 static enum ice_status
5517 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5518                          struct ice_sw_recipe *recp_list)
5519 {
5520         struct ice_fltr_list_entry *v_list_itr, *tmp;
5521
5522         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5523         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5524                                  list_entry) {
5525                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5526
5527                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5528                         return ICE_ERR_PARAM;
5529                 v_list_itr->status =
5530                         ice_remove_rule_internal(hw, recp_list,
5531                                                  v_list_itr);
5532                 if (v_list_itr->status)
5533                         return v_list_itr->status;
5534         }
5535         return ICE_SUCCESS;
5536 }
5537
5538 /**
5539  * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5540  * @hw: pointer to the hardware structure
5541  * @mv_list: list of MAC VLAN and forwarding information
5542  */
5543 enum ice_status
5544 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5545 {
5546         struct ice_sw_recipe *recp_list;
5547
5548         if (!mv_list || !hw)
5549                 return ICE_ERR_PARAM;
5550
5551         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5552         return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5553 }
5554
5555 /**
5556  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5557  * @fm_entry: filter entry to inspect
5558  * @vsi_handle: VSI handle to compare with filter info
5559  */
5560 static bool
5561 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5562 {
5563         return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5564                  fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5565                 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5566                  fm_entry->vsi_list_info &&
5567                  (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5568                                  vsi_handle))));
5569 }
5570
5571 /**
5572  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5573  * @hw: pointer to the hardware structure
5574  * @vsi_handle: VSI handle to remove filters from
5575  * @vsi_list_head: pointer to the list to add entry to
5576  * @fi: pointer to fltr_info of filter entry to copy & add
5577  *
5578  * Helper function, used when creating a list of filters to remove from
5579  * a specific VSI. The entry added to vsi_list_head is a COPY of the
5580  * original filter entry, with the exception of fltr_info.fltr_act and
5581  * fltr_info.fwd_id fields. These are set such that later logic can
5582  * extract which VSI to remove the fltr from, and pass on that information.
5583  */
5584 static enum ice_status
5585 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5586                                struct LIST_HEAD_TYPE *vsi_list_head,
5587                                struct ice_fltr_info *fi)
5588 {
5589         struct ice_fltr_list_entry *tmp;
5590
5591         /* this memory is freed up in the caller function
5592          * once filters for this VSI are removed
5593          */
5594         tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5595         if (!tmp)
5596                 return ICE_ERR_NO_MEMORY;
5597
5598         tmp->fltr_info = *fi;
5599
5600         /* Overwrite these fields to indicate which VSI to remove filter from,
5601          * so find and remove logic can extract the information from the
5602          * list entries. Note that original entries will still have proper
5603          * values.
5604          */
5605         tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5606         tmp->fltr_info.vsi_handle = vsi_handle;
5607         tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5608
5609         LIST_ADD(&tmp->list_entry, vsi_list_head);
5610
5611         return ICE_SUCCESS;
5612 }
5613
5614 /**
5615  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5616  * @hw: pointer to the hardware structure
5617  * @vsi_handle: VSI handle to remove filters from
5618  * @lkup_list_head: pointer to the list that has certain lookup type filters
5619  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5620  *
5621  * Locates all filters in lkup_list_head that are used by the given VSI,
5622  * and adds COPIES of those entries to vsi_list_head (intended to be used
5623  * to remove the listed filters).
5624  * Note that this means all entries in vsi_list_head must be explicitly
5625  * deallocated by the caller when done with list.
5626  */
5627 static enum ice_status
5628 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5629                          struct LIST_HEAD_TYPE *lkup_list_head,
5630                          struct LIST_HEAD_TYPE *vsi_list_head)
5631 {
5632         struct ice_fltr_mgmt_list_entry *fm_entry;
5633         enum ice_status status = ICE_SUCCESS;
5634
5635         /* check to make sure VSI ID is valid and within boundary */
5636         if (!ice_is_vsi_valid(hw, vsi_handle))
5637                 return ICE_ERR_PARAM;
5638
5639         LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5640                             ice_fltr_mgmt_list_entry, list_entry) {
5641                 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
5642                         continue;
5643
5644                 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5645                                                         vsi_list_head,
5646                                                         &fm_entry->fltr_info);
5647                 if (status)
5648                         return status;
5649         }
5650         return status;
5651 }
5652
5653 /**
5654  * ice_determine_promisc_mask
5655  * @fi: filter info to parse
5656  *
5657  * Helper function to determine which ICE_PROMISC_ mask corresponds
5658  * to given filter into.
5659  */
5660 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5661 {
5662         u16 vid = fi->l_data.mac_vlan.vlan_id;
5663         u8 *macaddr = fi->l_data.mac.mac_addr;
5664         bool is_tx_fltr = false;
5665         u8 promisc_mask = 0;
5666
5667         if (fi->flag == ICE_FLTR_TX)
5668                 is_tx_fltr = true;
5669
5670         if (IS_BROADCAST_ETHER_ADDR(macaddr))
5671                 promisc_mask |= is_tx_fltr ?
5672                         ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5673         else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5674                 promisc_mask |= is_tx_fltr ?
5675                         ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5676         else if (IS_UNICAST_ETHER_ADDR(macaddr))
5677                 promisc_mask |= is_tx_fltr ?
5678                         ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
5679         if (vid)
5680                 promisc_mask |= is_tx_fltr ?
5681                         ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5682
5683         return promisc_mask;
5684 }
5685
5686 /**
5687  * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5688  * @hw: pointer to the hardware structure
5689  * @vsi_handle: VSI handle to retrieve info from
5690  * @promisc_mask: pointer to mask to be filled in
5691  * @vid: VLAN ID of promisc VLAN VSI
5692  * @sw: pointer to switch info struct for which function add rule
5693  */
5694 static enum ice_status
5695 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5696                      u16 *vid, struct ice_switch_info *sw)
5697 {
5698         struct ice_fltr_mgmt_list_entry *itr;
5699         struct LIST_HEAD_TYPE *rule_head;
5700         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5701
5702         if (!ice_is_vsi_valid(hw, vsi_handle))
5703                 return ICE_ERR_PARAM;
5704
5705         *vid = 0;
5706         *promisc_mask = 0;
5707         rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5708         rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5709
5710         ice_acquire_lock(rule_lock);
5711         LIST_FOR_EACH_ENTRY(itr, rule_head,
5712                             ice_fltr_mgmt_list_entry, list_entry) {
5713                 /* Continue if this filter doesn't apply to this VSI or the
5714                  * VSI ID is not in the VSI map for this filter
5715                  */
5716                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5717                         continue;
5718
5719                 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5720         }
5721         ice_release_lock(rule_lock);
5722
5723         return ICE_SUCCESS;
5724 }
5725
5726 /**
5727  * ice_get_vsi_promisc - get promiscuous mode of given VSI
5728  * @hw: pointer to the hardware structure
5729  * @vsi_handle: VSI handle to retrieve info from
5730  * @promisc_mask: pointer to mask to be filled in
5731  * @vid: VLAN ID of promisc VLAN VSI
5732  */
5733 enum ice_status
5734 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5735                     u16 *vid)
5736 {
5737         return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5738                                     vid, hw->switch_info);
5739 }
5740
5741 /**
5742  * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5743  * @hw: pointer to the hardware structure
5744  * @vsi_handle: VSI handle to retrieve info from
5745  * @promisc_mask: pointer to mask to be filled in
5746  * @vid: VLAN ID of promisc VLAN VSI
5747  * @sw: pointer to switch info struct for which function add rule
5748  */
5749 static enum ice_status
5750 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5751                           u16 *vid, struct ice_switch_info *sw)
5752 {
5753         struct ice_fltr_mgmt_list_entry *itr;
5754         struct LIST_HEAD_TYPE *rule_head;
5755         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5756
5757         if (!ice_is_vsi_valid(hw, vsi_handle))
5758                 return ICE_ERR_PARAM;
5759
5760         *vid = 0;
5761         *promisc_mask = 0;
5762         rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5763         rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5764
5765         ice_acquire_lock(rule_lock);
5766         LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5767                             list_entry) {
5768                 /* Continue if this filter doesn't apply to this VSI or the
5769                  * VSI ID is not in the VSI map for this filter
5770                  */
5771                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5772                         continue;
5773
5774                 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5775         }
5776         ice_release_lock(rule_lock);
5777
5778         return ICE_SUCCESS;
5779 }
5780
5781 /**
5782  * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5783  * @hw: pointer to the hardware structure
5784  * @vsi_handle: VSI handle to retrieve info from
5785  * @promisc_mask: pointer to mask to be filled in
5786  * @vid: VLAN ID of promisc VLAN VSI
5787  */
5788 enum ice_status
5789 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5790                          u16 *vid)
5791 {
5792         return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5793                                          vid, hw->switch_info);
5794 }
5795
5796 /**
5797  * ice_remove_promisc - Remove promisc based filter rules
5798  * @hw: pointer to the hardware structure
5799  * @recp_id: recipe ID for which the rule needs to removed
5800  * @v_list: list of promisc entries
5801  */
5802 static enum ice_status
5803 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5804                    struct LIST_HEAD_TYPE *v_list)
5805 {
5806         struct ice_fltr_list_entry *v_list_itr, *tmp;
5807         struct ice_sw_recipe *recp_list;
5808
5809         recp_list = &hw->switch_info->recp_list[recp_id];
5810         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5811                                  list_entry) {
5812                 v_list_itr->status =
5813                         ice_remove_rule_internal(hw, recp_list, v_list_itr);
5814                 if (v_list_itr->status)
5815                         return v_list_itr->status;
5816         }
5817         return ICE_SUCCESS;
5818 }
5819
/**
 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 * @sw: pointer to switch info struct for which function add rule
 *
 * Collects all promisc filters on the chosen recipe that (a) apply to
 * @vsi_handle, (b) match @vid when VLAN promisc is being cleared, and
 * (c) are fully covered by @promisc_mask, then removes them.
 */
static enum ice_status
_ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		       u16 vid, struct ice_switch_info *sw)
{
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct LIST_HEAD_TYPE remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* VLAN promisc bits live on the PROMISC_VLAN recipe; all other
	 * promisc bits on the plain PROMISC recipe.
	 */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	/* Gather matching filters into remove_list_head under the lock;
	 * the actual removal happens after the lock is dropped.
	 */
	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			ice_release_lock(rule_lock);
			goto free_fltr_list;
		}
	}
	ice_release_lock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	/* Free the filter COPIES created by ice_add_entry_to_vsi_fltr_list */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
	}

	return status;
}
5894
5895 /**
5896  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5897  * @hw: pointer to the hardware structure
5898  * @vsi_handle: VSI handle to clear mode
5899  * @promisc_mask: mask of promiscuous config bits to clear
5900  * @vid: VLAN ID to clear VLAN promiscuous
5901  */
5902 enum ice_status
5903 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5904                       u8 promisc_mask, u16 vid)
5905 {
5906         return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5907                                       vid, hw->switch_info);
5908 }
5909
/**
 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 * @lport: logical port number to configure promisc mode
 * @sw: pointer to switch info struct for which function add rule
 *
 * Installs one switch rule per direction/packet-type bit present in
 * @promisc_mask, draining the mask one bit per loop iteration. Stops
 * at the first rule-add failure and returns its status.
 */
static enum ice_status
_ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		     u16 vid, u8 lport, struct ice_switch_info *sw)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	enum ice_status status = ICE_SUCCESS;
	bool is_tx_fltr;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);

	/* VLAN promisc bits use the PROMISC_VLAN recipe and carry @vid;
	 * everything else uses the plain PROMISC recipe.
	 */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		struct ice_sw_recipe *recp_list;
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		/* Pick exactly one UCAST/MCAST/BCAST bit per iteration,
		 * RX before TX, and clear it from the mask.
		 */
		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
				   ICE_NONDMA_TO_NONDMA);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1;	/* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;
		recp_list = &sw->recp_list[recipe_id];

		status = ice_add_rule_internal(hw, recp_list, lport,
					       &f_list_entry);
		if (status != ICE_SUCCESS)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}
6030
6031 /**
6032  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
6033  * @hw: pointer to the hardware structure
6034  * @vsi_handle: VSI handle to configure
6035  * @promisc_mask: mask of promiscuous config bits
6036  * @vid: VLAN ID to set VLAN promiscuous
6037  */
6038 enum ice_status
6039 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6040                     u16 vid)
6041 {
6042         return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
6043                                     hw->port_info->lport,
6044                                     hw->switch_info);
6045 }
6046
/**
 * _ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 * @lport: logical port number to configure promisc mode
 * @sw: pointer to switch info struct for which function add rule
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 */
static enum ice_status
_ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			  bool rm_vlan_promisc, u8 lport,
			  struct ice_switch_info *sw)
{
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct LIST_HEAD_TYPE vsi_list_head;
	struct LIST_HEAD_TYPE *vlan_head;
	struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
	enum ice_status status;
	u16 vlan_id;

	/* Snapshot the VSI's VLAN filters (as copies) under the lock so
	 * the per-VLAN promisc calls below run without holding it.
	 */
	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	ice_acquire_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	ice_release_lock(vlan_lock);
	if (status)
		goto free_fltr_list;

	/* Set or clear promisc mode for each VLAN the VSI is a member of;
	 * stop at the first failure.
	 */
	LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
			    list_entry) {
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status =  _ice_clear_vsi_promisc(hw, vsi_handle,
							 promisc_mask,
							 vlan_id, sw);
		else
			status =  _ice_set_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id,
						       lport, sw);
		if (status)
			break;
	}

free_fltr_list:
	/* Free the filter COPIES created by ice_add_to_vsi_fltr_list() */
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&list_itr->list_entry);
		ice_free(hw, list_itr);
	}
	return status;
}
6103
6104 /**
6105  * ice_set_vlan_vsi_promisc
6106  * @hw: pointer to the hardware structure
6107  * @vsi_handle: VSI handle to configure
6108  * @promisc_mask: mask of promiscuous config bits
6109  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6110  *
6111  * Configure VSI with all associated VLANs to given promiscuous mode(s)
6112  */
6113 enum ice_status
6114 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6115                          bool rm_vlan_promisc)
6116 {
6117         return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
6118                                          rm_vlan_promisc, hw->port_info->lport,
6119                                          hw->switch_info);
6120 }
6121
/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @recp_list: recipe list from which function remove fltr
 * @lkup: switch rule filter lookup type
 *
 * Copies all of the VSI's filters of the given lookup type into a
 * local list, then dispatches to the removal routine matching @lkup.
 * Failures are not propagated (void return).
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 struct ice_sw_recipe *recp_list,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_fltr_list_entry *fm_entry;
	struct LIST_HEAD_TYPE remove_list_head;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &recp_list[lkup].filt_rule_lock;
	rule_head = &recp_list[lkup].filt_rules;
	/* Gather filter copies under the lock; removal runs after the
	 * lock is dropped.
	 */
	ice_acquire_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	ice_release_lock(rule_lock);
	if (status)
		goto free_fltr_list;

	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		ice_remove_mac_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		ice_remove_eth_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_DFLT:
		ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
		break;
	case ICE_SW_LKUP_LAST:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
		break;
	}

free_fltr_list:
	/* Free the filter COPIES created by ice_add_to_vsi_fltr_list() */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
	}
}
6184
6185 /**
6186  * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6187  * @hw: pointer to the hardware structure
6188  * @vsi_handle: VSI handle to remove filters from
6189  * @sw: pointer to switch info struct
6190  */
6191 static void
6192 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6193                          struct ice_switch_info *sw)
6194 {
6195         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6196
6197         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6198                                  sw->recp_list, ICE_SW_LKUP_MAC);
6199         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6200                                  sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6201         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6202                                  sw->recp_list, ICE_SW_LKUP_PROMISC);
6203         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6204                                  sw->recp_list, ICE_SW_LKUP_VLAN);
6205         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6206                                  sw->recp_list, ICE_SW_LKUP_DFLT);
6207         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6208                                  sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6209         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6210                                  sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6211         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6212                                  sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6213 }
6214
6215 /**
6216  * ice_remove_vsi_fltr - Remove all filters for a VSI
6217  * @hw: pointer to the hardware structure
6218  * @vsi_handle: VSI handle to remove filters from
6219  */
6220 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
6221 {
6222         ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
6223 }
6224
6225 /**
6226  * ice_alloc_res_cntr - allocating resource counter
6227  * @hw: pointer to the hardware structure
6228  * @type: type of resource
6229  * @alloc_shared: if set it is shared else dedicated
6230  * @num_items: number of entries requested for FD resource type
6231  * @counter_id: counter index returned by AQ call
6232  */
6233 enum ice_status
6234 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6235                    u16 *counter_id)
6236 {
6237         struct ice_aqc_alloc_free_res_elem *buf;
6238         enum ice_status status;
6239         u16 buf_len;
6240
6241         /* Allocate resource */
6242         buf_len = ice_struct_size(buf, elem, 1);
6243         buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6244         if (!buf)
6245                 return ICE_ERR_NO_MEMORY;
6246
6247         buf->num_elems = CPU_TO_LE16(num_items);
6248         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6249                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
6250
6251         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6252                                        ice_aqc_opc_alloc_res, NULL);
6253         if (status)
6254                 goto exit;
6255
6256         *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6257
6258 exit:
6259         ice_free(hw, buf);
6260         return status;
6261 }
6262
6263 /**
6264  * ice_free_res_cntr - free resource counter
6265  * @hw: pointer to the hardware structure
6266  * @type: type of resource
6267  * @alloc_shared: if set it is shared else dedicated
6268  * @num_items: number of entries to be freed for FD resource type
6269  * @counter_id: counter ID resource which needs to be freed
6270  */
6271 enum ice_status
6272 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6273                   u16 counter_id)
6274 {
6275         struct ice_aqc_alloc_free_res_elem *buf;
6276         enum ice_status status;
6277         u16 buf_len;
6278
6279         /* Free resource */
6280         buf_len = ice_struct_size(buf, elem, 1);
6281         buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6282         if (!buf)
6283                 return ICE_ERR_NO_MEMORY;
6284
6285         buf->num_elems = CPU_TO_LE16(num_items);
6286         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6287                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
6288         buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6289
6290         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6291                                        ice_aqc_opc_free_res, NULL);
6292         if (status)
6293                 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6294
6295         ice_free(hw, buf);
6296         return status;
6297 }
6298
6299 /**
6300  * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
6301  * @hw: pointer to the hardware structure
6302  * @counter_id: returns counter index
6303  */
6304 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
6305 {
6306         return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6307                                   ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6308                                   counter_id);
6309 }
6310
6311 /**
6312  * ice_free_vlan_res_counter - Free counter resource for VLAN type
6313  * @hw: pointer to the hardware structure
6314  * @counter_id: counter index to be freed
6315  */
6316 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
6317 {
6318         return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6319                                  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6320                                  counter_id);
6321 }
6322
6323 /**
6324  * ice_alloc_res_lg_act - add large action resource
6325  * @hw: pointer to the hardware structure
6326  * @l_id: large action ID to fill it in
6327  * @num_acts: number of actions to hold with a large action entry
6328  */
6329 static enum ice_status
6330 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
6331 {
6332         struct ice_aqc_alloc_free_res_elem *sw_buf;
6333         enum ice_status status;
6334         u16 buf_len;
6335
6336         if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
6337                 return ICE_ERR_PARAM;
6338
6339         /* Allocate resource for large action */
6340         buf_len = ice_struct_size(sw_buf, elem, 1);
6341         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6342         if (!sw_buf)
6343                 return ICE_ERR_NO_MEMORY;
6344
6345         sw_buf->num_elems = CPU_TO_LE16(1);
6346
6347         /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
6348          * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
6349          * If num_acts is greater than 2, then use
6350          * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
6351          * The num_acts cannot exceed 4. This was ensured at the
6352          * beginning of the function.
6353          */
6354         if (num_acts == 1)
6355                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
6356         else if (num_acts == 2)
6357                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
6358         else
6359                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
6360
6361         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
6362                                        ice_aqc_opc_alloc_res, NULL);
6363         if (!status)
6364                 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
6365
6366         ice_free(hw, sw_buf);
6367         return status;
6368 }
6369
6370 /**
6371  * ice_add_mac_with_sw_marker - add filter with sw marker
6372  * @hw: pointer to the hardware structure
6373  * @f_info: filter info structure containing the MAC filter information
6374  * @sw_marker: sw marker to tag the Rx descriptor with
6375  */
6376 enum ice_status
6377 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
6378                            u16 sw_marker)
6379 {
6380         struct ice_fltr_mgmt_list_entry *m_entry;
6381         struct ice_fltr_list_entry fl_info;
6382         struct ice_sw_recipe *recp_list;
6383         struct LIST_HEAD_TYPE l_head;
6384         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
6385         enum ice_status ret;
6386         bool entry_exists;
6387         u16 lg_act_id;
6388
6389         if (f_info->fltr_act != ICE_FWD_TO_VSI)
6390                 return ICE_ERR_PARAM;
6391
6392         if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6393                 return ICE_ERR_PARAM;
6394
6395         if (sw_marker == ICE_INVAL_SW_MARKER_ID)
6396                 return ICE_ERR_PARAM;
6397
6398         if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6399                 return ICE_ERR_PARAM;
6400         f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6401
6402         /* Add filter if it doesn't exist so then the adding of large
6403          * action always results in update
6404          */
6405
6406         INIT_LIST_HEAD(&l_head);
6407         fl_info.fltr_info = *f_info;
6408         LIST_ADD(&fl_info.list_entry, &l_head);
6409
6410         entry_exists = false;
6411         ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6412                                hw->port_info->lport);
6413         if (ret == ICE_ERR_ALREADY_EXISTS)
6414                 entry_exists = true;
6415         else if (ret)
6416                 return ret;
6417
6418         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6419         rule_lock = &recp_list->filt_rule_lock;
6420         ice_acquire_lock(rule_lock);
6421         /* Get the book keeping entry for the filter */
6422         m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6423         if (!m_entry)
6424                 goto exit_error;
6425
6426         /* If counter action was enabled for this rule then don't enable
6427          * sw marker large action
6428          */
6429         if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6430                 ret = ICE_ERR_PARAM;
6431                 goto exit_error;
6432         }
6433
6434         /* if same marker was added before */
6435         if (m_entry->sw_marker_id == sw_marker) {
6436                 ret = ICE_ERR_ALREADY_EXISTS;
6437                 goto exit_error;
6438         }
6439
6440         /* Allocate a hardware table entry to hold large act. Three actions
6441          * for marker based large action
6442          */
6443         ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
6444         if (ret)
6445                 goto exit_error;
6446
6447         if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6448                 goto exit_error;
6449
6450         /* Update the switch rule to add the marker action */
6451         ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
6452         if (!ret) {
6453                 ice_release_lock(rule_lock);
6454                 return ret;
6455         }
6456
6457 exit_error:
6458         ice_release_lock(rule_lock);
6459         /* only remove entry if it did not exist previously */
6460         if (!entry_exists)
6461                 ret = ice_remove_mac(hw, &l_head);
6462
6463         return ret;
6464 }
6465
6466 /**
6467  * ice_add_mac_with_counter - add filter with counter enabled
6468  * @hw: pointer to the hardware structure
6469  * @f_info: pointer to filter info structure containing the MAC filter
6470  *          information
6471  */
6472 enum ice_status
6473 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
6474 {
6475         struct ice_fltr_mgmt_list_entry *m_entry;
6476         struct ice_fltr_list_entry fl_info;
6477         struct ice_sw_recipe *recp_list;
6478         struct LIST_HEAD_TYPE l_head;
6479         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
6480         enum ice_status ret;
6481         bool entry_exist;
6482         u16 counter_id;
6483         u16 lg_act_id;
6484
6485         if (f_info->fltr_act != ICE_FWD_TO_VSI)
6486                 return ICE_ERR_PARAM;
6487
6488         if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6489                 return ICE_ERR_PARAM;
6490
6491         if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6492                 return ICE_ERR_PARAM;
6493         f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6494         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6495
6496         entry_exist = false;
6497
6498         rule_lock = &recp_list->filt_rule_lock;
6499
6500         /* Add filter if it doesn't exist so then the adding of large
6501          * action always results in update
6502          */
6503         INIT_LIST_HEAD(&l_head);
6504
6505         fl_info.fltr_info = *f_info;
6506         LIST_ADD(&fl_info.list_entry, &l_head);
6507
6508         ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6509                                hw->port_info->lport);
6510         if (ret == ICE_ERR_ALREADY_EXISTS)
6511                 entry_exist = true;
6512         else if (ret)
6513                 return ret;
6514
6515         ice_acquire_lock(rule_lock);
6516         m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6517         if (!m_entry) {
6518                 ret = ICE_ERR_BAD_PTR;
6519                 goto exit_error;
6520         }
6521
6522         /* Don't enable counter for a filter for which sw marker was enabled */
6523         if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
6524                 ret = ICE_ERR_PARAM;
6525                 goto exit_error;
6526         }
6527
6528         /* If a counter was already enabled then don't need to add again */
6529         if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6530                 ret = ICE_ERR_ALREADY_EXISTS;
6531                 goto exit_error;
6532         }
6533
6534         /* Allocate a hardware table entry to VLAN counter */
6535         ret = ice_alloc_vlan_res_counter(hw, &counter_id);
6536         if (ret)
6537                 goto exit_error;
6538
6539         /* Allocate a hardware table entry to hold large act. Two actions for
6540          * counter based large action
6541          */
6542         ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
6543         if (ret)
6544                 goto exit_error;
6545
6546         if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6547                 goto exit_error;
6548
6549         /* Update the switch rule to add the counter action */
6550         ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6551         if (!ret) {
6552                 ice_release_lock(rule_lock);
6553                 return ret;
6554         }
6555
6556 exit_error:
6557         ice_release_lock(rule_lock);
6558         /* only remove entry if it did not exist previously */
6559         if (!entry_exist)
6560                 ret = ice_remove_mac(hw, &l_head);
6561
6562         return ret;
6563 }
6564
/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethernet header and corresponding
 * bytes are 0, 2, 4 in the actual packet header and src address is at 6, 8, 10
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 0, 2 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_SCTP_IL,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_VXLAN_GPE,	{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
	{ ICE_PFCP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_L2TPV3,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_ESP,		{ 0, 2, 4, 6 } },
	{ ICE_AH,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_NAT_T,		{ 8, 10, 12, 14 } },
	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
	{ ICE_VLAN_EX,		{ 0, 2 } },
	{ ICE_VLAN_IN,		{ 0, 2 } },
};
6604
/* Maps each software protocol type to the hardware protocol ID used when
 * programming recipes. Headers carried over UDP or GRE (VXLAN, GENEVE,
 * GTP, PFCP, NAT-T, ...) map to the HW ID of the transport header they
 * ride on, as seen in the entries below.
 */

static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_PFCP,		ICE_UDP_ILOS_HW },
	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
	{ ICE_ESP,		ICE_ESP_HW },
	{ ICE_AH,		ICE_AH_HW },
	{ ICE_NAT_T,		ICE_UDP_ILOS_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
	{ ICE_VLAN_EX,		ICE_VLAN_OF_HW },
	{ ICE_VLAN_IN,		ICE_VLAN_OL_HW },
};
6639
6640 /**
6641  * ice_find_recp - find a recipe
6642  * @hw: pointer to the hardware structure
6643  * @lkup_exts: extension sequence to match
6644  *
6645  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
6646  */
6647 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6648                          enum ice_sw_tunnel_type tun_type, u32 priority)
6649 {
6650         bool refresh_required = true;
6651         struct ice_sw_recipe *recp;
6652         u8 i;
6653
6654         /* Walk through existing recipes to find a match */
6655         recp = hw->switch_info->recp_list;
6656         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6657                 /* If recipe was not created for this ID, in SW bookkeeping,
6658                  * check if FW has an entry for this recipe. If the FW has an
6659                  * entry update it in our SW bookkeeping and continue with the
6660                  * matching.
6661                  */
6662                 if (!recp[i].recp_created)
6663                         if (ice_get_recp_frm_fw(hw,
6664                                                 hw->switch_info->recp_list, i,
6665                                                 &refresh_required))
6666                                 continue;
6667
6668                 /* Skip inverse action recipes */
6669                 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6670                     ICE_AQ_RECIPE_ACT_INV_ACT)
6671                         continue;
6672
6673                 /* if number of words we are looking for match */
6674                 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6675                         struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6676                         struct ice_fv_word *be = lkup_exts->fv_words;
6677                         u16 *cr = recp[i].lkup_exts.field_mask;
6678                         u16 *de = lkup_exts->field_mask;
6679                         bool found = true;
6680                         u8 pe, qr;
6681
6682                         /* ar, cr, and qr are related to the recipe words, while
6683                          * be, de, and pe are related to the lookup words
6684                          */
6685                         for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6686                                 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6687                                      qr++) {
6688                                         if (ar[qr].off == be[pe].off &&
6689                                             ar[qr].prot_id == be[pe].prot_id &&
6690                                             cr[qr] == de[pe])
6691                                                 /* Found the "pe"th word in the
6692                                                  * given recipe
6693                                                  */
6694                                                 break;
6695                                 }
6696                                 /* After walking through all the words in the
6697                                  * "i"th recipe if "p"th word was not found then
6698                                  * this recipe is not what we are looking for.
6699                                  * So break out from this loop and try the next
6700                                  * recipe
6701                                  */
6702                                 if (qr >= recp[i].lkup_exts.n_val_words) {
6703                                         found = false;
6704                                         break;
6705                                 }
6706                         }
6707                         /* If for "i"th recipe the found was never set to false
6708                          * then it means we found our match
6709                          */
6710                         if (tun_type == recp[i].tun_type && found &&
6711                             priority == recp[i].priority)
6712                                 return i; /* Return the recipe ID */
6713                 }
6714         }
6715         return ICE_MAX_NUM_RECIPES;
6716 }
6717
6718 /**
6719  * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
6720  *
6721  * As protocol id for outer vlan is different in dvm and svm, if dvm is
6722  * supported protocol array record for outer vlan has to be modified to
6723  * reflect the value proper for DVM.
6724  */
6725 void ice_change_proto_id_to_dvm(void)
6726 {
6727         u8 i;
6728
6729         for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6730                 if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
6731                     ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
6732                         ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
6733 }
6734
6735 /**
6736  * ice_prot_type_to_id - get protocol ID from protocol type
6737  * @type: protocol type
6738  * @id: pointer to variable that will receive the ID
6739  *
6740  * Returns true if found, false otherwise
6741  */
6742 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6743 {
6744         u8 i;
6745
6746         for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6747                 if (ice_prot_id_tbl[i].type == type) {
6748                         *id = ice_prot_id_tbl[i].protocol_id;
6749                         return true;
6750                 }
6751         return false;
6752 }
6753
6754 /**
6755  * ice_fill_valid_words - count valid words
6756  * @rule: advanced rule with lookup information
6757  * @lkup_exts: byte offset extractions of the words that are valid
6758  *
6759  * calculate valid words in a lookup rule using mask value
6760  */
6761 static u8
6762 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6763                      struct ice_prot_lkup_ext *lkup_exts)
6764 {
6765         u8 j, word, prot_id, ret_val;
6766
6767         if (!ice_prot_type_to_id(rule->type, &prot_id))
6768                 return 0;
6769
6770         word = lkup_exts->n_val_words;
6771
6772         for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6773                 if (((u16 *)&rule->m_u)[j] &&
6774                     (size_t)rule->type < ARRAY_SIZE(ice_prot_ext)) {
6775                         /* No more space to accommodate */
6776                         if (word >= ICE_MAX_CHAIN_WORDS)
6777                                 return 0;
6778                         lkup_exts->fv_words[word].off =
6779                                 ice_prot_ext[rule->type].offs[j];
6780                         lkup_exts->fv_words[word].prot_id =
6781                                 ice_prot_id_tbl[rule->type].protocol_id;
6782                         lkup_exts->field_mask[word] =
6783                                 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6784                         word++;
6785                 }
6786
6787         ret_val = word - lkup_exts->n_val_words;
6788         lkup_exts->n_val_words = word;
6789
6790         return ret_val;
6791 }
6792
6793 /**
6794  * ice_create_first_fit_recp_def - Create a recipe grouping
6795  * @hw: pointer to the hardware structure
6796  * @lkup_exts: an array of protocol header extractions
6797  * @rg_list: pointer to a list that stores new recipe groups
6798  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6799  *
6800  * Using first fit algorithm, take all the words that are still not done
6801  * and start grouping them in 4-word groups. Each group makes up one
6802  * recipe.
6803  */
6804 static enum ice_status
6805 ice_create_first_fit_recp_def(struct ice_hw *hw,
6806                               struct ice_prot_lkup_ext *lkup_exts,
6807                               struct LIST_HEAD_TYPE *rg_list,
6808                               u8 *recp_cnt)
6809 {
6810         struct ice_pref_recipe_group *grp = NULL;
6811         u8 j;
6812
6813         *recp_cnt = 0;
6814
6815         if (!lkup_exts->n_val_words) {
6816                 struct ice_recp_grp_entry *entry;
6817
6818                 entry = (struct ice_recp_grp_entry *)
6819                         ice_malloc(hw, sizeof(*entry));
6820                 if (!entry)
6821                         return ICE_ERR_NO_MEMORY;
6822                 LIST_ADD(&entry->l_entry, rg_list);
6823                 grp = &entry->r_group;
6824                 (*recp_cnt)++;
6825                 grp->n_val_pairs = 0;
6826         }
6827
6828         /* Walk through every word in the rule to check if it is not done. If so
6829          * then this word needs to be part of a new recipe.
6830          */
6831         for (j = 0; j < lkup_exts->n_val_words; j++)
6832                 if (!ice_is_bit_set(lkup_exts->done, j)) {
6833                         if (!grp ||
6834                             grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6835                                 struct ice_recp_grp_entry *entry;
6836
6837                                 entry = (struct ice_recp_grp_entry *)
6838                                         ice_malloc(hw, sizeof(*entry));
6839                                 if (!entry)
6840                                         return ICE_ERR_NO_MEMORY;
6841                                 LIST_ADD(&entry->l_entry, rg_list);
6842                                 grp = &entry->r_group;
6843                                 (*recp_cnt)++;
6844                         }
6845
6846                         grp->pairs[grp->n_val_pairs].prot_id =
6847                                 lkup_exts->fv_words[j].prot_id;
6848                         grp->pairs[grp->n_val_pairs].off =
6849                                 lkup_exts->fv_words[j].off;
6850                         grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6851                         grp->n_val_pairs++;
6852                 }
6853
6854         return ICE_SUCCESS;
6855 }
6856
6857 /**
6858  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6859  * @hw: pointer to the hardware structure
6860  * @fv_list: field vector with the extraction sequence information
6861  * @rg_list: recipe groupings with protocol-offset pairs
6862  *
6863  * Helper function to fill in the field vector indices for protocol-offset
6864  * pairs. These indexes are then ultimately programmed into a recipe.
6865  */
6866 static enum ice_status
6867 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6868                        struct LIST_HEAD_TYPE *rg_list)
6869 {
6870         struct ice_sw_fv_list_entry *fv;
6871         struct ice_recp_grp_entry *rg;
6872         struct ice_fv_word *fv_ext;
6873
6874         if (LIST_EMPTY(fv_list))
6875                 return ICE_SUCCESS;
6876
6877         fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6878         fv_ext = fv->fv_ptr->ew;
6879
6880         LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6881                 u8 i;
6882
6883                 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6884                         struct ice_fv_word *pr;
6885                         bool found = false;
6886                         u16 mask;
6887                         u8 j;
6888
6889                         pr = &rg->r_group.pairs[i];
6890                         mask = rg->r_group.mask[i];
6891
6892                         for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6893                                 if (fv_ext[j].prot_id == pr->prot_id &&
6894                                     fv_ext[j].off == pr->off) {
6895                                         found = true;
6896
6897                                         /* Store index of field vector */
6898                                         rg->fv_idx[i] = j;
6899                                         rg->fv_mask[i] = mask;
6900                                         break;
6901                                 }
6902
6903                         /* Protocol/offset could not be found, caller gave an
6904                          * invalid pair
6905                          */
6906                         if (!found)
6907                                 return ICE_ERR_PARAM;
6908                 }
6909         }
6910
6911         return ICE_SUCCESS;
6912 }
6913
6914 /**
6915  * ice_find_free_recp_res_idx - find free result indexes for recipe
6916  * @hw: pointer to hardware structure
6917  * @profiles: bitmap of profiles that will be associated with the new recipe
6918  * @free_idx: pointer to variable to receive the free index bitmap
6919  *
6920  * The algorithm used here is:
6921  *      1. When creating a new recipe, create a set P which contains all
6922  *         Profiles that will be associated with our new recipe
6923  *
6924  *      2. For each Profile p in set P:
6925  *          a. Add all recipes associated with Profile p into set R
6926  *          b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6927  *              [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6928  *              i. Or just assume they all have the same possible indexes:
6929  *                      44, 45, 46, 47
6930  *                      i.e., PossibleIndexes = 0x0000F00000000000
6931  *
6932  *      3. For each Recipe r in set R:
6933  *          a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6934  *          b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6935  *
6936  *      FreeIndexes will contain the bits indicating the indexes free for use,
6937  *      then the code needs to update the recipe[r].used_result_idx_bits to
6938  *      indicate which indexes were selected for use by this recipe.
6939  */
6940 static u16
6941 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6942                            ice_bitmap_t *free_idx)
6943 {
6944         ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6945         ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6946         ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6947         u16 bit;
6948
6949         ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6950         ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6951         ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6952         ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
6953
6954         ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6955
6956         /* For each profile we are going to associate the recipe with, add the
6957          * recipes that are associated with that profile. This will give us
6958          * the set of recipes that our recipe may collide with. Also, determine
6959          * what possible result indexes are usable given this set of profiles.
6960          */
6961         ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6962                 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6963                               ICE_MAX_NUM_RECIPES);
6964                 ice_and_bitmap(possible_idx, possible_idx,
6965                                hw->switch_info->prof_res_bm[bit],
6966                                ICE_MAX_FV_WORDS);
6967         }
6968
6969         /* For each recipe that our new recipe may collide with, determine
6970          * which indexes have been used.
6971          */
6972         ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6973                 ice_or_bitmap(used_idx, used_idx,
6974                               hw->switch_info->recp_list[bit].res_idxs,
6975                               ICE_MAX_FV_WORDS);
6976
6977         ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6978
6979         /* return number of free indexes */
6980         return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6981 }
6982
/**
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @profiles: bitmap of profiles that will be associated.
 *
 * Allocates hardware recipe slots for every recipe group in @rm->rg_list,
 * programs their lookup indexes/masks from the extracted field vectors,
 * chains them through result indexes when more than one group is needed,
 * and commits the set with the add-recipe admin queue command. On success
 * the AQ buffer is handed to @rm (rm->root_buf) and the software recipe
 * book-keeping in hw->switch_info is updated.
 */
static enum ice_status
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  ice_bitmap_t *profiles)
{
	ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	enum ice_status status;
	u16 free_res_idx;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	 /* check number of free result indices */
	ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		if (rm->n_grp_count > free_res_idx)
			return ICE_ERR_MAX_LIMIT;

		/* account for the extra chaining recipe */
		rm->n_grp_count++;
	}

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
		return ICE_ERR_MAX_LIMIT;

	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
							    ICE_MAX_NUM_RECIPES,
							    sizeof(*tmp));
	if (!tmp)
		return ICE_ERR_NO_MEMORY;

	buf = (struct ice_aqc_recipe_data_elem *)
		ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
	if (!buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_mem;
	}

	ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	/* Read back existing recipe data; tmp[0] is reused below as a
	 * template for each new recipe element.
	 */
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
				   NULL);
	if (status || recipe_count == 0)
		goto err_unroll;

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		u8 i;

		status = ice_alloc_recipe(hw, &entry->rid);
		if (status)
			goto err_unroll;

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		buf[recps].content.rid = 0;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
			   ICE_NONDMA_MEM);

		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 * to be 0
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;
		}

		/* Program the extracted field-vector word indexes and masks;
		 * index 0 is reserved for the switch-ID match above.
		 */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] =
				CPU_TO_LE16(entry->fv_mask[i]);
		}

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			 * that can be used.
			 */
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
				status = ICE_ERR_MAX_LIMIT;
				goto err_unroll;
			}

			/* Publish this recipe's match result at chain_idx so
			 * the root (chaining) recipe can match on it.
			 */
			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			ice_clear_bit(chain_idx, result_idx_bm);
			chain_idx = ice_find_first_bit(result_idx_bm,
						       ICE_MAX_FV_WORDS);
		}

		/* fill recipe dependencies */
		ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
				ICE_MAX_NUM_RECIPES);
		ice_set_bit(buf[recps].recipe_indx,
			    (ice_bitmap_t *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
		recps++;
	}

	if (rm->n_grp_count == 1) {
		/* Single recipe: it is its own root */
		rm->root_rid = buf[0].recipe_indx;
		ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[0].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			status = ICE_ERR_BAD_PTR;
			goto err_unroll;
		}
		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referring newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
	} else {
		struct ice_recp_grp_entry *last_chain_entry;
		u16 rid, i;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);
		if (status)
			goto err_unroll;

		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
			sizeof(*last_chain_entry));
		if (!last_chain_entry) {
			status = ICE_ERR_NO_MEMORY;
			goto err_unroll;
		}
		last_chain_entry->rid = rid;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
			   ICE_NONDMA_MEM);
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;
		}

		i = 1;
		/* update r_bitmap with the recp that is used for chaining */
		ice_set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		/* Make the chaining recipe match on each sub-recipe's result
		 * index (lkup_indx slots 1..n, full mask).
		 */
		LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
				    l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
			ice_set_bit(entry->rid, rm->r_bitmap);
		}
		/* Ownership of last_chain_entry moves to rg_list; the caller's
		 * list cleanup frees it on both success and error paths.
		 */
		LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[recps].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			status = ICE_ERR_BAD_PTR;
			goto err_unroll;
		}
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

		recps++;
		rm->root_rid = (u8)rid;
	}
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		goto err_unroll;

	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);
	if (status)
		goto err_unroll;

	/* Every recipe that just got created add it to the recipe
	 * book keeping list
	 */
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {
				buf_idx = idx;
				idx_found = true;
			}

		if (!idx_found) {
			status = ICE_ERR_OUT_OF_RANGE;
			goto err_unroll;
		}

		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;

		recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);

		ice_memcpy(&recp->ext_words, entry->r_group.pairs,
			   entry->r_group.n_val_pairs *
			   sizeof(struct ice_fv_word),
			   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
			   sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
		if (!is_root)
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->recp_created = true;
	}
	/* buf ownership transfers to rm; it is NOT freed on success */
	rm->root_buf = buf;
	ice_free(hw, tmp);
	return status;

err_unroll:
err_mem:
	/* NOTE(review): on the err_mem path buf is NULL; assumes
	 * ice_free(hw, NULL) is a no-op -- confirm against ice_osdep.
	 */
	ice_free(hw, tmp);
	ice_free(hw, buf);
	return status;
}
7282
7283 /**
7284  * ice_create_recipe_group - creates recipe group
7285  * @hw: pointer to hardware structure
7286  * @rm: recipe management list entry
7287  * @lkup_exts: lookup elements
7288  */
7289 static enum ice_status
7290 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7291                         struct ice_prot_lkup_ext *lkup_exts)
7292 {
7293         enum ice_status status;
7294         u8 recp_count = 0;
7295
7296         rm->n_grp_count = 0;
7297
7298         /* Create recipes for words that are marked not done by packing them
7299          * as best fit.
7300          */
7301         status = ice_create_first_fit_recp_def(hw, lkup_exts,
7302                                                &rm->rg_list, &recp_count);
7303         if (!status) {
7304                 rm->n_grp_count += recp_count;
7305                 rm->n_ext_words = lkup_exts->n_val_words;
7306                 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7307                            sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7308                 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7309                            sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
7310         }
7311
7312         return status;
7313 }
7314
7315 /**
7316  * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7317  * @hw: pointer to hardware structure
7318  * @lkups: lookup elements or match criteria for the advanced recipe, one
7319  *         structure per protocol header
7320  * @lkups_cnt: number of protocols
7321  * @bm: bitmap of field vectors to consider
7322  * @fv_list: pointer to a list that holds the returned field vectors
7323  */
7324 static enum ice_status
7325 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7326            ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7327 {
7328         enum ice_status status;
7329         u8 *prot_ids;
7330         u16 i;
7331
7332         if (!lkups_cnt)
7333                 return ICE_SUCCESS;
7334
7335         prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7336         if (!prot_ids)
7337                 return ICE_ERR_NO_MEMORY;
7338
7339         for (i = 0; i < lkups_cnt; i++)
7340                 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7341                         status = ICE_ERR_CFG;
7342                         goto free_mem;
7343                 }
7344
7345         /* Find field vectors that include all specified protocol types */
7346         status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
7347
7348 free_mem:
7349         ice_free(hw, prot_ids);
7350         return status;
7351 }
7352
7353 /**
7354  * ice_tun_type_match_word - determine if tun type needs a match mask
7355  * @tun_type: tunnel type
7356  * @mask: mask to be used for the tunnel
7357  */
7358 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
7359 {
7360         switch (tun_type) {
7361         case ICE_SW_TUN_VXLAN_GPE:
7362         case ICE_SW_TUN_GENEVE:
7363         case ICE_SW_TUN_VXLAN:
7364         case ICE_SW_TUN_NVGRE:
7365         case ICE_SW_TUN_UDP:
7366         case ICE_ALL_TUNNELS:
7367         case ICE_SW_TUN_AND_NON_TUN_QINQ:
7368         case ICE_NON_TUN_QINQ:
7369         case ICE_SW_TUN_PPPOE_QINQ:
7370         case ICE_SW_TUN_PPPOE_PAY_QINQ:
7371         case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7372         case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7373                 *mask = ICE_TUN_FLAG_MASK;
7374                 return true;
7375
7376         case ICE_SW_TUN_GENEVE_VLAN:
7377         case ICE_SW_TUN_VXLAN_VLAN:
7378                 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
7379                 return true;
7380
7381         default:
7382                 *mask = 0;
7383                 return false;
7384         }
7385 }
7386
7387 /**
7388  * ice_add_special_words - Add words that are not protocols, such as metadata
7389  * @rinfo: other information regarding the rule e.g. priority and action info
7390  * @lkup_exts: lookup word structure
7391  */
7392 static enum ice_status
7393 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7394                       struct ice_prot_lkup_ext *lkup_exts)
7395 {
7396         u16 mask;
7397
7398         /* If this is a tunneled packet, then add recipe index to match the
7399          * tunnel bit in the packet metadata flags.
7400          */
7401         if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
7402                 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
7403                         u8 word = lkup_exts->n_val_words++;
7404
7405                         lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7406                         lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
7407                         lkup_exts->field_mask[word] = mask;
7408                 } else {
7409                         return ICE_ERR_MAX_LIMIT;
7410                 }
7411         }
7412
7413         return ICE_SUCCESS;
7414 }
7415
/**
 * ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
 * @hw: pointer to hardware structure
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @bm: pointer to memory for returning the bitmap of field vectors
 *
 * Maps the rule's tunnel type either to a profile-type class (resolved to a
 * field-vector bitmap via ice_get_sw_fv_bitmap) or, for tunnel types tied to
 * specific hardware profiles, sets those ICE_PROFID_* bits directly in @bm
 * and returns.
 */
static void
ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
			 ice_bitmap_t *bm)
{
	enum ice_prof_type prof_type;

	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);

	switch (rinfo->tun_type) {
	case ICE_NON_TUN:
	case ICE_NON_TUN_QINQ:
		prof_type = ICE_PROF_NON_TUN;
		break;
	case ICE_ALL_TUNNELS:
		prof_type = ICE_PROF_TUN_ALL;
		break;
	/* UDP-based tunnels share one profile class */
	case ICE_SW_TUN_VXLAN_GPE:
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_GENEVE_VLAN:
	case ICE_SW_TUN_VXLAN:
	case ICE_SW_TUN_VXLAN_VLAN:
	case ICE_SW_TUN_UDP:
	case ICE_SW_TUN_GTP:
		prof_type = ICE_PROF_TUN_UDP;
		break;
	case ICE_SW_TUN_NVGRE:
		prof_type = ICE_PROF_TUN_GRE;
		break;
	case ICE_SW_TUN_PPPOE:
	case ICE_SW_TUN_PPPOE_QINQ:
		prof_type = ICE_PROF_TUN_PPPOE;
		break;
	/* PPPoE variants map to explicit profile IDs */
	case ICE_SW_TUN_PPPOE_PAY:
	case ICE_SW_TUN_PPPOE_PAY_QINQ:
		ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4:
	case ICE_SW_TUN_PPPOE_IPV4_QINQ:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4_TCP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4_UDP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6:
	case ICE_SW_TUN_PPPOE_IPV6_QINQ:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6_TCP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6_UDP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
		return;
	/* IPsec / L2TPv3 / NAT-T profile-ID mappings */
	case ICE_SW_TUN_PROFID_IPV6_ESP:
	case ICE_SW_TUN_IPV6_ESP:
		ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_AH:
	case ICE_SW_TUN_IPV6_AH:
		ice_set_bit(ICE_PROFID_IPV6_AH, bm);
		return;
	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
	case ICE_SW_TUN_IPV6_L2TPV3:
		ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_NAT_T:
	case ICE_SW_TUN_IPV6_NAT_T:
		ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
		return;
	/* PFCP node/session profile-ID mappings */
	case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
		ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
		ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
		ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
		ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
		return;
	case ICE_SW_TUN_IPV4_NAT_T:
		ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
		return;
	case ICE_SW_TUN_IPV4_L2TPV3:
		ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
		return;
	case ICE_SW_TUN_IPV4_ESP:
		ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
		return;
	case ICE_SW_TUN_IPV4_AH:
		ice_set_bit(ICE_PROFID_IPV4_AH, bm);
		return;
	/* Plain (non-tunnel) L4 profile-ID mappings */
	case ICE_SW_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
		return;
	case ICE_SW_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
		return;
	case ICE_SW_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
		return;
	case ICE_SW_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
		return;
	/* GTP-U mappings: outer IPv4/IPv6, optional extension header (EH),
	 * and inner IPv4/IPv6 with OTHER/UDP/TCP profile variants
	 */
	case ICE_SW_TUN_IPV4_GTPU_NO_PAY:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_TEID, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_NO_PAY:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_TEID, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV4:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV4:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV4:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV4:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV6:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV6:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV6:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV6:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
		return;
	/* Anything unrecognized considers every profile */
	case ICE_SW_TUN_AND_NON_TUN:
	case ICE_SW_TUN_AND_NON_TUN_QINQ:
	default:
		prof_type = ICE_PROF_ALL;
		break;
	}

	ice_get_sw_fv_bitmap(hw, prof_type, bm);
}
7636
7637 /**
7638  * ice_is_prof_rule - determine if rule type is a profile rule
7639  * @type: the rule type
7640  *
7641  * if the rule type is a profile rule, that means that there no field value
7642  * match required, in this case just a profile hit is required.
7643  */
7644 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
7645 {
7646         switch (type) {
7647         case ICE_SW_TUN_PROFID_IPV6_ESP:
7648         case ICE_SW_TUN_PROFID_IPV6_AH:
7649         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7650         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7651         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7652         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7653         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7654         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7655                 return true;
7656         default:
7657                 break;
7658         }
7659
7660         return false;
7661 }
7662
7663 /**
7664  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7665  * @hw: pointer to hardware structure
7666  * @lkups: lookup elements or match criteria for the advanced recipe, one
7667  *  structure per protocol header
7668  * @lkups_cnt: number of protocols
7669  * @rinfo: other information regarding the rule e.g. priority and action info
7670  * @rid: return the recipe ID of the recipe created
7671  */
7672 static enum ice_status
7673 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7674                    u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7675 {
7676         ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7677         ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7678         struct ice_prot_lkup_ext *lkup_exts;
7679         struct ice_recp_grp_entry *r_entry;
7680         struct ice_sw_fv_list_entry *fvit;
7681         struct ice_recp_grp_entry *r_tmp;
7682         struct ice_sw_fv_list_entry *tmp;
7683         enum ice_status status = ICE_SUCCESS;
7684         struct ice_sw_recipe *rm;
7685         u8 i;
7686
7687         if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7688                 return ICE_ERR_PARAM;
7689
7690         lkup_exts = (struct ice_prot_lkup_ext *)
7691                 ice_malloc(hw, sizeof(*lkup_exts));
7692         if (!lkup_exts)
7693                 return ICE_ERR_NO_MEMORY;
7694
7695         /* Determine the number of words to be matched and if it exceeds a
7696          * recipe's restrictions
7697          */
7698         for (i = 0; i < lkups_cnt; i++) {
7699                 u16 count;
7700
7701                 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7702                         status = ICE_ERR_CFG;
7703                         goto err_free_lkup_exts;
7704                 }
7705
7706                 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7707                 if (!count) {
7708                         status = ICE_ERR_CFG;
7709                         goto err_free_lkup_exts;
7710                 }
7711         }
7712
7713         rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7714         if (!rm) {
7715                 status = ICE_ERR_NO_MEMORY;
7716                 goto err_free_lkup_exts;
7717         }
7718
7719         /* Get field vectors that contain fields extracted from all the protocol
7720          * headers being programmed.
7721          */
7722         INIT_LIST_HEAD(&rm->fv_list);
7723         INIT_LIST_HEAD(&rm->rg_list);
7724
7725         /* Get bitmap of field vectors (profiles) that are compatible with the
7726          * rule request; only these will be searched in the subsequent call to
7727          * ice_get_fv.
7728          */
7729         ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7730
7731         status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7732         if (status)
7733                 goto err_unroll;
7734
7735         /* Create any special protocol/offset pairs, such as looking at tunnel
7736          * bits by extracting metadata
7737          */
7738         status = ice_add_special_words(rinfo, lkup_exts);
7739         if (status)
7740                 goto err_free_lkup_exts;
7741
7742         /* Group match words into recipes using preferred recipe grouping
7743          * criteria.
7744          */
7745         status = ice_create_recipe_group(hw, rm, lkup_exts);
7746         if (status)
7747                 goto err_unroll;
7748
7749         /* set the recipe priority if specified */
7750         rm->priority = (u8)rinfo->priority;
7751
7752         /* Find offsets from the field vector. Pick the first one for all the
7753          * recipes.
7754          */
7755         status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7756         if (status)
7757                 goto err_unroll;
7758
7759         /* An empty FV list means to use all the profiles returned in the
7760          * profile bitmap
7761          */
7762         if (LIST_EMPTY(&rm->fv_list)) {
7763                 u16 j;
7764
7765                 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7766                         struct ice_sw_fv_list_entry *fvl;
7767
7768                         fvl = (struct ice_sw_fv_list_entry *)
7769                                 ice_malloc(hw, sizeof(*fvl));
7770                         if (!fvl)
7771                                 goto err_unroll;
7772                         fvl->fv_ptr = NULL;
7773                         fvl->profile_id = j;
7774                         LIST_ADD(&fvl->list_entry, &rm->fv_list);
7775                 }
7776         }
7777
7778         /* get bitmap of all profiles the recipe will be associated with */
7779         ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7780         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7781                             list_entry) {
7782                 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7783                 ice_set_bit((u16)fvit->profile_id, profiles);
7784         }
7785
7786         /* Look for a recipe which matches our requested fv / mask list */
7787         *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type, rinfo->priority);
7788         if (*rid < ICE_MAX_NUM_RECIPES)
7789                 /* Success if found a recipe that match the existing criteria */
7790                 goto err_unroll;
7791
7792         rm->tun_type = rinfo->tun_type;
7793         /* Recipe we need does not exist, add a recipe */
7794         status = ice_add_sw_recipe(hw, rm, profiles);
7795         if (status)
7796                 goto err_unroll;
7797
7798         /* Associate all the recipes created with all the profiles in the
7799          * common field vector.
7800          */
7801         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7802                             list_entry) {
7803                 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
7804                 u16 j;
7805
7806                 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7807                                                       (u8 *)r_bitmap, NULL);
7808                 if (status)
7809                         goto err_unroll;
7810
7811                 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7812                               ICE_MAX_NUM_RECIPES);
7813                 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7814                 if (status)
7815                         goto err_unroll;
7816
7817                 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7818                                                       (u8 *)r_bitmap,
7819                                                       NULL);
7820                 ice_release_change_lock(hw);
7821
7822                 if (status)
7823                         goto err_unroll;
7824
7825                 /* Update profile to recipe bitmap array */
7826                 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7827                               ICE_MAX_NUM_RECIPES);
7828
7829                 /* Update recipe to profile bitmap array */
7830                 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7831                         ice_set_bit((u16)fvit->profile_id,
7832                                     recipe_to_profile[j]);
7833         }
7834
7835         *rid = rm->root_rid;
7836         ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7837                    lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
7838 err_unroll:
7839         LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7840                                  ice_recp_grp_entry, l_entry) {
7841                 LIST_DEL(&r_entry->l_entry);
7842                 ice_free(hw, r_entry);
7843         }
7844
7845         LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7846                                  list_entry) {
7847                 LIST_DEL(&fvit->list_entry);
7848                 ice_free(hw, fvit);
7849         }
7850
7851         if (rm->root_buf)
7852                 ice_free(hw, rm->root_buf);
7853
7854         ice_free(hw, rm);
7855
7856 err_free_lkup_exts:
7857         ice_free(hw, lkup_exts);
7858
7859         return status;
7860 }
7861
7862 /**
7863  * ice_find_dummy_packet - find dummy packet by tunnel type
7864  *
7865  * @lkups: lookup elements or match criteria for the advanced recipe, one
7866  *         structure per protocol header
7867  * @lkups_cnt: number of protocols
7868  * @tun_type: tunnel type from the match criteria
7869  * @pkt: dummy packet to fill according to filter match criteria
7870  * @pkt_len: packet length of dummy packet
7871  * @offsets: pointer to receive the pointer to the offsets for the packet
7872  */
7873 static void
7874 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7875                       enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7876                       u16 *pkt_len,
7877                       const struct ice_dummy_pkt_offsets **offsets)
7878 {
7879         bool tcp = false, udp = false, ipv6 = false, vlan = false;
7880         bool gre = false, mpls = false;
7881         u16 i;
7882
7883         for (i = 0; i < lkups_cnt; i++) {
7884                 if (lkups[i].type == ICE_UDP_ILOS)
7885                         udp = true;
7886                 else if (lkups[i].type == ICE_TCP_IL)
7887                         tcp = true;
7888                 else if (lkups[i].type == ICE_IPV6_OFOS)
7889                         ipv6 = true;
7890                 else if (lkups[i].type == ICE_VLAN_OFOS)
7891                         vlan = true;
7892                 else if (lkups[i].type == ICE_IPV4_OFOS &&
7893                          lkups[i].h_u.ipv4_hdr.protocol ==
7894                                 ICE_IPV4_NVGRE_PROTO_ID &&
7895                          lkups[i].m_u.ipv4_hdr.protocol ==
7896                                 0xFF)
7897                         gre = true;
7898                 else if (lkups[i].type == ICE_PPPOE &&
7899                          lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7900                                 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7901                          lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7902                                 0xFFFF)
7903                         ipv6 = true;
7904                 else if (lkups[i].type == ICE_ETYPE_OL &&
7905                          lkups[i].h_u.ethertype.ethtype_id ==
7906                                 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7907                          lkups[i].m_u.ethertype.ethtype_id ==
7908                                         0xFFFF)
7909                         ipv6 = true;
7910                 else if (lkups[i].type == ICE_IPV4_IL &&
7911                          lkups[i].h_u.ipv4_hdr.protocol ==
7912                                 ICE_TCP_PROTO_ID &&
7913                          lkups[i].m_u.ipv4_hdr.protocol ==
7914                                 0xFF)
7915                         tcp = true;
7916                 else if (lkups[i].type == ICE_ETYPE_OL &&
7917                          lkups[i].h_u.ethertype.ethtype_id ==
7918                                 CPU_TO_BE16(ICE_MPLS_ETHER_ID) &&
7919                          lkups[i].m_u.ethertype.ethtype_id == 0xFFFF)
7920                         mpls = true;
7921         }
7922
7923         if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7924              tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7925                 *pkt = dummy_qinq_ipv6_pkt;
7926                 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7927                 *offsets = dummy_qinq_ipv6_packet_offsets;
7928                 return;
7929         } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7930                            tun_type == ICE_NON_TUN_QINQ) {
7931                 *pkt = dummy_qinq_ipv4_pkt;
7932                 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7933                 *offsets = dummy_qinq_ipv4_packet_offsets;
7934                 return;
7935         }
7936
7937         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7938                 *pkt = dummy_qinq_pppoe_ipv6_packet;
7939                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7940                 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7941                 return;
7942         } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7943                 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7944                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7945                 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7946                 return;
7947         } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ && ipv6) {
7948                 *pkt = dummy_qinq_pppoe_ipv6_packet;
7949                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7950                 *offsets = dummy_qinq_pppoe_packet_offsets;
7951                 return;
7952         } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7953                         tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7954                 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7955                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7956                 *offsets = dummy_qinq_pppoe_packet_offsets;
7957                 return;
7958         }
7959
7960         if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7961                 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7962                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7963                 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7964                 return;
7965         } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7966                 *pkt = dummy_ipv6_gtp_packet;
7967                 *pkt_len = sizeof(dummy_ipv6_gtp_packet);
7968                 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7969                 return;
7970         }
7971
7972         if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7973                 *pkt = dummy_ipv4_esp_pkt;
7974                 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7975                 *offsets = dummy_ipv4_esp_packet_offsets;
7976                 return;
7977         }
7978
7979         if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7980                 *pkt = dummy_ipv6_esp_pkt;
7981                 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7982                 *offsets = dummy_ipv6_esp_packet_offsets;
7983                 return;
7984         }
7985
7986         if (tun_type == ICE_SW_TUN_IPV4_AH) {
7987                 *pkt = dummy_ipv4_ah_pkt;
7988                 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7989                 *offsets = dummy_ipv4_ah_packet_offsets;
7990                 return;
7991         }
7992
7993         if (tun_type == ICE_SW_TUN_IPV6_AH) {
7994                 *pkt = dummy_ipv6_ah_pkt;
7995                 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7996                 *offsets = dummy_ipv6_ah_packet_offsets;
7997                 return;
7998         }
7999
8000         if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
8001                 *pkt = dummy_ipv4_nat_pkt;
8002                 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
8003                 *offsets = dummy_ipv4_nat_packet_offsets;
8004                 return;
8005         }
8006
8007         if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
8008                 *pkt = dummy_ipv6_nat_pkt;
8009                 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
8010                 *offsets = dummy_ipv6_nat_packet_offsets;
8011                 return;
8012         }
8013
8014         if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
8015                 *pkt = dummy_ipv4_l2tpv3_pkt;
8016                 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
8017                 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
8018                 return;
8019         }
8020
8021         if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
8022                 *pkt = dummy_ipv6_l2tpv3_pkt;
8023                 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
8024                 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
8025                 return;
8026         }
8027
8028         if (tun_type == ICE_SW_TUN_GTP) {
8029                 *pkt = dummy_udp_gtp_packet;
8030                 *pkt_len = sizeof(dummy_udp_gtp_packet);
8031                 *offsets = dummy_udp_gtp_packet_offsets;
8032                 return;
8033         }
8034
8035         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4 ||
8036             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4) {
8037                 *pkt = dummy_ipv4_gtpu_ipv4_packet;
8038                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
8039                 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
8040                 return;
8041         }
8042
8043         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_UDP ||
8044             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP) {
8045                 *pkt = dummy_ipv4_gtpu_ipv4_udp_packet;
8046                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet);
8047                 *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets;
8048                 return;
8049         }
8050
8051         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_TCP ||
8052             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP) {
8053                 *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet;
8054                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet);
8055                 *offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets;
8056                 return;
8057         }
8058
8059         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6 ||
8060             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6) {
8061                 *pkt = dummy_ipv4_gtpu_ipv6_packet;
8062                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
8063                 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
8064                 return;
8065         }
8066
8067         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_UDP ||
8068             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP) {
8069                 *pkt = dummy_ipv4_gtpu_ipv6_udp_packet;
8070                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet);
8071                 *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets;
8072                 return;
8073         }
8074
8075         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_TCP ||
8076             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP) {
8077                 *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet;
8078                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet);
8079                 *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets;
8080                 return;
8081         }
8082
8083         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4 ||
8084             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4) {
8085                 *pkt = dummy_ipv6_gtpu_ipv4_packet;
8086                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
8087                 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
8088                 return;
8089         }
8090
8091         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_UDP ||
8092             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP) {
8093                 *pkt = dummy_ipv6_gtpu_ipv4_udp_packet;
8094                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet);
8095                 *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets;
8096                 return;
8097         }
8098
8099         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_TCP ||
8100             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP) {
8101                 *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet;
8102                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet);
8103                 *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets;
8104                 return;
8105         }
8106
8107         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6 ||
8108             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6) {
8109                 *pkt = dummy_ipv6_gtpu_ipv6_packet;
8110                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
8111                 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
8112                 return;
8113         }
8114
8115         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_UDP ||
8116             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP) {
8117                 *pkt = dummy_ipv6_gtpu_ipv6_udp_packet;
8118                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet);
8119                 *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets;
8120                 return;
8121         }
8122
8123         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_TCP ||
8124             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP) {
8125                 *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet;
8126                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet);
8127                 *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets;
8128                 return;
8129         }
8130
8131         if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
8132                 *pkt = dummy_pppoe_ipv6_packet;
8133                 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8134                 *offsets = dummy_pppoe_packet_offsets;
8135                 return;
8136         } else if (tun_type == ICE_SW_TUN_PPPOE ||
8137                 tun_type == ICE_SW_TUN_PPPOE_PAY) {
8138                 *pkt = dummy_pppoe_ipv4_packet;
8139                 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8140                 *offsets = dummy_pppoe_packet_offsets;
8141                 return;
8142         }
8143
8144         if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
8145                 *pkt = dummy_pppoe_ipv4_packet;
8146                 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8147                 *offsets = dummy_pppoe_packet_ipv4_offsets;
8148                 return;
8149         }
8150
8151         if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
8152                 *pkt = dummy_pppoe_ipv4_tcp_packet;
8153                 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
8154                 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
8155                 return;
8156         }
8157
8158         if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
8159                 *pkt = dummy_pppoe_ipv4_udp_packet;
8160                 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
8161                 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
8162                 return;
8163         }
8164
8165         if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
8166                 *pkt = dummy_pppoe_ipv6_packet;
8167                 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8168                 *offsets = dummy_pppoe_packet_ipv6_offsets;
8169                 return;
8170         }
8171
8172         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
8173                 *pkt = dummy_pppoe_ipv6_tcp_packet;
8174                 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
8175                 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
8176                 return;
8177         }
8178
8179         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
8180                 *pkt = dummy_pppoe_ipv6_udp_packet;
8181                 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
8182                 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
8183                 return;
8184         }
8185
8186         if (tun_type == ICE_SW_IPV4_TCP) {
8187                 *pkt = dummy_tcp_packet;
8188                 *pkt_len = sizeof(dummy_tcp_packet);
8189                 *offsets = dummy_tcp_packet_offsets;
8190                 return;
8191         }
8192
8193         if (tun_type == ICE_SW_IPV4_UDP) {
8194                 *pkt = dummy_udp_packet;
8195                 *pkt_len = sizeof(dummy_udp_packet);
8196                 *offsets = dummy_udp_packet_offsets;
8197                 return;
8198         }
8199
8200         if (tun_type == ICE_SW_IPV6_TCP) {
8201                 *pkt = dummy_tcp_ipv6_packet;
8202                 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8203                 *offsets = dummy_tcp_ipv6_packet_offsets;
8204                 return;
8205         }
8206
8207         if (tun_type == ICE_SW_IPV6_UDP) {
8208                 *pkt = dummy_udp_ipv6_packet;
8209                 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8210                 *offsets = dummy_udp_ipv6_packet_offsets;
8211                 return;
8212         }
8213
8214         if (tun_type == ICE_ALL_TUNNELS) {
8215                 *pkt = dummy_gre_udp_packet;
8216                 *pkt_len = sizeof(dummy_gre_udp_packet);
8217                 *offsets = dummy_gre_udp_packet_offsets;
8218                 return;
8219         }
8220
8221         if (tun_type == ICE_SW_TUN_NVGRE || gre) {
8222                 if (tcp) {
8223                         *pkt = dummy_gre_tcp_packet;
8224                         *pkt_len = sizeof(dummy_gre_tcp_packet);
8225                         *offsets = dummy_gre_tcp_packet_offsets;
8226                         return;
8227                 }
8228
8229                 *pkt = dummy_gre_udp_packet;
8230                 *pkt_len = sizeof(dummy_gre_udp_packet);
8231                 *offsets = dummy_gre_udp_packet_offsets;
8232                 return;
8233         }
8234
8235         if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
8236             tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
8237             tun_type == ICE_SW_TUN_GENEVE_VLAN ||
8238             tun_type == ICE_SW_TUN_VXLAN_VLAN) {
8239                 if (tcp) {
8240                         *pkt = dummy_udp_tun_tcp_packet;
8241                         *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
8242                         *offsets = dummy_udp_tun_tcp_packet_offsets;
8243                         return;
8244                 }
8245
8246                 *pkt = dummy_udp_tun_udp_packet;
8247                 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
8248                 *offsets = dummy_udp_tun_udp_packet_offsets;
8249                 return;
8250         }
8251
8252         if (udp && !ipv6) {
8253                 if (vlan) {
8254                         *pkt = dummy_vlan_udp_packet;
8255                         *pkt_len = sizeof(dummy_vlan_udp_packet);
8256                         *offsets = dummy_vlan_udp_packet_offsets;
8257                         return;
8258                 }
8259                 *pkt = dummy_udp_packet;
8260                 *pkt_len = sizeof(dummy_udp_packet);
8261                 *offsets = dummy_udp_packet_offsets;
8262                 return;
8263         } else if (udp && ipv6) {
8264                 if (vlan) {
8265                         *pkt = dummy_vlan_udp_ipv6_packet;
8266                         *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
8267                         *offsets = dummy_vlan_udp_ipv6_packet_offsets;
8268                         return;
8269                 }
8270                 *pkt = dummy_udp_ipv6_packet;
8271                 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8272                 *offsets = dummy_udp_ipv6_packet_offsets;
8273                 return;
8274         } else if ((tcp && ipv6) || ipv6) {
8275                 if (vlan) {
8276                         *pkt = dummy_vlan_tcp_ipv6_packet;
8277                         *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
8278                         *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
8279                         return;
8280                 }
8281                 *pkt = dummy_tcp_ipv6_packet;
8282                 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8283                 *offsets = dummy_tcp_ipv6_packet_offsets;
8284                 return;
8285         }
8286
8287         if (vlan) {
8288                 *pkt = dummy_vlan_tcp_packet;
8289                 *pkt_len = sizeof(dummy_vlan_tcp_packet);
8290                 *offsets = dummy_vlan_tcp_packet_offsets;
8291         }  else if (mpls) {
8292                 *pkt = dummy_mpls_packet;
8293                 *pkt_len = sizeof(dummy_mpls_packet);
8294                 *offsets = dummy_mpls_packet_offsets;
8295         } else {
8296                 *pkt = dummy_tcp_packet;
8297                 *pkt_len = sizeof(dummy_tcp_packet);
8298                 *offsets = dummy_tcp_packet_offsets;
8299         }
8300 }
8301
8302 /**
8303  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
8304  *
8305  * @lkups: lookup elements or match criteria for the advanced recipe, one
8306  *         structure per protocol header
8307  * @lkups_cnt: number of protocols
8308  * @s_rule: stores rule information from the match criteria
8309  * @dummy_pkt: dummy packet to fill according to filter match criteria
8310  * @pkt_len: packet length of dummy packet
8311  * @offsets: offset info for the dummy packet
8312  */
static enum ice_status
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
			  struct ice_aqc_sw_rules_elem *s_rule,
			  const u8 *dummy_pkt, u16 pkt_len,
			  const struct ice_dummy_pkt_offsets *offsets)
{
	u8 *pkt;
	u16 i;

	/* Start with a packet with a pre-defined/dummy content. Then, fill
	 * in the header values to be looked up or matched.
	 */
	pkt = s_rule->pdata.lkup_tx_rx.hdr;

	ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);

	for (i = 0; i < lkups_cnt; i++) {
		enum ice_protocol_type type;
		u16 offset = 0, len = 0, j;
		bool found = false;

		/* find the start of this layer; it should be found since this
		 * was already checked when search for the dummy packet
		 */
		type = lkups[i].type;
		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
			if (type == offsets[j].type) {
				offset = offsets[j].offset;
				found = true;
				break;
			}
		}
		/* this should never happen in a correct calling sequence */
		if (!found)
			return ICE_ERR_PARAM;

		/* Map the lookup's protocol type to the size of the header
		 * region that may be patched in the dummy packet.
		 */
		switch (lkups[i].type) {
		case ICE_MAC_OFOS:
		case ICE_MAC_IL:
			len = sizeof(struct ice_ether_hdr);
			break;
		case ICE_ETYPE_OL:
			len = sizeof(struct ice_ethtype_hdr);
			break;
		case ICE_VLAN_OFOS:
		case ICE_VLAN_EX:
		case ICE_VLAN_IN:
			len = sizeof(struct ice_vlan_hdr);
			break;
		case ICE_IPV4_OFOS:
		case ICE_IPV4_IL:
			len = sizeof(struct ice_ipv4_hdr);
			break;
		case ICE_IPV6_OFOS:
		case ICE_IPV6_IL:
			len = sizeof(struct ice_ipv6_hdr);
			break;
		case ICE_TCP_IL:
		case ICE_UDP_OF:
		case ICE_UDP_ILOS:
			len = sizeof(struct ice_l4_hdr);
			break;
		case ICE_SCTP_IL:
			len = sizeof(struct ice_sctp_hdr);
			break;
		case ICE_NVGRE:
			len = sizeof(struct ice_nvgre);
			break;
		case ICE_VXLAN:
		case ICE_GENEVE:
		case ICE_VXLAN_GPE:
			len = sizeof(struct ice_udp_tnl_hdr);
			break;

		case ICE_GTP:
		case ICE_GTP_NO_PAY:
			len = sizeof(struct ice_udp_gtp_hdr);
			break;
		case ICE_PPPOE:
			len = sizeof(struct ice_pppoe_hdr);
			break;
		case ICE_ESP:
			len = sizeof(struct ice_esp_hdr);
			break;
		case ICE_NAT_T:
			len = sizeof(struct ice_nat_t_hdr);
			break;
		case ICE_AH:
			len = sizeof(struct ice_ah_hdr);
			break;
		case ICE_L2TPV3:
			len = sizeof(struct ice_l2tpv3_sess_hdr);
			break;
		default:
			/* unsupported protocol in the lookup list */
			return ICE_ERR_PARAM;
		}

		/* the length should be a word multiple */
		if (len % ICE_BYTES_PER_WORD)
			return ICE_ERR_CFG;

		/* We have the offset to the header start, the length, the
		 * caller's header values and mask. Use this information to
		 * copy the data into the dummy packet appropriately based on
		 * the mask. Note that we need to only write the bits as
		 * indicated by the mask to make sure we don't improperly write
		 * over any significant packet data.
		 */
		for (j = 0; j < len / sizeof(u16); j++)
			if (((u16 *)&lkups[i].m_u)[j])
				((u16 *)(pkt + offset))[j] =
					(((u16 *)(pkt + offset))[j] &
					 ~((u16 *)&lkups[i].m_u)[j]) |
					(((u16 *)&lkups[i].h_u)[j] &
					 ((u16 *)&lkups[i].m_u)[j]);
	}

	s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);

	return ICE_SUCCESS;
}
8434
8435 /**
8436  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8437  * @hw: pointer to the hardware structure
8438  * @tun_type: tunnel type
8439  * @pkt: dummy packet to fill in
8440  * @offsets: offset info for the dummy packet
8441  */
8442 static enum ice_status
8443 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8444                         u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
8445 {
8446         u16 open_port, i;
8447
8448         switch (tun_type) {
8449         case ICE_SW_TUN_AND_NON_TUN:
8450         case ICE_SW_TUN_VXLAN_GPE:
8451         case ICE_SW_TUN_VXLAN:
8452         case ICE_SW_TUN_VXLAN_VLAN:
8453         case ICE_SW_TUN_UDP:
8454                 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8455                         return ICE_ERR_CFG;
8456                 break;
8457
8458         case ICE_SW_TUN_GENEVE:
8459         case ICE_SW_TUN_GENEVE_VLAN:
8460                 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8461                         return ICE_ERR_CFG;
8462                 break;
8463
8464         default:
8465                 /* Nothing needs to be done for this tunnel type */
8466                 return ICE_SUCCESS;
8467         }
8468
8469         /* Find the outer UDP protocol header and insert the port number */
8470         for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8471                 if (offsets[i].type == ICE_UDP_OF) {
8472                         struct ice_l4_hdr *hdr;
8473                         u16 offset;
8474
8475                         offset = offsets[i].offset;
8476                         hdr = (struct ice_l4_hdr *)&pkt[offset];
8477                         hdr->dst_port = CPU_TO_BE16(open_port);
8478
8479                         return ICE_SUCCESS;
8480                 }
8481         }
8482
8483         return ICE_ERR_CFG;
8484 }
8485
8486 /**
8487  * ice_find_adv_rule_entry - Search a rule entry
8488  * @hw: pointer to the hardware structure
8489  * @lkups: lookup elements or match criteria for the advanced recipe, one
8490  *         structure per protocol header
8491  * @lkups_cnt: number of protocols
8492  * @recp_id: recipe ID for which we are finding the rule
8493  * @rinfo: other information regarding the rule e.g. priority and action info
8494  *
8495  * Helper function to search for a given advanced rule entry
8496  * Returns pointer to entry storing the rule if found
8497  */
8498 static struct ice_adv_fltr_mgmt_list_entry *
8499 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8500                         u16 lkups_cnt, u16 recp_id,
8501                         struct ice_adv_rule_info *rinfo)
8502 {
8503         struct ice_adv_fltr_mgmt_list_entry *list_itr;
8504         struct ice_switch_info *sw = hw->switch_info;
8505         int i;
8506
8507         LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8508                             ice_adv_fltr_mgmt_list_entry, list_entry) {
8509                 bool lkups_matched = true;
8510
8511                 if (lkups_cnt != list_itr->lkups_cnt)
8512                         continue;
8513                 for (i = 0; i < list_itr->lkups_cnt; i++)
8514                         if (memcmp(&list_itr->lkups[i], &lkups[i],
8515                                    sizeof(*lkups))) {
8516                                 lkups_matched = false;
8517                                 break;
8518                         }
8519                 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8520                     rinfo->tun_type == list_itr->rule_info.tun_type &&
8521                     lkups_matched)
8522                         return list_itr;
8523         }
8524         return NULL;
8525 }
8526
8527 /**
8528  * ice_adv_add_update_vsi_list
8529  * @hw: pointer to the hardware structure
8530  * @m_entry: pointer to current adv filter management list entry
8531  * @cur_fltr: filter information from the book keeping entry
8532  * @new_fltr: filter information with the new VSI to be added
8533  *
8534  * Call AQ command to add or update previously created VSI list with new VSI.
8535  *
8536  * Helper function to do book keeping associated with adding filter information
8537  * The algorithm used for this book keeping is described below:
8538  * When a VSI needs to subscribe to a given advanced filter
8539  *      if only one VSI has been added till now
8540  *              Allocate a new VSI list and add two VSIs
8541  *              to this list using switch rule command
8542  *              Update the previously created switch rule with the
8543  *              newly created VSI list ID
8544  *      if a VSI list was previously created
8545  *              Add the new VSI to the previously created VSI list set
8546  *              using the update switch rule command
8547  */
static enum ice_status
ice_adv_add_update_vsi_list(struct ice_hw *hw,
			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
			    struct ice_adv_rule_info *cur_fltr,
			    struct ice_adv_rule_info *new_fltr)
{
	enum ice_status status;
	u16 vsi_list_id = 0;

	/* Rules forwarding to a queue/queue group or dropping cannot be
	 * shared across VSIs via a VSI list.
	 */
	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
		return ICE_ERR_NOT_IMPL;

	/* Mixing a queue/queue-group action with an existing VSI or
	 * VSI-list action is not supported either.
	 */
	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		 /* Only one entry existed in the mapping and it was not already
		  * a part of a VSI list. So, create a VSI list with the old and
		  * new VSIs.
		  */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
		    new_fltr->sw_act.fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  ICE_SW_LKUP_LAST);
		if (status)
			return status;

		ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;

		/* Update the previous switch rule of "forward to VSI" to
		 * "fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		/* Book keeping: record the new action/list ID and create the
		 * map tracking which VSIs are on the list.
		 */
		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
	} else {
		u16 vsi_handle = new_fltr->sw_act.vsi_handle;

		/* vsi_count >= 2 implies a VSI list must already exist */
		if (!m_entry->vsi_list_info)
			return ICE_ERR_CFG;

		/* A rule already exists with the new VSI being added */
		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
			return ICE_SUCCESS;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false,
						  ice_aqc_opc_update_sw_rules,
						  ICE_SW_LKUP_LAST);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			ice_set_bit(vsi_handle,
				    m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}
8636
8637 /**
8638  * ice_add_adv_rule - helper function to create an advanced switch rule
8639  * @hw: pointer to the hardware structure
8640  * @lkups: information on the words that needs to be looked up. All words
8641  * together makes one recipe
8642  * @lkups_cnt: num of entries in the lkups array
8643  * @rinfo: other information related to the rule that needs to be programmed
8644  * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
8645  *               ignored in case of error.
8646  *
8647  * This function can program only 1 rule at a time. The lkups is used to
8648  * describe all the words that form the "lookup" portion of the recipe.
8649  * These words can span multiple protocols. Callers to this function need to
8650  * pass in a list of protocol headers with lookup information along with a
8651  * mask that determines which words are valid from the given protocol header.
8652  * rinfo describes other information related to this rule such as forwarding
8653  * IDs, priority of this rule, etc.
8654  */
enum ice_status
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
	const struct ice_dummy_pkt_offsets *pkt_offsets;
	struct ice_aqc_sw_rules_elem *s_rule = NULL;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_switch_info *sw;
	enum ice_status status;
	const u8 *pkt = NULL;
	bool prof_rule;
	u16 word_cnt;
	u32 act = 0;
	u8 q_rgn;

	/* Initialize profile to result index bitmap */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);
	}

	/* Profile rules are the only rule kind permitted to carry zero
	 * lookup elements.
	 */
	prof_rule = ice_is_prof_rule(rinfo->tun_type);
	if (!prof_rule && !lkups_cnt)
		return ICE_ERR_PARAM;

	/* get # of words we need to match */
	word_cnt = 0;
	for (i = 0; i < lkups_cnt; i++) {
		u16 j, *ptr;

		/* each 16-bit word with a non-zero mask costs one match word */
		ptr = (u16 *)&lkups[i].m_u;
		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
			if (ptr[j] != 0)
				word_cnt++;
	}

	/* enforce the per-recipe chain word budget */
	if (prof_rule) {
		if (word_cnt > ICE_MAX_CHAIN_WORDS)
			return ICE_ERR_PARAM;
	} else {
		if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
			return ICE_ERR_PARAM;
	}

	/* make sure that we can locate a dummy packet */
	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
			      &pkt_offsets);
	if (!pkt) {
		status = ICE_ERR_PARAM;
		goto err_ice_add_adv_rule;
	}

	/* only these four filter actions are supported by this path */
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
		return ICE_ERR_CFG;

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Resolve the software VSI handle to the HW VSI number; TX rules
	 * also use it as the rule's source.
	 */
	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
	if (rinfo->sw_act.flag & ICE_FLTR_TX)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	/* Create (or locate an existing) recipe matching these lookups */
	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	if (status)
		return status;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	if (m_entry) {
		/* we have to add VSI to VSI_LIST and increment vsi_count.
		 * Also Update VSI list so that we can change forwarding rule
		 * if the rule already exists, we will check if it exists with
		 * same vsi_id, if not then add it to the VSI list if it already
		 * exists if not then create a VSI list and add the existing VSI
		 * ID and the new VSI ID to the list
		 * We will add that VSI to the list
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
						     rinfo);
		if (added_entry) {
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
		}
		return status;
	}
	/* Rule buffer = fixed rule header plus the dummy packet payload */
	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	act |= ICE_SINGLE_ACT_LAN_ENABLE;
	/* encode the requested filter action into the rule's action word */
	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is expressed as log2 of the group size */
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
	default:
		status = ICE_ERR_CFG;
		goto err_ice_add_adv_rule;
	}

	/* set the rule LOOKUP type based on caller specified 'RX'
	 * instead of hardcoding it to be either LOOKUP_TX/RX
	 *
	 * for 'RX' set the source to be the port number
	 * for 'TX' set the source to be the source HW VSI number (determined
	 * by caller)
	 */
	if (rinfo->rx) {
		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->pdata.lkup_tx_rx.src =
			CPU_TO_LE16(hw->port_info->lport);
	} else {
		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
	}

	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	/* populate the rule's packet header from the dummy packet and the
	 * caller's lookup values/masks
	 */
	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
					   pkt_len, pkt_offsets);
	if (status)
		goto err_ice_add_adv_rule;

	/* for real tunnels, patch the open UDP tunnel port into the header */
	if (rinfo->tun_type != ICE_NON_TUN &&
	    rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
						 s_rule->pdata.lkup_tx_rx.hdr,
						 pkt_offsets);
		if (status)
			goto err_ice_add_adv_rule;
	}

	/* program the rule in hardware via the admin queue */
	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
				 NULL);
	if (status)
		goto err_ice_add_adv_rule;
	adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
		ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
	if (!adv_fltr) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	}

	/* keep a private copy of the lookups for later rule matching;
	 * profile rules legitimately have none (lkups_cnt == 0)
	 */
	adv_fltr->lkups = (struct ice_adv_lkup_elem *)
		ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
			   ICE_NONDMA_TO_NONDMA);
	if (!adv_fltr->lkups && !prof_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	/* record the rule ID assigned by firmware */
	adv_fltr->rule_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	LIST_ADD(&adv_fltr->list_entry, rule_head);
	if (added_entry) {
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	}
err_ice_add_adv_rule:
	/* on failure, release any partially-built book keeping entry */
	if (status && adv_fltr) {
		ice_free(hw, adv_fltr->lkups);
		ice_free(hw, adv_fltr);
	}

	/* the AQ buffer is no longer needed once the rule is programmed */
	ice_free(hw, s_rule);

	return status;
}
8864
8865 /**
8866  * ice_adv_rem_update_vsi_list
8867  * @hw: pointer to the hardware structure
8868  * @vsi_handle: VSI handle of the VSI to remove
8869  * @fm_list: filter management entry for which the VSI list management needs to
8870  *           be done
8871  */
static enum ice_status
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status;
	u16 vsi_list_id;

	/* Only rules currently forwarding to a non-empty VSI list can have
	 * a VSI removed from them.
	 */
	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;

	/* ask firmware to drop this VSI from the list rule */
	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		struct ice_fltr_info tmp_fltr;
		u16 rem_vsi_handle;

		/* Exactly one VSI remains: collapse the VSI list back into
		 * a plain forward-to-VSI rule and delete the list.
		 */
		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
						    ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;

		/* Update the previous switch rule of "fwd to VSI list" to
		 * "forward to VSI" for the single remaining VSI
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);
			return status;
		}
		fm_list->vsi_list_info->ref_cnt--;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		/* drop the book keeping map for the now-deleted list */
		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
8954
8955 /**
8956  * ice_rem_adv_rule - removes existing advanced switch rule
8957  * @hw: pointer to the hardware structure
8958  * @lkups: information on the words that needs to be looked up. All words
8959  *         together makes one recipe
8960  * @lkups_cnt: num of entries in the lkups array
8961  * @rinfo: pointer to the rule information for the rule to be removed
8962  *
8963  * This function can be used to remove 1 rule at a time. The lkups is
8964  * used to describe all the words that forms the "lookup" portion of the
8965  * rule. These words can span multiple protocols. Callers to this function
8966  * need to pass in a list of protocol headers with lookup information along
8967  * and mask that determines which words are valid from the given protocol
8968  * header. rinfo describes other information related to this rule such as
8969  * forwarding IDs, priority of this rule, etc.
8970  */
enum ice_status
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	bool remove_rule = false;
	u16 i, rid, vsi_handle;

	/* Rebuild the word extraction list from the caller's lookups so the
	 * recipe this rule was programmed under can be located.
	 */
	ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
			return ICE_ERR_CFG;

		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
		if (!count)
			return ICE_ERR_CFG;
	}

	/* Create any special protocol/offset pairs, such as looking at tunnel
	 * bits by extracting metadata
	 */
	status = ice_add_special_words(rinfo, &lkup_exts);
	if (status)
		return status;

	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type, rinfo->priority);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return ICE_ERR_PARAM;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	if (!list_elem)
		return ICE_SUCCESS;
	ice_acquire_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* not shared via a VSI list: the rule itself can be removed */
		remove_rule = true;
	} else if (list_elem->vsi_count > 1) {
		/* other VSIs still use this rule; only drop this VSI from
		 * the shared VSI list
		 */
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	} else {
		/* last subscriber: drop the VSI and remove the rule when the
		 * list becomes empty
		 */
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status) {
			ice_release_lock(rule_lock);
			return status;
		}
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}
	ice_release_lock(rule_lock);
	if (remove_rule) {
		struct ice_aqc_sw_rules_elem *s_rule;
		u16 rule_buf_sz;

		/* removal needs only the rule header, no packet payload */
		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
		s_rule = (struct ice_aqc_sw_rules_elem *)
			ice_malloc(hw, rule_buf_sz);
		if (!s_rule)
			return ICE_ERR_NO_MEMORY;
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 rule_buf_sz, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		/* ICE_ERR_DOES_NOT_EXIST means firmware already dropped the
		 * rule; in both cases the book keeping entry must go
		 */
		if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
			struct ice_switch_info *sw = hw->switch_info;

			ice_acquire_lock(rule_lock);
			LIST_DEL(&list_elem->list_entry);
			ice_free(hw, list_elem->lkups);
			ice_free(hw, list_elem);
			ice_release_lock(rule_lock);
			if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
				sw->recp_list[rid].adv_rule = false;
		}
		ice_free(hw, s_rule);
	}
	return status;
}
9060
9061 /**
9062  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
9063  * @hw: pointer to the hardware structure
9064  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
9065  *
9066  * This function is used to remove 1 rule at a time. The removal is based on
9067  * the remove_entry parameter. This function will remove rule for a given
9068  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
9069  */
9070 enum ice_status
9071 ice_rem_adv_rule_by_id(struct ice_hw *hw,
9072                        struct ice_rule_query_data *remove_entry)
9073 {
9074         struct ice_adv_fltr_mgmt_list_entry *list_itr;
9075         struct LIST_HEAD_TYPE *list_head;
9076         struct ice_adv_rule_info rinfo;
9077         struct ice_switch_info *sw;
9078
9079         sw = hw->switch_info;
9080         if (!sw->recp_list[remove_entry->rid].recp_created)
9081                 return ICE_ERR_PARAM;
9082         list_head = &sw->recp_list[remove_entry->rid].filt_rules;
9083         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
9084                             list_entry) {
9085                 if (list_itr->rule_info.fltr_rule_id ==
9086                     remove_entry->rule_id) {
9087                         rinfo = list_itr->rule_info;
9088                         rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
9089                         return ice_rem_adv_rule(hw, list_itr->lkups,
9090                                                 list_itr->lkups_cnt, &rinfo);
9091                 }
9092         }
9093         /* either list is empty or unable to find rule */
9094         return ICE_ERR_DOES_NOT_EXIST;
9095 }
9096
9097 /**
9098  * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
9099  *                       given VSI handle
9100  * @hw: pointer to the hardware structure
9101  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
9102  *
9103  * This function is used to remove all the rules for a given VSI and as soon
9104  * as removing a rule fails, it will return immediately with the error code,
9105  * else it will return ICE_SUCCESS
9106  */
9107 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
9108 {
9109         struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
9110         struct ice_vsi_list_map_info *map_info;
9111         struct LIST_HEAD_TYPE *list_head;
9112         struct ice_adv_rule_info rinfo;
9113         struct ice_switch_info *sw;
9114         enum ice_status status;
9115         u8 rid;
9116
9117         sw = hw->switch_info;
9118         for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
9119                 if (!sw->recp_list[rid].recp_created)
9120                         continue;
9121                 if (!sw->recp_list[rid].adv_rule)
9122                         continue;
9123
9124                 list_head = &sw->recp_list[rid].filt_rules;
9125                 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
9126                                          ice_adv_fltr_mgmt_list_entry,
9127                                          list_entry) {
9128                         rinfo = list_itr->rule_info;
9129
9130                         if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
9131                                 map_info = list_itr->vsi_list_info;
9132                                 if (!map_info)
9133                                         continue;
9134
9135                                 if (!ice_is_bit_set(map_info->vsi_map,
9136                                                     vsi_handle))
9137                                         continue;
9138                         } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
9139                                 continue;
9140                         }
9141
9142                         rinfo.sw_act.vsi_handle = vsi_handle;
9143                         status = ice_rem_adv_rule(hw, list_itr->lkups,
9144                                                   list_itr->lkups_cnt, &rinfo);
9145
9146                         if (status)
9147                                 return status;
9148                 }
9149         }
9150         return ICE_SUCCESS;
9151 }
9152
9153 /**
9154  * ice_replay_fltr - Replay all the filters stored by a specific list head
9155  * @hw: pointer to the hardware structure
9156  * @list_head: list for which filters needs to be replayed
9157  * @recp_id: Recipe ID for which rules need to be replayed
9158  */
9159 static enum ice_status
9160 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
9161 {
9162         struct ice_fltr_mgmt_list_entry *itr;
9163         enum ice_status status = ICE_SUCCESS;
9164         struct ice_sw_recipe *recp_list;
9165         u8 lport = hw->port_info->lport;
9166         struct LIST_HEAD_TYPE l_head;
9167
9168         if (LIST_EMPTY(list_head))
9169                 return status;
9170
9171         recp_list = &hw->switch_info->recp_list[recp_id];
9172         /* Move entries from the given list_head to a temporary l_head so that
9173          * they can be replayed. Otherwise when trying to re-add the same
9174          * filter, the function will return already exists
9175          */
9176         LIST_REPLACE_INIT(list_head, &l_head);
9177
9178         /* Mark the given list_head empty by reinitializing it so filters
9179          * could be added again by *handler
9180          */
9181         LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
9182                             list_entry) {
9183                 struct ice_fltr_list_entry f_entry;
9184                 u16 vsi_handle;
9185
9186                 f_entry.fltr_info = itr->fltr_info;
9187                 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
9188                         status = ice_add_rule_internal(hw, recp_list, lport,
9189                                                        &f_entry);
9190                         if (status != ICE_SUCCESS)
9191                                 goto end;
9192                         continue;
9193                 }
9194
9195                 /* Add a filter per VSI separately */
9196                 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
9197                                      ICE_MAX_VSI) {
9198                         if (!ice_is_vsi_valid(hw, vsi_handle))
9199                                 break;
9200
9201                         ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9202                         f_entry.fltr_info.vsi_handle = vsi_handle;
9203                         f_entry.fltr_info.fwd_id.hw_vsi_id =
9204                                 ice_get_hw_vsi_num(hw, vsi_handle);
9205                         f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9206                         if (recp_id == ICE_SW_LKUP_VLAN)
9207                                 status = ice_add_vlan_internal(hw, recp_list,
9208                                                                &f_entry);
9209                         else
9210                                 status = ice_add_rule_internal(hw, recp_list,
9211                                                                lport,
9212                                                                &f_entry);
9213                         if (status != ICE_SUCCESS)
9214                                 goto end;
9215                 }
9216         }
9217 end:
9218         /* Clear the filter management list */
9219         ice_rem_sw_rule_info(hw, &l_head);
9220         return status;
9221 }
9222
9223 /**
9224  * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
9225  * @hw: pointer to the hardware structure
9226  *
9227  * NOTE: This function does not clean up partially added filters on error.
9228  * It is up to caller of the function to issue a reset or fail early.
9229  */
9230 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
9231 {
9232         struct ice_switch_info *sw = hw->switch_info;
9233         enum ice_status status = ICE_SUCCESS;
9234         u8 i;
9235
9236         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9237                 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
9238
9239                 status = ice_replay_fltr(hw, i, head);
9240                 if (status != ICE_SUCCESS)
9241                         return status;
9242         }
9243         return status;
9244 }
9245
9246 /**
9247  * ice_replay_vsi_fltr - Replay filters for requested VSI
9248  * @hw: pointer to the hardware structure
9249  * @pi: pointer to port information structure
9250  * @sw: pointer to switch info struct for which function replays filters
9251  * @vsi_handle: driver VSI handle
9252  * @recp_id: Recipe ID for which rules need to be replayed
9253  * @list_head: list for which filters need to be replayed
9254  *
9255  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
9256  * It is required to pass valid VSI handle.
9257  */
9258 static enum ice_status
9259 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9260                     struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
9261                     struct LIST_HEAD_TYPE *list_head)
9262 {
9263         struct ice_fltr_mgmt_list_entry *itr;
9264         enum ice_status status = ICE_SUCCESS;
9265         struct ice_sw_recipe *recp_list;
9266         u16 hw_vsi_id;
9267
9268         if (LIST_EMPTY(list_head))
9269                 return status;
9270         recp_list = &sw->recp_list[recp_id];
9271         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
9272
9273         LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
9274                             list_entry) {
9275                 struct ice_fltr_list_entry f_entry;
9276
9277                 f_entry.fltr_info = itr->fltr_info;
9278                 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
9279                     itr->fltr_info.vsi_handle == vsi_handle) {
9280                         /* update the src in case it is VSI num */
9281                         if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9282                                 f_entry.fltr_info.src = hw_vsi_id;
9283                         status = ice_add_rule_internal(hw, recp_list,
9284                                                        pi->lport,
9285                                                        &f_entry);
9286                         if (status != ICE_SUCCESS)
9287                                 goto end;
9288                         continue;
9289                 }
9290                 if (!itr->vsi_list_info ||
9291                     !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
9292                         continue;
9293                 /* Clearing it so that the logic can add it back */
9294                 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9295                 f_entry.fltr_info.vsi_handle = vsi_handle;
9296                 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9297                 /* update the src in case it is VSI num */
9298                 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9299                         f_entry.fltr_info.src = hw_vsi_id;
9300                 if (recp_id == ICE_SW_LKUP_VLAN)
9301                         status = ice_add_vlan_internal(hw, recp_list, &f_entry);
9302                 else
9303                         status = ice_add_rule_internal(hw, recp_list,
9304                                                        pi->lport,
9305                                                        &f_entry);
9306                 if (status != ICE_SUCCESS)
9307                         goto end;
9308         }
9309 end:
9310         return status;
9311 }
9312
9313 /**
9314  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9315  * @hw: pointer to the hardware structure
9316  * @vsi_handle: driver VSI handle
9317  * @list_head: list for which filters need to be replayed
9318  *
9319  * Replay the advanced rule for the given VSI.
9320  */
9321 static enum ice_status
9322 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9323                         struct LIST_HEAD_TYPE *list_head)
9324 {
9325         struct ice_rule_query_data added_entry = { 0 };
9326         struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9327         enum ice_status status = ICE_SUCCESS;
9328
9329         if (LIST_EMPTY(list_head))
9330                 return status;
9331         LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9332                             list_entry) {
9333                 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9334                 u16 lk_cnt = adv_fltr->lkups_cnt;
9335
9336                 if (vsi_handle != rinfo->sw_act.vsi_handle)
9337                         continue;
9338                 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9339                                           &added_entry);
9340                 if (status)
9341                         break;
9342         }
9343         return status;
9344 }
9345
9346 /**
9347  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9348  * @hw: pointer to the hardware structure
9349  * @pi: pointer to port information structure
9350  * @vsi_handle: driver VSI handle
9351  *
9352  * Replays filters for requested VSI via vsi_handle.
9353  */
9354 enum ice_status
9355 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9356                         u16 vsi_handle)
9357 {
9358         struct ice_switch_info *sw = hw->switch_info;
9359         enum ice_status status;
9360         u8 i;
9361
9362         /* Update the recipes that were created */
9363         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9364                 struct LIST_HEAD_TYPE *head;
9365
9366                 head = &sw->recp_list[i].filt_replay_rules;
9367                 if (!sw->recp_list[i].adv_rule)
9368                         status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9369                                                      head);
9370                 else
9371                         status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
9372                 if (status != ICE_SUCCESS)
9373                         return status;
9374         }
9375
9376         return ICE_SUCCESS;
9377 }
9378
9379 /**
9380  * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
9381  * @hw: pointer to the HW struct
9382  * @sw: pointer to switch info struct for which function removes filters
9383  *
9384  * Deletes the filter replay rules for given switch
9385  */
9386 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9387 {
9388         u8 i;
9389
9390         if (!sw)
9391                 return;
9392
9393         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9394                 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9395                         struct LIST_HEAD_TYPE *l_head;
9396
9397                         l_head = &sw->recp_list[i].filt_replay_rules;
9398                         if (!sw->recp_list[i].adv_rule)
9399                                 ice_rem_sw_rule_info(hw, l_head);
9400                         else
9401                                 ice_rem_adv_rule_info(hw, l_head);
9402                 }
9403         }
9404 }
9405
9406 /**
9407  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
9408  * @hw: pointer to the HW struct
9409  *
9410  * Deletes the filter replay rules.
9411  */
9412 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
9413 {
9414         ice_rm_sw_replay_rule_info(hw, hw->switch_info);
9415 }