net/ice/base: support GTPU for DCF switch filter
[dpdk.git] / drivers / net / ice / base / ice_switch.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2021 Intel Corporation
3  */
4
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
7 #include "ice_flow.h"
8
/* Byte offsets into the dummy Ethernet header below, plus protocol
 * constants used when building switch filter rules.
 */
#define ICE_ETH_DA_OFFSET               0       /* destination MAC address */
#define ICE_ETH_ETHTYPE_OFFSET          12      /* EtherType field */
#define ICE_ETH_VLAN_TCI_OFFSET         14      /* VLAN TCI field */
#define ICE_MAX_VLAN_ID                 0xFFF   /* VLAN ID is 12 bits wide */
#define ICE_IPV4_NVGRE_PROTO_ID         0x002F  /* IPv4 protocol number: GRE */
#define ICE_PPP_IPV6_PROTO_ID           0x0057  /* PPP protocol field: IPv6 */
#define ICE_IPV6_ETHER_ID               0x86DD  /* EtherType: IPv6 */
#define ICE_TCP_PROTO_ID                0x06    /* IP protocol number: TCP */
#define ICE_GTPU_PROFILE                24      /* HW profile ID for GTP-U */
#define ICE_ETH_P_8021Q                 0x8100  /* EtherType: 802.1Q VLAN tag */
19
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *      In case of VLAN filter first two bytes defines ether type (0x8100)
 *      and remaining two bytes are placeholder for programming a given VLAN ID
 *      In case of Ether type filter it is treated as header without VLAN tag
 *      and byte 12 and 13 is used to program a given Ether type instead
 */
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

/* One (protocol, byte-offset) entry of a dummy-packet layout table.
 * Tables built from these describe where each protocol header starts
 * inside the matching dummy_*_packet byte array.
 */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};
43
/* offset info for MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + TCP packet */
static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_TCP_IL,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + TCP */
static const u8 dummy_gre_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
88
/* offset info for MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + UDP packet */
static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_UDP_ILOS,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + UDP */
static const u8 dummy_gre_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,
};
130
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * inner MAC + inner IPv4 + TCP packet; the three tunnel types share
 * offset 42 because only one is present in a given rule.
 */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_TCP_IL,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + inner IPv4 + TCP */
static const u8 dummy_udp_tun_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34, dst port 4789 (VXLAN) */
	0x00, 0x46, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
181
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * inner MAC + inner IPv4 + UDP packet
 */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_UDP_ILOS,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + inner IPv4 + UDP */
static const u8 dummy_udp_tun_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34, dst port 4789 (VXLAN) */
	0x00, 0x3a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,
};
229
/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP */
static const u8 dummy_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
258
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_OFOS,	14 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_UDP_ILOS,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (801.1Q), IPv4:UDP dummy packet */
static const u8 dummy_vlan_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12, EtherType 0x8100 (VLAN) */

	0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14: TCI placeholder + 0x0800 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
290
/* offset info for MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_TCP_IL,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + TCP */
static const u8 dummy_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
322
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_OFOS,	14 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_TCP_IL,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (801.1Q), IPv4:TCP dummy packet */
static const u8 dummy_vlan_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12, EtherType 0x8100 (VLAN) */

	0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14: TCI placeholder + 0x0800 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
357
/* offset info for MAC + IPv6 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_TCP_IL,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv6 + TCP */
static const u8 dummy_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
392
/* C-tag (802.1Q): IPv6 + TCP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_tcp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_OFOS,	14 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_TCP_IL,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + TCP dummy packet */
static const u8 dummy_vlan_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12, EtherType 0x8100 (VLAN) */

	0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14: TCI placeholder + 0x86DD */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
433
/* IPv6 + UDP */
static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* IPv6 + UDP dummy packet */
static const u8 dummy_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00, /* UDP length 16: header + 8-byte payload */

	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
470
/* C-tag (802.1Q): IPv6 + UDP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_udp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_OFOS,	14 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_UDP_ILOS,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + UDP dummy packet */
static const u8 dummy_vlan_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12, EtherType 0x8100 (VLAN) */

	0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14: TCI placeholder + 0x86DD */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
508
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_TCP_IL,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv4 + UDP + GTP-U + inner IPv4 + TCP */
static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x58, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 2152 (GTP-U) */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85, /* next ext hdr: PDU session container */

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
556
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_UDP_ILOS,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv4 + UDP + GTP-U + inner IPv4 + UDP */
static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x4c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 2152 (GTP-U) */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85, /* next ext hdr: PDU session container */

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 82 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
601
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP
 * (original comment said outer IPv6/inner IPv4; the offsets and bytes
 * below show outer IPv4 at 14 and inner IPv6 at 62)
 */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv4 + UDP + GTP-U + inner IPv6 + TCP */
static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x6c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 2152 (GTP-U) */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85, /* next ext hdr: PDU session container */

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x14, 0x06, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
654
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv4 + UDP + GTP-U + inner IPv6 + UDP */
static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x60, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 2152 (GTP-U) */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85, /* next ext hdr: PDU session container */

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
703
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv6 + UDP + GTP-U + inner IPv4 + TCP */
static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x44, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54, dst port 2152 (GTP-U) */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85, /* next ext hdr: PDU session container */

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
755
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv6 + UDP + GTP-U + inner IPv4 + UDP */
static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x38, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54, dst port 2152 (GTP-U) */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85, /* next ext hdr: PDU session container */

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
804
805 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
806         { ICE_MAC_OFOS,         0 },
807         { ICE_IPV6_OFOS,        14 },
808         { ICE_UDP_OF,           54 },
809         { ICE_GTP,              62 },
810         { ICE_IPV6_IL,          82 },
811         { ICE_TCP_IL,           102 },
812         { ICE_PROTOCOL_LAST,    0 },
813 };
814
/* Dummy packet for outer IPv6 + UDP + GTP-U + inner IPv6 + TCP */
static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x58, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54, dst port 2152 (GTP-U) */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85, /* next ext hdr: PDU session container */

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x14, 0x06, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 122 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
861
862 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
863         { ICE_MAC_OFOS,         0 },
864         { ICE_IPV6_OFOS,        14 },
865         { ICE_UDP_OF,           54 },
866         { ICE_GTP,              62 },
867         { ICE_IPV6_IL,          82 },
868         { ICE_UDP_ILOS,         102 },
869         { ICE_PROTOCOL_LAST,    0 },
870 };
871
/* Dummy packet for outer IPv6 + UDP + GTP-U + inner IPv6 + UDP */
static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x4c, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54, dst port 2152 (GTP-U) */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85, /* next ext hdr: PDU session container */

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 122 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
915
/* Header offsets for the IPv4 / UDP / GTP-U / IPv4 dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / IPv4 / UDP / GTP-U (with PDU session ext hdr) / IPv4 */
static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00, /* protocol UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 -- port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
954
/* Header offsets for the IPv4 / UDP / GTP-U / IPv6 dummy packet below */
static const
struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / IPv4 / UDP / GTP-U (with PDU session ext hdr) / IPv6 */
static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00, /* protocol UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 -- port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
	0x00, 0x00, 0x3b, 0x00, /* next header: no next header */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
1000
/* Header offsets for the IPv6 / UDP / GTP-U / IPv4 dummy packet below */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / IPv6 / UDP / GTP-U (with PDU session ext hdr) / IPv4 */
static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 -- port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
1046
/* Header offsets for the IPv6 / UDP / GTP-U / IPv6 dummy packet below */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / IPv6 / UDP / GTP-U (with PDU session ext hdr) / IPv6 */
static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 -- port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 82 */
	0x00, 0x00, 0x3b, 0x00, /* next header: no next header */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
1097
/* Header offsets for the IPv4 / UDP / GTP-U dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / IPv4 / UDP / GTP-U (with PDU session ext hdr),
 * no inner payload
 */
static const u8 dummy_udp_gtp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 -- dst port 2152 (GTP-U) */
	0x00, 0x1c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

};
1129
/* Header offsets for GTP-U packets matched without inner payload (IPv4 outer) */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP_NO_PAY,	42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Header offsets for GTP-U packets matched without inner payload (IPv6 outer) */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP_NO_PAY,	62 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1146
/* Dummy packet: Ether / IPv6 / UDP / GTP-U header only (no extension
 * header, no inner payload)
 */
static const u8 dummy_ipv6_gtp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 -- port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x30, 0x00, 0x00, 0x28,  /* ICE_GTP_NO_PAY 62 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,
};
1172
/* Header offsets for a bare VLAN-tagged PPPoE dummy packet */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_OFOS,	14 },
	{ ICE_PPPOE,		18 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Header offsets for the VLAN / PPPoE / IPv4 dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_OFOS,	14 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV4_OFOS,	26 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / 802.1Q / PPPoE / PPP (IPv4) / IPv4 */
static const u8 dummy_pppoe_ipv4_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 24 */

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 26 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1212
/* Header offsets for the VLAN / PPPoE / IPv4 / TCP dummy packet below */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_OFOS,	14 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV4_OFOS,	26 },
	{ ICE_TCP_IL,		46 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / 802.1Q / PPPoE / PPP (IPv4) / IPv4 / TCP */
static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 24 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00, /* protocol TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1252
/* Header offsets for the VLAN / PPPoE / IPv4 / UDP dummy packet below */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_OFOS,	14 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV4_OFOS,	26 },
	{ ICE_UDP_ILOS,		46 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / 802.1Q / PPPoE / PPP (IPv4) / IPv4 / UDP */
static const u8 dummy_pppoe_ipv4_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 24 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1289
/* Header offsets for the VLAN / PPPoE / IPv6 dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_OFOS,	14 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV6_OFOS,	26 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / 802.1Q / PPPoE / PPP (IPv6) / IPv6 */
static const u8 dummy_pppoe_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 24 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x00, 0x3b, 0x00, /* next header: no next header */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1326
/* Header offsets for the VLAN / PPPoE / IPv6 / TCP dummy packet below */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_OFOS,	14 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV6_OFOS,	26 },
	{ ICE_TCP_IL,		66 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / 802.1Q / PPPoE / PPP (IPv6) / IPv6 / TCP */
static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 24 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1371
/* Header offsets for the VLAN / PPPoE / IPv6 / UDP dummy packet below */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_OFOS,	14 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV6_OFOS,	26 },
	{ ICE_UDP_ILOS,		66 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / 802.1Q / PPPoE / PPP (IPv6) / IPv6 / UDP */
static const u8 dummy_pppoe_ipv6_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 24 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1413
/* Header offsets for the IPv4 / ESP dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_ESP,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / IPv4 / ESP */
static const u8 dummy_ipv4_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x32, 0x00, 0x00, /* protocol 0x32 = ESP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1437
/* Header offsets for the IPv6 / ESP dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_ESP,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / IPv6 / ESP */
static const u8 dummy_ipv6_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x08, 0x32, 0x00, /* Next header ESP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1466
/* Header offsets for the IPv4 / AH dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_AH,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / IPv4 / AH */
static const u8 dummy_ipv4_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x33, 0x00, 0x00, /* protocol 0x33 = AH */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1491
/* Header offsets for the IPv6 / AH dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_AH,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / IPv6 / AH */
static const u8 dummy_ipv6_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x33, 0x00, /* Next header AH */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1521
/* Header offsets for the IPv4 / UDP / NAT-T dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_NAT_T,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / IPv4 / UDP (dst 4500) / ESP-in-UDP (NAT-T) */
static const u8 dummy_ipv4_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00, /* protocol UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x11, 0x94, /* ICE_UDP_ILOS 34 -- dst port 4500 (NAT-T) */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_NAT_T 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1549
/* Header offsets for the IPv6 / UDP / NAT-T dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_NAT_T,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / IPv6 / UDP (dst 4500) / ESP-in-UDP (NAT-T) */
static const u8 dummy_ipv6_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP (NAT-T) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x11, 0x94, /* ICE_UDP_ILOS 54 -- dst port 4500 (NAT-T) */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_NAT_T 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */

};
1583
/* Header offsets for the IPv4 / L2TPv3 dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_L2TPV3,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / IPv4 / L2TPv3 over IP */
static const u8 dummy_ipv4_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00, /* protocol 0x73 = L2TPv3 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1608
/* Header offsets for the IPv6 / L2TPv3 dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_L2TPV3,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / IPv6 / L2TPv3 over IP */
static const u8 dummy_ipv6_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x73, 0x40, /* next header 0x73 = L2TPv3 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1638
/* Header offsets for the QinQ (double VLAN) / IPv4 dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_EX,		14 },
	{ ICE_VLAN_IN,		18 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / 802.1ad(0x9100) / 802.1Q / IPv4 / UDP */
static const u8 dummy_qinq_ipv4_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00,		/* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
	0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_IN 18 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
1669
/* Header offsets for the QinQ (double VLAN) / IPv6 dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_EX,		14 },
	{ ICE_VLAN_IN,		18 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / 802.1ad(0x9100) / 802.1Q / IPv6 / UDP */
static const u8 dummy_qinq_ipv6_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00,		/* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
	0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_IN 18 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
1708
/* Header offsets for a QinQ / PPPoE dummy packet (no inner L3) */
static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_EX,		14 },
	{ ICE_VLAN_IN,		18 },
	{ ICE_PPPOE,		22 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Header offsets for the QinQ / PPPoE / IPv4 dummy packet below */
static const
struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_EX,		14 },
	{ ICE_VLAN_IN,		18 },
	{ ICE_PPPOE,		22 },
	{ ICE_IPV4_OFOS,	30 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1728
/* Dummy packet: Ether / 802.1ad(0x9100) / 802.1Q / PPPoE / PPP (IPv4) / IPv4 */
static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00,		/* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_IN 18 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 28 */

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 30 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
1752
/* Header offsets for the QinQ / PPPoE / IPv6 dummy packet below */
static const
struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_EX,		14 },
	{ ICE_VLAN_IN,		18 },
	{ ICE_PPPOE,		22 },
	{ ICE_IPV6_OFOS,	30 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ether / 802.1ad(0x9100) / 802.1Q / PPPoE / PPP (IPv6) / IPv6 */
static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00,		/* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_IN 18 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 28*/

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
	0x00, 0x00, 0x3b, 0x00, /* next header: no next header */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1792
/* this is a recipe to profile association bitmap */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);

/* forward declaration; the definition appears later in this file */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1803 /**
1804  * ice_collect_result_idx - copy result index values
1805  * @buf: buffer that contains the result index
1806  * @recp: the recipe struct to copy data into
1807  */
1808 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1809                                    struct ice_sw_recipe *recp)
1810 {
1811         if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1812                 ice_set_bit(buf->content.result_indx &
1813                             ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1814 }
1815
/* Map of GTPU profile IDs to the switch tunnel type each represents.
 * Used by ice_get_tun_type_for_recipe() to refine a generic
 * ICE_SW_TUN_GTP result into the specific outer IPv4/IPv6, optional
 * extension header (EH), and inner IPv4/IPv6 + UDP/TCP/other variant.
 * Table has ICE_GTPU_PROFILE (24) entries.
 */
static struct ice_prof_type_entry ice_prof_type_tbl[ICE_GTPU_PROFILE] = {
	{ ICE_PROFID_IPV4_GTPU_IPV4_OTHER,    ICE_SW_TUN_IPV4_GTPU_IPV4},
	{ ICE_PROFID_IPV4_GTPU_IPV4_UDP,      ICE_SW_TUN_IPV4_GTPU_IPV4_UDP},
	{ ICE_PROFID_IPV4_GTPU_IPV4_TCP,      ICE_SW_TUN_IPV4_GTPU_IPV4_TCP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV4},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP},
	{ ICE_PROFID_IPV4_GTPU_IPV6_OTHER,    ICE_SW_TUN_IPV4_GTPU_IPV6},
	{ ICE_PROFID_IPV4_GTPU_IPV6_UDP,      ICE_SW_TUN_IPV4_GTPU_IPV6_UDP},
	{ ICE_PROFID_IPV4_GTPU_IPV6_TCP,      ICE_SW_TUN_IPV4_GTPU_IPV6_TCP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV6},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP},
	{ ICE_PROFID_IPV6_GTPU_IPV4_OTHER,    ICE_SW_TUN_IPV6_GTPU_IPV4},
	{ ICE_PROFID_IPV6_GTPU_IPV4_UDP,      ICE_SW_TUN_IPV6_GTPU_IPV4_UDP},
	{ ICE_PROFID_IPV6_GTPU_IPV4_TCP,      ICE_SW_TUN_IPV6_GTPU_IPV4_TCP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV4},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP},
	{ ICE_PROFID_IPV6_GTPU_IPV6_OTHER,    ICE_SW_TUN_IPV6_GTPU_IPV6},
	{ ICE_PROFID_IPV6_GTPU_IPV6_UDP,      ICE_SW_TUN_IPV6_GTPU_IPV6_UDP},
	{ ICE_PROFID_IPV6_GTPU_IPV6_TCP,      ICE_SW_TUN_IPV6_GTPU_IPV6_TCP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV6},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP},
};
1842
1843 /**
1844  * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1845  * @rid: recipe ID that we are populating
1846  */
1847 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
1848 {
1849         u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1850         u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1851         u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1852         u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1853         enum ice_sw_tunnel_type tun_type;
1854         u16 i, j, k, profile_num = 0;
1855         bool non_tun_valid = false;
1856         bool pppoe_valid = false;
1857         bool vxlan_valid = false;
1858         bool gre_valid = false;
1859         bool gtp_valid = false;
1860         bool flag_valid = false;
1861
1862         for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1863                 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1864                         continue;
1865                 else
1866                         profile_num++;
1867
1868                 for (i = 0; i < 12; i++) {
1869                         if (gre_profile[i] == j)
1870                                 gre_valid = true;
1871                 }
1872
1873                 for (i = 0; i < 12; i++) {
1874                         if (vxlan_profile[i] == j)
1875                                 vxlan_valid = true;
1876                 }
1877
1878                 for (i = 0; i < 7; i++) {
1879                         if (pppoe_profile[i] == j)
1880                                 pppoe_valid = true;
1881                 }
1882
1883                 for (i = 0; i < 6; i++) {
1884                         if (non_tun_profile[i] == j)
1885                                 non_tun_valid = true;
1886                 }
1887
1888                 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1889                     j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
1890                         gtp_valid = true;
1891
1892                 if ((j >= ICE_PROFID_IPV4_ESP &&
1893                      j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1894                     (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1895                      j <= ICE_PROFID_IPV6_GTPU_TEID))
1896                         flag_valid = true;
1897         }
1898
1899         if (!non_tun_valid && vxlan_valid)
1900                 tun_type = ICE_SW_TUN_VXLAN;
1901         else if (!non_tun_valid && gre_valid)
1902                 tun_type = ICE_SW_TUN_NVGRE;
1903         else if (!non_tun_valid && pppoe_valid)
1904                 tun_type = ICE_SW_TUN_PPPOE;
1905         else if (!non_tun_valid && gtp_valid)
1906                 tun_type = ICE_SW_TUN_GTP;
1907         else if (non_tun_valid &&
1908                  (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1909                 tun_type = ICE_SW_TUN_AND_NON_TUN;
1910         else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1911                  !pppoe_valid)
1912                 tun_type = ICE_NON_TUN;
1913         else
1914                 tun_type = ICE_NON_TUN;
1915
1916         if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1917                 i = ice_is_bit_set(recipe_to_profile[rid],
1918                                    ICE_PROFID_PPPOE_IPV4_OTHER);
1919                 j = ice_is_bit_set(recipe_to_profile[rid],
1920                                    ICE_PROFID_PPPOE_IPV6_OTHER);
1921                 if (i && !j)
1922                         tun_type = ICE_SW_TUN_PPPOE_IPV4;
1923                 else if (!i && j)
1924                         tun_type = ICE_SW_TUN_PPPOE_IPV6;
1925         }
1926
1927         if (tun_type == ICE_SW_TUN_GTP) {
1928                 for (k = 0; k < ARRAY_SIZE(ice_prof_type_tbl); k++)
1929                         if (ice_is_bit_set(recipe_to_profile[rid],
1930                                            ice_prof_type_tbl[k].prof_id)) {
1931                                 tun_type = ice_prof_type_tbl[k].type;
1932                                 break;
1933                         }
1934         }
1935
1936         if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1937                 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1938                         if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1939                                 switch (j) {
1940                                 case ICE_PROFID_IPV4_TCP:
1941                                         tun_type = ICE_SW_IPV4_TCP;
1942                                         break;
1943                                 case ICE_PROFID_IPV4_UDP:
1944                                         tun_type = ICE_SW_IPV4_UDP;
1945                                         break;
1946                                 case ICE_PROFID_IPV6_TCP:
1947                                         tun_type = ICE_SW_IPV6_TCP;
1948                                         break;
1949                                 case ICE_PROFID_IPV6_UDP:
1950                                         tun_type = ICE_SW_IPV6_UDP;
1951                                         break;
1952                                 case ICE_PROFID_PPPOE_PAY:
1953                                         tun_type = ICE_SW_TUN_PPPOE_PAY;
1954                                         break;
1955                                 case ICE_PROFID_PPPOE_IPV4_TCP:
1956                                         tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1957                                         break;
1958                                 case ICE_PROFID_PPPOE_IPV4_UDP:
1959                                         tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1960                                         break;
1961                                 case ICE_PROFID_PPPOE_IPV4_OTHER:
1962                                         tun_type = ICE_SW_TUN_PPPOE_IPV4;
1963                                         break;
1964                                 case ICE_PROFID_PPPOE_IPV6_TCP:
1965                                         tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1966                                         break;
1967                                 case ICE_PROFID_PPPOE_IPV6_UDP:
1968                                         tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1969                                         break;
1970                                 case ICE_PROFID_PPPOE_IPV6_OTHER:
1971                                         tun_type = ICE_SW_TUN_PPPOE_IPV6;
1972                                         break;
1973                                 case ICE_PROFID_IPV4_ESP:
1974                                         tun_type = ICE_SW_TUN_IPV4_ESP;
1975                                         break;
1976                                 case ICE_PROFID_IPV6_ESP:
1977                                         tun_type = ICE_SW_TUN_IPV6_ESP;
1978                                         break;
1979                                 case ICE_PROFID_IPV4_AH:
1980                                         tun_type = ICE_SW_TUN_IPV4_AH;
1981                                         break;
1982                                 case ICE_PROFID_IPV6_AH:
1983                                         tun_type = ICE_SW_TUN_IPV6_AH;
1984                                         break;
1985                                 case ICE_PROFID_IPV4_NAT_T:
1986                                         tun_type = ICE_SW_TUN_IPV4_NAT_T;
1987                                         break;
1988                                 case ICE_PROFID_IPV6_NAT_T:
1989                                         tun_type = ICE_SW_TUN_IPV6_NAT_T;
1990                                         break;
1991                                 case ICE_PROFID_IPV4_PFCP_NODE:
1992                                         tun_type =
1993                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1994                                         break;
1995                                 case ICE_PROFID_IPV6_PFCP_NODE:
1996                                         tun_type =
1997                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1998                                         break;
1999                                 case ICE_PROFID_IPV4_PFCP_SESSION:
2000                                         tun_type =
2001                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
2002                                         break;
2003                                 case ICE_PROFID_IPV6_PFCP_SESSION:
2004                                         tun_type =
2005                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
2006                                         break;
2007                                 case ICE_PROFID_MAC_IPV4_L2TPV3:
2008                                         tun_type = ICE_SW_TUN_IPV4_L2TPV3;
2009                                         break;
2010                                 case ICE_PROFID_MAC_IPV6_L2TPV3:
2011                                         tun_type = ICE_SW_TUN_IPV6_L2TPV3;
2012                                         break;
2013                                 case ICE_PROFID_IPV4_GTPU_TEID:
2014                                         tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
2015                                         break;
2016                                 case ICE_PROFID_IPV6_GTPU_TEID:
2017                                         tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
2018                                         break;
2019                                 default:
2020                                         break;
2021                                 }
2022
2023                                 return tun_type;
2024                         }
2025                 }
2026         }
2027
2028         if (vlan && tun_type == ICE_SW_TUN_PPPOE)
2029                 tun_type = ICE_SW_TUN_PPPOE_QINQ;
2030         else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
2031                 tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
2032         else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
2033                 tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
2034         else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
2035                 tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
2036         else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
2037                 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
2038         else if (vlan && tun_type == ICE_NON_TUN)
2039                 tun_type = ICE_NON_TUN_QINQ;
2040
2041         return tun_type;
2042 }
2043
2044 /**
2045  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
2046  * @hw: pointer to hardware structure
2047  * @recps: struct that we need to populate
2048  * @rid: recipe ID that we are populating
2049  * @refresh_required: true if we should get recipe to profile mapping from FW
2050  *
2051  * This function is used to populate all the necessary entries into our
2052  * bookkeeping so that we have a current list of all the recipes that are
2053  * programmed in the firmware.
2054  */
2055 static enum ice_status
2056 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2057                     bool *refresh_required)
2058 {
2059         ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
2060         struct ice_aqc_recipe_data_elem *tmp;
2061         u16 num_recps = ICE_MAX_NUM_RECIPES;
2062         struct ice_prot_lkup_ext *lkup_exts;
2063         enum ice_status status;
2064         u8 fv_word_idx = 0;
2065         bool vlan = false;
2066         u16 sub_recps;
2067
2068         ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
2069
2070         /* we need a buffer big enough to accommodate all the recipes */
2071         tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
2072                 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
2073         if (!tmp)
2074                 return ICE_ERR_NO_MEMORY;
2075
2076         tmp[0].recipe_indx = rid;
2077         status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2078         /* non-zero status meaning recipe doesn't exist */
2079         if (status)
2080                 goto err_unroll;
2081
2082         /* Get recipe to profile map so that we can get the fv from lkups that
2083          * we read for a recipe from FW. Since we want to minimize the number of
2084          * times we make this FW call, just make one call and cache the copy
2085          * until a new recipe is added. This operation is only required the
2086          * first time to get the changes from FW. Then to search existing
2087          * entries we don't need to update the cache again until another recipe
2088          * gets added.
2089          */
2090         if (*refresh_required) {
2091                 ice_get_recp_to_prof_map(hw);
2092                 *refresh_required = false;
2093         }
2094
2095         /* Start populating all the entries for recps[rid] based on lkups from
2096          * firmware. Note that we are only creating the root recipe in our
2097          * database.
2098          */
2099         lkup_exts = &recps[rid].lkup_exts;
2100
2101         for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2102                 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2103                 struct ice_recp_grp_entry *rg_entry;
2104                 u8 i, prof, idx, prot = 0;
2105                 bool is_root;
2106                 u16 off = 0;
2107
2108                 rg_entry = (struct ice_recp_grp_entry *)
2109                         ice_malloc(hw, sizeof(*rg_entry));
2110                 if (!rg_entry) {
2111                         status = ICE_ERR_NO_MEMORY;
2112                         goto err_unroll;
2113                 }
2114
2115                 idx = root_bufs.recipe_indx;
2116                 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2117
2118                 /* Mark all result indices in this chain */
2119                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2120                         ice_set_bit(root_bufs.content.result_indx &
2121                                     ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
2122
2123                 /* get the first profile that is associated with rid */
2124                 prof = ice_find_first_bit(recipe_to_profile[idx],
2125                                           ICE_MAX_NUM_PROFILES);
2126                 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2127                         u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2128
2129                         rg_entry->fv_idx[i] = lkup_indx;
2130                         rg_entry->fv_mask[i] =
2131                                 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
2132
2133                         /* If the recipe is a chained recipe then all its
2134                          * child recipe's result will have a result index.
2135                          * To fill fv_words we should not use those result
2136                          * index, we only need the protocol ids and offsets.
2137                          * We will skip all the fv_idx which stores result
2138                          * index in them. We also need to skip any fv_idx which
2139                          * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2140                          * valid offset value.
2141                          */
2142                         if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
2143                                            rg_entry->fv_idx[i]) ||
2144                             rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2145                             rg_entry->fv_idx[i] == 0)
2146                                 continue;
2147
2148                         ice_find_prot_off(hw, ICE_BLK_SW, prof,
2149                                           rg_entry->fv_idx[i], &prot, &off);
2150                         lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2151                         lkup_exts->fv_words[fv_word_idx].off = off;
2152                         lkup_exts->field_mask[fv_word_idx] =
2153                                 rg_entry->fv_mask[i];
2154                         if (prot == ICE_META_DATA_ID_HW &&
2155                             off == ICE_TUN_FLAG_MDID_OFF)
2156                                 vlan = true;
2157                         fv_word_idx++;
2158                 }
2159                 /* populate rg_list with the data from the child entry of this
2160                  * recipe
2161                  */
2162                 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
2163
2164                 /* Propagate some data to the recipe database */
2165                 recps[idx].is_root = !!is_root;
2166                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2167                 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2168                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2169                         recps[idx].chain_idx = root_bufs.content.result_indx &
2170                                 ~ICE_AQ_RECIPE_RESULT_EN;
2171                         ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2172                 } else {
2173                         recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2174                 }
2175
2176                 if (!is_root)
2177                         continue;
2178
2179                 /* Only do the following for root recipes entries */
2180                 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2181                            sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
2182                 recps[idx].root_rid = root_bufs.content.rid &
2183                         ~ICE_AQ_RECIPE_ID_IS_ROOT;
2184                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2185         }
2186
2187         /* Complete initialization of the root recipe entry */
2188         lkup_exts->n_val_words = fv_word_idx;
2189         recps[rid].big_recp = (num_recps > 1);
2190         recps[rid].n_grp_count = (u8)num_recps;
2191         recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
2192         recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
2193                 ice_memdup(hw, tmp, recps[rid].n_grp_count *
2194                            sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
2195         if (!recps[rid].root_buf)
2196                 goto err_unroll;
2197
2198         /* Copy result indexes */
2199         ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2200         recps[rid].recp_created = true;
2201
2202 err_unroll:
2203         ice_free(hw, tmp);
2204         return status;
2205 }
2206
2207 /**
2208  * ice_get_recp_to_prof_map - updates recipe to profile mapping
2209  * @hw: pointer to hardware structure
2210  *
2211  * This function is used to populate recipe_to_profile matrix where index to
2212  * this array is the recipe ID and the element is the mapping of which profiles
2213  * is this recipe mapped to.
2214  */
2215 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2216 {
2217         ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2218         u16 i;
2219
2220         for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2221                 u16 j;
2222
2223                 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2224                 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2225                 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2226                         continue;
2227                 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
2228                               ICE_MAX_NUM_RECIPES);
2229                 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2230                         ice_set_bit(i, recipe_to_profile[j]);
2231         }
2232 }
2233
2234 /**
2235  * ice_init_def_sw_recp - initialize the recipe book keeping tables
2236  * @hw: pointer to the HW struct
2237  * @recp_list: pointer to sw recipe list
2238  *
2239  * Allocate memory for the entire recipe table and initialize the structures/
2240  * entries corresponding to basic recipes.
2241  */
2242 enum ice_status
2243 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
2244 {
2245         struct ice_sw_recipe *recps;
2246         u8 i;
2247
2248         recps = (struct ice_sw_recipe *)
2249                 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
2250         if (!recps)
2251                 return ICE_ERR_NO_MEMORY;
2252
2253         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2254                 recps[i].root_rid = i;
2255                 INIT_LIST_HEAD(&recps[i].filt_rules);
2256                 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
2257                 INIT_LIST_HEAD(&recps[i].rg_list);
2258                 ice_init_lock(&recps[i].filt_rule_lock);
2259         }
2260
2261         *recp_list = recps;
2262
2263         return ICE_SUCCESS;
2264 }
2265
2266 /**
2267  * ice_aq_get_sw_cfg - get switch configuration
2268  * @hw: pointer to the hardware structure
2269  * @buf: pointer to the result buffer
2270  * @buf_size: length of the buffer available for response
2271  * @req_desc: pointer to requested descriptor
2272  * @num_elems: pointer to number of elements
2273  * @cd: pointer to command details structure or NULL
2274  *
2275  * Get switch configuration (0x0200) to be placed in buf.
2276  * This admin command returns information such as initial VSI/port number
2277  * and switch ID it belongs to.
2278  *
2279  * NOTE: *req_desc is both an input/output parameter.
2280  * The caller of this function first calls this function with *request_desc set
2281  * to 0. If the response from f/w has *req_desc set to 0, all the switch
2282  * configuration information has been returned; if non-zero (meaning not all
2283  * the information was returned), the caller should call this function again
2284  * with *req_desc set to the previous value returned by f/w to get the
2285  * next block of switch configuration information.
2286  *
2287  * *num_elems is output only parameter. This reflects the number of elements
2288  * in response buffer. The caller of this function to use *num_elems while
2289  * parsing the response buffer.
2290  */
2291 static enum ice_status
2292 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
2293                   u16 buf_size, u16 *req_desc, u16 *num_elems,
2294                   struct ice_sq_cd *cd)
2295 {
2296         struct ice_aqc_get_sw_cfg *cmd;
2297         struct ice_aq_desc desc;
2298         enum ice_status status;
2299
2300         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
2301         cmd = &desc.params.get_sw_conf;
2302         cmd->element = CPU_TO_LE16(*req_desc);
2303
2304         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2305         if (!status) {
2306                 *req_desc = LE16_TO_CPU(cmd->element);
2307                 *num_elems = LE16_TO_CPU(cmd->num_elems);
2308         }
2309
2310         return status;
2311 }
2312
2313 /**
2314  * ice_alloc_rss_global_lut - allocate a RSS global LUT
2315  * @hw: pointer to the HW struct
2316  * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2317  * @global_lut_id: output parameter for the RSS global LUT's ID
2318  */
2319 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
2320 {
2321         struct ice_aqc_alloc_free_res_elem *sw_buf;
2322         enum ice_status status;
2323         u16 buf_len;
2324
2325         buf_len = ice_struct_size(sw_buf, elem, 1);
2326         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2327         if (!sw_buf)
2328                 return ICE_ERR_NO_MEMORY;
2329
2330         sw_buf->num_elems = CPU_TO_LE16(1);
2331         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
2332                                        (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2333                                        ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2334
2335         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
2336         if (status) {
2337                 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
2338                           shared_res ? "shared" : "dedicated", status);
2339                 goto ice_alloc_global_lut_exit;
2340         }
2341
2342         *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2343
2344 ice_alloc_global_lut_exit:
2345         ice_free(hw, sw_buf);
2346         return status;
2347 }
2348
2349 /**
2350  * ice_free_rss_global_lut - free a RSS global LUT
2351  * @hw: pointer to the HW struct
2352  * @global_lut_id: ID of the RSS global LUT to free
2353  */
2354 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
2355 {
2356         struct ice_aqc_alloc_free_res_elem *sw_buf;
2357         u16 buf_len, num_elems = 1;
2358         enum ice_status status;
2359
2360         buf_len = ice_struct_size(sw_buf, elem, num_elems);
2361         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2362         if (!sw_buf)
2363                 return ICE_ERR_NO_MEMORY;
2364
2365         sw_buf->num_elems = CPU_TO_LE16(num_elems);
2366         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
2367         sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
2368
2369         status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
2370         if (status)
2371                 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
2372                           global_lut_id, status);
2373
2374         ice_free(hw, sw_buf);
2375         return status;
2376 }
2377
2378 /**
2379  * ice_alloc_sw - allocate resources specific to switch
2380  * @hw: pointer to the HW struct
2381  * @ena_stats: true to turn on VEB stats
2382  * @shared_res: true for shared resource, false for dedicated resource
2383  * @sw_id: switch ID returned
2384  * @counter_id: VEB counter ID returned
2385  *
2386  * allocates switch resources (SWID and VEB counter) (0x0208)
2387  */
2388 enum ice_status
2389 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
2390              u16 *counter_id)
2391 {
2392         struct ice_aqc_alloc_free_res_elem *sw_buf;
2393         struct ice_aqc_res_elem *sw_ele;
2394         enum ice_status status;
2395         u16 buf_len;
2396
2397         buf_len = ice_struct_size(sw_buf, elem, 1);
2398         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2399         if (!sw_buf)
2400                 return ICE_ERR_NO_MEMORY;
2401
2402         /* Prepare buffer for switch ID.
2403          * The number of resource entries in buffer is passed as 1 since only a
2404          * single switch/VEB instance is allocated, and hence a single sw_id
2405          * is requested.
2406          */
2407         sw_buf->num_elems = CPU_TO_LE16(1);
2408         sw_buf->res_type =
2409                 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
2410                             (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2411                             ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2412
2413         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2414                                        ice_aqc_opc_alloc_res, NULL);
2415
2416         if (status)
2417                 goto ice_alloc_sw_exit;
2418
2419         sw_ele = &sw_buf->elem[0];
2420         *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
2421
2422         if (ena_stats) {
2423                 /* Prepare buffer for VEB Counter */
2424                 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
2425                 struct ice_aqc_alloc_free_res_elem *counter_buf;
2426                 struct ice_aqc_res_elem *counter_ele;
2427
2428                 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2429                                 ice_malloc(hw, buf_len);
2430                 if (!counter_buf) {
2431                         status = ICE_ERR_NO_MEMORY;
2432                         goto ice_alloc_sw_exit;
2433                 }
2434
2435                 /* The number of resource entries in buffer is passed as 1 since
2436                  * only a single switch/VEB instance is allocated, and hence a
2437                  * single VEB counter is requested.
2438                  */
2439                 counter_buf->num_elems = CPU_TO_LE16(1);
2440                 counter_buf->res_type =
2441                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
2442                                     ICE_AQC_RES_TYPE_FLAG_DEDICATED);
2443                 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2444                                                opc, NULL);
2445
2446                 if (status) {
2447                         ice_free(hw, counter_buf);
2448                         goto ice_alloc_sw_exit;
2449                 }
2450                 counter_ele = &counter_buf->elem[0];
2451                 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
2452                 ice_free(hw, counter_buf);
2453         }
2454
2455 ice_alloc_sw_exit:
2456         ice_free(hw, sw_buf);
2457         return status;
2458 }
2459
2460 /**
2461  * ice_free_sw - free resources specific to switch
2462  * @hw: pointer to the HW struct
2463  * @sw_id: switch ID returned
2464  * @counter_id: VEB counter ID returned
2465  *
2466  * free switch resources (SWID and VEB counter) (0x0209)
2467  *
2468  * NOTE: This function frees multiple resources. It continues
2469  * releasing other resources even after it encounters error.
2470  * The error code returned is the last error it encountered.
2471  */
2472 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2473 {
2474         struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2475         enum ice_status status, ret_status;
2476         u16 buf_len;
2477
2478         buf_len = ice_struct_size(sw_buf, elem, 1);
2479         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2480         if (!sw_buf)
2481                 return ICE_ERR_NO_MEMORY;
2482
2483         /* Prepare buffer to free for switch ID res.
2484          * The number of resource entries in buffer is passed as 1 since only a
2485          * single switch/VEB instance is freed, and hence a single sw_id
2486          * is released.
2487          */
2488         sw_buf->num_elems = CPU_TO_LE16(1);
2489         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2490         sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
2491
2492         ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2493                                            ice_aqc_opc_free_res, NULL);
2494
2495         if (ret_status)
2496                 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2497
2498         /* Prepare buffer to free for VEB Counter resource */
2499         counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2500                         ice_malloc(hw, buf_len);
2501         if (!counter_buf) {
2502                 ice_free(hw, sw_buf);
2503                 return ICE_ERR_NO_MEMORY;
2504         }
2505
2506         /* The number of resource entries in buffer is passed as 1 since only a
2507          * single switch/VEB instance is freed, and hence a single VEB counter
2508          * is released
2509          */
2510         counter_buf->num_elems = CPU_TO_LE16(1);
2511         counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2512         counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2513
2514         status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2515                                        ice_aqc_opc_free_res, NULL);
2516         if (status) {
2517                 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2518                 ret_status = status;
2519         }
2520
2521         ice_free(hw, counter_buf);
2522         ice_free(hw, sw_buf);
2523         return ret_status;
2524 }
2525
2526 /**
2527  * ice_aq_add_vsi
2528  * @hw: pointer to the HW struct
2529  * @vsi_ctx: pointer to a VSI context struct
2530  * @cd: pointer to command details structure or NULL
2531  *
2532  * Add a VSI context to the hardware (0x0210)
2533  */
2534 enum ice_status
2535 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2536                struct ice_sq_cd *cd)
2537 {
2538         struct ice_aqc_add_update_free_vsi_resp *res;
2539         struct ice_aqc_add_get_update_free_vsi *cmd;
2540         struct ice_aq_desc desc;
2541         enum ice_status status;
2542
2543         cmd = &desc.params.vsi_cmd;
2544         res = &desc.params.add_update_free_vsi_res;
2545
2546         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2547
2548         if (!vsi_ctx->alloc_from_pool)
2549                 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2550                                            ICE_AQ_VSI_IS_VALID);
2551
2552         cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2553
2554         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2555
2556         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2557                                  sizeof(vsi_ctx->info), cd);
2558
2559         if (!status) {
2560                 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2561                 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2562                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2563         }
2564
2565         return status;
2566 }
2567
2568 /**
2569  * ice_aq_free_vsi
2570  * @hw: pointer to the HW struct
2571  * @vsi_ctx: pointer to a VSI context struct
2572  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2573  * @cd: pointer to command details structure or NULL
2574  *
2575  * Free VSI context info from hardware (0x0213)
2576  */
2577 enum ice_status
2578 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2579                 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2580 {
2581         struct ice_aqc_add_update_free_vsi_resp *resp;
2582         struct ice_aqc_add_get_update_free_vsi *cmd;
2583         struct ice_aq_desc desc;
2584         enum ice_status status;
2585
2586         cmd = &desc.params.vsi_cmd;
2587         resp = &desc.params.add_update_free_vsi_res;
2588
2589         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2590
2591         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2592         if (keep_vsi_alloc)
2593                 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2594
2595         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2596         if (!status) {
2597                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2598                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2599         }
2600
2601         return status;
2602 }
2603
2604 /**
2605  * ice_aq_update_vsi
2606  * @hw: pointer to the HW struct
2607  * @vsi_ctx: pointer to a VSI context struct
2608  * @cd: pointer to command details structure or NULL
2609  *
2610  * Update VSI context in the hardware (0x0211)
2611  */
2612 enum ice_status
2613 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2614                   struct ice_sq_cd *cd)
2615 {
2616         struct ice_aqc_add_update_free_vsi_resp *resp;
2617         struct ice_aqc_add_get_update_free_vsi *cmd;
2618         struct ice_aq_desc desc;
2619         enum ice_status status;
2620
2621         cmd = &desc.params.vsi_cmd;
2622         resp = &desc.params.add_update_free_vsi_res;
2623
2624         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2625
2626         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2627
2628         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2629
2630         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2631                                  sizeof(vsi_ctx->info), cd);
2632
2633         if (!status) {
2634                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2635                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2636         }
2637
2638         return status;
2639 }
2640
2641 /**
2642  * ice_is_vsi_valid - check whether the VSI is valid or not
2643  * @hw: pointer to the HW struct
2644  * @vsi_handle: VSI handle
2645  *
2646  * check whether the VSI is valid or not
2647  */
2648 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2649 {
2650         return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2651 }
2652
/**
 * ice_get_hw_vsi_num - return the HW VSI number
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the HW VSI number
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
{
	/* No bounds or NULL check here: an invalid handle dereferences an
	 * out-of-range or NULL context entry, hence the caution above.
	 */
	return hw->vsi_ctx[vsi_handle]->vsi_num;
}
2665
2666 /**
2667  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2668  * @hw: pointer to the HW struct
2669  * @vsi_handle: VSI handle
2670  *
2671  * return the VSI context entry for a given VSI handle
2672  */
2673 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2674 {
2675         return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2676 }
2677
/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	/* ownership of @vsi transfers to hw->vsi_ctx[]; it is released later
	 * by ice_clear_vsi_ctx(). No bounds check - callers validate the
	 * handle (see ice_add_vsi).
	 */
	hw->vsi_ctx[vsi_handle] = vsi;
}
2691
2692 /**
2693  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2694  * @hw: pointer to the HW struct
2695  * @vsi_handle: VSI handle
2696  */
2697 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2698 {
2699         struct ice_vsi_ctx *vsi;
2700         u8 i;
2701
2702         vsi = ice_get_vsi_ctx(hw, vsi_handle);
2703         if (!vsi)
2704                 return;
2705         ice_for_each_traffic_class(i) {
2706                 if (vsi->lan_q_ctx[i]) {
2707                         ice_free(hw, vsi->lan_q_ctx[i]);
2708                         vsi->lan_q_ctx[i] = NULL;
2709                 }
2710         }
2711 }
2712
2713 /**
2714  * ice_clear_vsi_ctx - clear the VSI context entry
2715  * @hw: pointer to the HW struct
2716  * @vsi_handle: VSI handle
2717  *
2718  * clear the VSI context entry
2719  */
2720 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2721 {
2722         struct ice_vsi_ctx *vsi;
2723
2724         vsi = ice_get_vsi_ctx(hw, vsi_handle);
2725         if (vsi) {
2726                 ice_clear_vsi_q_ctx(hw, vsi_handle);
2727                 ice_free(hw, vsi);
2728                 hw->vsi_ctx[vsi_handle] = NULL;
2729         }
2730 }
2731
2732 /**
2733  * ice_clear_all_vsi_ctx - clear all the VSI context entries
2734  * @hw: pointer to the HW struct
2735  */
2736 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2737 {
2738         u16 i;
2739
2740         for (i = 0; i < ICE_MAX_VSI; i++)
2741                 ice_clear_vsi_ctx(hw, i);
2742 }
2743
2744 /**
2745  * ice_add_vsi - add VSI context to the hardware and VSI handle list
2746  * @hw: pointer to the HW struct
2747  * @vsi_handle: unique VSI handle provided by drivers
2748  * @vsi_ctx: pointer to a VSI context struct
2749  * @cd: pointer to command details structure or NULL
2750  *
2751  * Add a VSI context to the hardware also add it into the VSI handle list.
2752  * If this function gets called after reset for existing VSIs then update
2753  * with the new HW VSI number in the corresponding VSI handle list entry.
2754  */
2755 enum ice_status
2756 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2757             struct ice_sq_cd *cd)
2758 {
2759         struct ice_vsi_ctx *tmp_vsi_ctx;
2760         enum ice_status status;
2761
2762         if (vsi_handle >= ICE_MAX_VSI)
2763                 return ICE_ERR_PARAM;
2764         status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2765         if (status)
2766                 return status;
2767         tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2768         if (!tmp_vsi_ctx) {
2769                 /* Create a new VSI context */
2770                 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2771                         ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2772                 if (!tmp_vsi_ctx) {
2773                         ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2774                         return ICE_ERR_NO_MEMORY;
2775                 }
2776                 *tmp_vsi_ctx = *vsi_ctx;
2777
2778                 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2779         } else {
2780                 /* update with new HW VSI num */
2781                 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2782         }
2783
2784         return ICE_SUCCESS;
2785 }
2786
2787 /**
2788  * ice_free_vsi- free VSI context from hardware and VSI handle list
2789  * @hw: pointer to the HW struct
2790  * @vsi_handle: unique VSI handle
2791  * @vsi_ctx: pointer to a VSI context struct
2792  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2793  * @cd: pointer to command details structure or NULL
2794  *
2795  * Free VSI context info from hardware as well as from VSI handle list
2796  */
2797 enum ice_status
2798 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2799              bool keep_vsi_alloc, struct ice_sq_cd *cd)
2800 {
2801         enum ice_status status;
2802
2803         if (!ice_is_vsi_valid(hw, vsi_handle))
2804                 return ICE_ERR_PARAM;
2805         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2806         status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2807         if (!status)
2808                 ice_clear_vsi_ctx(hw, vsi_handle);
2809         return status;
2810 }
2811
2812 /**
2813  * ice_update_vsi
2814  * @hw: pointer to the HW struct
2815  * @vsi_handle: unique VSI handle
2816  * @vsi_ctx: pointer to a VSI context struct
2817  * @cd: pointer to command details structure or NULL
2818  *
2819  * Update VSI context in the hardware
2820  */
2821 enum ice_status
2822 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2823                struct ice_sq_cd *cd)
2824 {
2825         if (!ice_is_vsi_valid(hw, vsi_handle))
2826                 return ICE_ERR_PARAM;
2827         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2828         return ice_aq_update_vsi(hw, vsi_ctx, cd);
2829 }
2830
2831 /**
2832  * ice_aq_get_vsi_params
2833  * @hw: pointer to the HW struct
2834  * @vsi_ctx: pointer to a VSI context struct
2835  * @cd: pointer to command details structure or NULL
2836  *
2837  * Get VSI context info from hardware (0x0212)
2838  */
2839 enum ice_status
2840 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2841                       struct ice_sq_cd *cd)
2842 {
2843         struct ice_aqc_add_get_update_free_vsi *cmd;
2844         struct ice_aqc_get_vsi_resp *resp;
2845         struct ice_aq_desc desc;
2846         enum ice_status status;
2847
2848         cmd = &desc.params.vsi_cmd;
2849         resp = &desc.params.get_vsi_resp;
2850
2851         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2852
2853         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2854
2855         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2856                                  sizeof(vsi_ctx->info), cd);
2857         if (!status) {
2858                 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2859                                         ICE_AQ_VSI_NUM_M;
2860                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2861                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2862         }
2863
2864         return status;
2865 }
2866
/**
 * ice_aq_add_update_mir_rule - add/update a mirror rule
 * @hw: pointer to the HW struct
 * @rule_type: Rule Type
 * @dest_vsi: VSI number to which packets will be mirrored
 * @count: length of the list
 * @mr_buf: buffer for list of mirrored VSI numbers
 * @cd: pointer to command details structure or NULL
 * @rule_id: Rule ID
 *
 * Add/Update Mirror Rule (0x260).
 */
enum ice_status
ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
			   u16 count, struct ice_mir_rule_buf *mr_buf,
			   struct ice_sq_cd *cd, u16 *rule_id)
{
	struct ice_aqc_add_update_mir_rule *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	__le16 *mr_list = NULL;
	u16 buf_size = 0;

	/* Virtual-port rules require a VSI list; physical-port rules must
	 * not carry one. Anything else is rejected.
	 */
	switch (rule_type) {
	case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
	case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
		/* Make sure count and mr_buf are set for these rule_types */
		if (!(count && mr_buf))
			return ICE_ERR_PARAM;

		buf_size = count * sizeof(__le16);
		mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
		if (!mr_list)
			return ICE_ERR_NO_MEMORY;
		break;
	case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
	case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
		/* Make sure count and mr_buf are not set for these
		 * rule_types
		 */
		if (count || mr_buf)
			return ICE_ERR_PARAM;
		break;
	default:
		ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
		return ICE_ERR_OUT_OF_RANGE;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);

	/* Pre-process 'mr_buf' items for add/update of virtual port
	 * ingress/egress mirroring (but not physical port ingress/egress
	 * mirroring)
	 */
	if (mr_buf) {
		int i;

		for (i = 0; i < count; i++) {
			u16 id;

			id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;

			/* Validate specified VSI number, make sure it is less
			 * than ICE_MAX_VSI, if not return with error.
			 */
			if (id >= ICE_MAX_VSI) {
				ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
					  id);
				ice_free(hw, mr_list);
				return ICE_ERR_OUT_OF_RANGE;
			}

			/* add VSI to mirror rule */
			if (mr_buf[i].add)
				mr_list[i] =
					CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
			else /* remove VSI from mirror rule */
				mr_list[i] = CPU_TO_LE16(id);
		}
	}

	cmd = &desc.params.add_update_rule;
	/* NOTE(review): rule_id is dereferenced unconditionally here and
	 * again after the command completes, so callers must always pass a
	 * valid pointer - verify against all call sites.
	 */
	if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
		cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
					   ICE_AQC_RULE_ID_VALID_M);
	cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
	cmd->num_entries = CPU_TO_LE16(count);
	cmd->dest = CPU_TO_LE16(dest_vsi);

	status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
	if (!status)
		*rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;

	/* mr_list is NULL for physical-port rules; presumably ice_free()
	 * tolerates a NULL pointer - confirm in the OSAL implementation.
	 */
	ice_free(hw, mr_list);

	return status;
}
2964
2965 /**
2966  * ice_aq_delete_mir_rule - delete a mirror rule
2967  * @hw: pointer to the HW struct
2968  * @rule_id: Mirror rule ID (to be deleted)
2969  * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2970  *               otherwise it is returned to the shared pool
2971  * @cd: pointer to command details structure or NULL
2972  *
2973  * Delete Mirror Rule (0x261).
2974  */
2975 enum ice_status
2976 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2977                        struct ice_sq_cd *cd)
2978 {
2979         struct ice_aqc_delete_mir_rule *cmd;
2980         struct ice_aq_desc desc;
2981
2982         /* rule_id should be in the range 0...63 */
2983         if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2984                 return ICE_ERR_OUT_OF_RANGE;
2985
2986         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2987
2988         cmd = &desc.params.del_rule;
2989         rule_id |= ICE_AQC_RULE_ID_VALID_M;
2990         cmd->rule_id = CPU_TO_LE16(rule_id);
2991
2992         if (keep_allocd)
2993                 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2994
2995         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2996 }
2997
2998 /**
2999  * ice_aq_alloc_free_vsi_list
3000  * @hw: pointer to the HW struct
3001  * @vsi_list_id: VSI list ID returned or used for lookup
3002  * @lkup_type: switch rule filter lookup type
3003  * @opc: switch rules population command type - pass in the command opcode
3004  *
3005  * allocates or free a VSI list resource
3006  */
3007 static enum ice_status
3008 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
3009                            enum ice_sw_lkup_type lkup_type,
3010                            enum ice_adminq_opc opc)
3011 {
3012         struct ice_aqc_alloc_free_res_elem *sw_buf;
3013         struct ice_aqc_res_elem *vsi_ele;
3014         enum ice_status status;
3015         u16 buf_len;
3016
3017         buf_len = ice_struct_size(sw_buf, elem, 1);
3018         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3019         if (!sw_buf)
3020                 return ICE_ERR_NO_MEMORY;
3021         sw_buf->num_elems = CPU_TO_LE16(1);
3022
3023         if (lkup_type == ICE_SW_LKUP_MAC ||
3024             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3025             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3026             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3027             lkup_type == ICE_SW_LKUP_PROMISC ||
3028             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3029             lkup_type == ICE_SW_LKUP_LAST) {
3030                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
3031         } else if (lkup_type == ICE_SW_LKUP_VLAN) {
3032                 sw_buf->res_type =
3033                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
3034         } else {
3035                 status = ICE_ERR_PARAM;
3036                 goto ice_aq_alloc_free_vsi_list_exit;
3037         }
3038
3039         if (opc == ice_aqc_opc_free_res)
3040                 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
3041
3042         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
3043         if (status)
3044                 goto ice_aq_alloc_free_vsi_list_exit;
3045
3046         if (opc == ice_aqc_opc_alloc_res) {
3047                 vsi_ele = &sw_buf->elem[0];
3048                 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
3049         }
3050
3051 ice_aq_alloc_free_vsi_list_exit:
3052         ice_free(hw, sw_buf);
3053         return status;
3054 }
3055
3056 /**
3057  * ice_aq_set_storm_ctrl - Sets storm control configuration
3058  * @hw: pointer to the HW struct
3059  * @bcast_thresh: represents the upper threshold for broadcast storm control
3060  * @mcast_thresh: represents the upper threshold for multicast storm control
3061  * @ctl_bitmask: storm control knobs
3062  *
3063  * Sets the storm control configuration (0x0280)
3064  */
3065 enum ice_status
3066 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3067                       u32 ctl_bitmask)
3068 {
3069         struct ice_aqc_storm_cfg *cmd;
3070         struct ice_aq_desc desc;
3071
3072         cmd = &desc.params.storm_conf;
3073
3074         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
3075
3076         cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3077         cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3078         cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3079
3080         return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3081 }
3082
3083 /**
3084  * ice_aq_get_storm_ctrl - gets storm control configuration
3085  * @hw: pointer to the HW struct
3086  * @bcast_thresh: represents the upper threshold for broadcast storm control
3087  * @mcast_thresh: represents the upper threshold for multicast storm control
3088  * @ctl_bitmask: storm control knobs
3089  *
3090  * Gets the storm control configuration (0x0281)
3091  */
3092 enum ice_status
3093 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3094                       u32 *ctl_bitmask)
3095 {
3096         enum ice_status status;
3097         struct ice_aq_desc desc;
3098
3099         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3100
3101         status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3102         if (!status) {
3103                 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
3104
3105                 if (bcast_thresh)
3106                         *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3107                                 ICE_AQ_THRESHOLD_M;
3108                 if (mcast_thresh)
3109                         *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3110                                 ICE_AQ_THRESHOLD_M;
3111                 if (ctl_bitmask)
3112                         *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3113         }
3114
3115         return status;
3116 }
3117
3118 /**
3119  * ice_aq_sw_rules - add/update/remove switch rules
3120  * @hw: pointer to the HW struct
3121  * @rule_list: pointer to switch rule population list
3122  * @rule_list_sz: total size of the rule list in bytes
3123  * @num_rules: number of switch rules in the rule_list
3124  * @opc: switch rules population command type - pass in the command opcode
3125  * @cd: pointer to command details structure or NULL
3126  *
3127  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
3128  */
3129 static enum ice_status
3130 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
3131                 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3132 {
3133         struct ice_aq_desc desc;
3134         enum ice_status status;
3135
3136         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3137
3138         if (opc != ice_aqc_opc_add_sw_rules &&
3139             opc != ice_aqc_opc_update_sw_rules &&
3140             opc != ice_aqc_opc_remove_sw_rules)
3141                 return ICE_ERR_PARAM;
3142
3143         ice_fill_dflt_direct_cmd_desc(&desc, opc);
3144
3145         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3146         desc.params.sw_rules.num_rules_fltr_entry_index =
3147                 CPU_TO_LE16(num_rules);
3148         status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
3149         if (opc != ice_aqc_opc_add_sw_rules &&
3150             hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
3151                 status = ICE_ERR_DOES_NOT_EXIST;
3152
3153         return status;
3154 }
3155
3156 /**
3157  * ice_aq_add_recipe - add switch recipe
3158  * @hw: pointer to the HW struct
3159  * @s_recipe_list: pointer to switch rule population list
3160  * @num_recipes: number of switch recipes in the list
3161  * @cd: pointer to command details structure or NULL
3162  *
3163  * Add(0x0290)
3164  */
3165 enum ice_status
3166 ice_aq_add_recipe(struct ice_hw *hw,
3167                   struct ice_aqc_recipe_data_elem *s_recipe_list,
3168                   u16 num_recipes, struct ice_sq_cd *cd)
3169 {
3170         struct ice_aqc_add_get_recipe *cmd;
3171         struct ice_aq_desc desc;
3172         u16 buf_size;
3173
3174         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3175         cmd = &desc.params.add_get_recipe;
3176         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3177
3178         cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
3179         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3180
3181         buf_size = num_recipes * sizeof(*s_recipe_list);
3182
3183         return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3184 }
3185
3186 /**
3187  * ice_aq_get_recipe - get switch recipe
3188  * @hw: pointer to the HW struct
3189  * @s_recipe_list: pointer to switch rule population list
3190  * @num_recipes: pointer to the number of recipes (input and output)
3191  * @recipe_root: root recipe number of recipe(s) to retrieve
3192  * @cd: pointer to command details structure or NULL
3193  *
3194  * Get(0x0292)
3195  *
3196  * On input, *num_recipes should equal the number of entries in s_recipe_list.
3197  * On output, *num_recipes will equal the number of entries returned in
3198  * s_recipe_list.
3199  *
3200  * The caller must supply enough space in s_recipe_list to hold all possible
3201  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3202  */
3203 enum ice_status
3204 ice_aq_get_recipe(struct ice_hw *hw,
3205                   struct ice_aqc_recipe_data_elem *s_recipe_list,
3206                   u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3207 {
3208         struct ice_aqc_add_get_recipe *cmd;
3209         struct ice_aq_desc desc;
3210         enum ice_status status;
3211         u16 buf_size;
3212
3213         if (*num_recipes != ICE_MAX_NUM_RECIPES)
3214                 return ICE_ERR_PARAM;
3215
3216         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3217         cmd = &desc.params.add_get_recipe;
3218         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3219
3220         cmd->return_index = CPU_TO_LE16(recipe_root);
3221         cmd->num_sub_recipes = 0;
3222
3223         buf_size = *num_recipes * sizeof(*s_recipe_list);
3224
3225         status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3226         *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3227
3228         return status;
3229 }
3230
/**
 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
 * @hw: pointer to the HW struct
 * @params: parameters used to update the default recipe
 *
 * This function only supports updating default recipes and it only supports
 * updating a single recipe based on the lkup_idx at a time.
 *
 * This is done as a read-modify-write operation. First, get the current recipe
 * contents based on the recipe's ID. Then modify the field vector index and
 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
 * the pre-existing recipe with the modifications.
 */
enum ice_status
ice_update_recipe_lkup_idx(struct ice_hw *hw,
			   struct ice_update_recipe_lkup_idx_params *params)
{
	struct ice_aqc_recipe_data_elem *rcp_list;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	enum ice_status status;

	/* ice_aq_get_recipe() requires a buffer sized for
	 * ICE_MAX_NUM_RECIPES entries, even to fetch a single recipe.
	 */
	rcp_list = (struct ice_aqc_recipe_data_elem *)ice_malloc(hw, num_recps * sizeof(*rcp_list));
	if (!rcp_list)
		return ICE_ERR_NO_MEMORY;

	/* read current recipe list from firmware */
	rcp_list->recipe_indx = params->rid;
	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
	if (status) {
		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
			  params->rid, status);
		goto error_out;
	}

	/* only modify existing recipe's lkup_idx and mask if valid, while
	 * leaving all other fields the same, then update the recipe firmware
	 */
	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
	if (params->mask_valid)
		rcp_list->content.mask[params->lkup_idx] =
			CPU_TO_LE16(params->mask);

	/* optionally flag this lookup index to be ignored by HW */
	if (params->ignore_valid)
		rcp_list->content.lkup_indx[params->lkup_idx] |=
			ICE_AQ_RECIPE_LKUP_IGNORE;

	/* write back only the first (modified) recipe entry */
	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
			  params->rid, params->lkup_idx, params->fv_idx,
			  params->mask, params->mask_valid ? "true" : "false",
			  status);

error_out:
	ice_free(hw, rcp_list);
	return status;
}
3288
3289 /**
3290  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3291  * @hw: pointer to the HW struct
3292  * @profile_id: package profile ID to associate the recipe with
3293  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3294  * @cd: pointer to command details structure or NULL
3295  * Recipe to profile association (0x0291)
3296  */
3297 enum ice_status
3298 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3299                              struct ice_sq_cd *cd)
3300 {
3301         struct ice_aqc_recipe_to_profile *cmd;
3302         struct ice_aq_desc desc;
3303
3304         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3305         cmd = &desc.params.recipe_to_profile;
3306         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
3307         cmd->profile_id = CPU_TO_LE16(profile_id);
3308         /* Set the recipe ID bit in the bitmask to let the device know which
3309          * profile we are associating the recipe to
3310          */
3311         ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3312                    ICE_NONDMA_TO_NONDMA);
3313
3314         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3315 }
3316
3317 /**
3318  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3319  * @hw: pointer to the HW struct
3320  * @profile_id: package profile ID to associate the recipe with
3321  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3322  * @cd: pointer to command details structure or NULL
3323  * Associate profile ID with given recipe (0x0293)
3324  */
3325 enum ice_status
3326 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3327                              struct ice_sq_cd *cd)
3328 {
3329         struct ice_aqc_recipe_to_profile *cmd;
3330         struct ice_aq_desc desc;
3331         enum ice_status status;
3332
3333         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3334         cmd = &desc.params.recipe_to_profile;
3335         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3336         cmd->profile_id = CPU_TO_LE16(profile_id);
3337
3338         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3339         if (!status)
3340                 ice_memcpy(r_bitmap, cmd->recipe_assoc,
3341                            sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3342
3343         return status;
3344 }
3345
3346 /**
3347  * ice_alloc_recipe - add recipe resource
3348  * @hw: pointer to the hardware structure
3349  * @rid: recipe ID returned as response to AQ call
3350  */
3351 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3352 {
3353         struct ice_aqc_alloc_free_res_elem *sw_buf;
3354         enum ice_status status;
3355         u16 buf_len;
3356
3357         buf_len = ice_struct_size(sw_buf, elem, 1);
3358         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3359         if (!sw_buf)
3360                 return ICE_ERR_NO_MEMORY;
3361
3362         sw_buf->num_elems = CPU_TO_LE16(1);
3363         sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3364                                         ICE_AQC_RES_TYPE_S) |
3365                                         ICE_AQC_RES_TYPE_FLAG_SHARED);
3366         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3367                                        ice_aqc_opc_alloc_res, NULL);
3368         if (!status)
3369                 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3370         ice_free(hw, sw_buf);
3371
3372         return status;
3373 }
3374
3375 /* ice_init_port_info - Initialize port_info with switch configuration data
3376  * @pi: pointer to port_info
3377  * @vsi_port_num: VSI number or port number
3378  * @type: Type of switch element (port or VSI)
3379  * @swid: switch ID of the switch the element is attached to
3380  * @pf_vf_num: PF or VF number
3381  * @is_vf: true if the element is a VF, false otherwise
3382  */
3383 static void
3384 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3385                    u16 swid, u16 pf_vf_num, bool is_vf)
3386 {
3387         switch (type) {
3388         case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3389                 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3390                 pi->sw_id = swid;
3391                 pi->pf_vf_num = pf_vf_num;
3392                 pi->is_vf = is_vf;
3393                 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3394                 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3395                 break;
3396         default:
3397                 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3398                 break;
3399         }
3400 }
3401
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 *
 * Reads the switch configuration from firmware (possibly across multiple
 * AQ calls) and initializes hw->port_info for each port element found.
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
	enum ice_status status;
	u8 num_total_ports;
	u16 req_desc = 0;
	u16 num_elems;
	u8 j = 0;	/* count of port elements consumed so far */
	u16 i;

	/* only a single port is expected here */
	num_total_ports = 1;

	rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
		ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);

	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		struct ice_aqc_get_sw_cfg_resp_elem *ele;

		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 res_type;

			/* low bits carry the VSI/port number; the element
			 * type lives in the high bits (extracted below)
			 */
			vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = LE16_TO_CPU(ele->swid);

			if (LE16_TO_CPU(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);

			switch (res_type) {
			case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
			case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
				if (j == num_total_ports) {
					ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
					status = ICE_ERR_CFG;
					goto out;
				}
				ice_init_port_info(hw->port_info,
						   vsi_port_num, res_type, swid,
						   pf_vf_num, is_vf);
				j++;
				break;
			default:
				/* other element types are ignored here */
				break;
			}
		}
	} while (req_desc && !status);

out:
	ice_free(hw, rbuf);
	return status;
}
3480
3481 /**
3482  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
3483  * @hw: pointer to the hardware structure
3484  * @fi: filter info structure to fill/update
3485  *
3486  * This helper function populates the lb_en and lan_en elements of the provided
3487  * ice_fltr_info struct using the switch's type and characteristics of the
3488  * switch rule being configured.
3489  */
3490 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
3491 {
3492         if ((fi->flag & ICE_FLTR_RX) &&
3493             (fi->fltr_act == ICE_FWD_TO_VSI ||
3494              fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
3495             fi->lkup_type == ICE_SW_LKUP_LAST)
3496                 fi->lan_en = true;
3497         fi->lb_en = false;
3498         fi->lan_en = false;
3499         if ((fi->flag & ICE_FLTR_TX) &&
3500             (fi->fltr_act == ICE_FWD_TO_VSI ||
3501              fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3502              fi->fltr_act == ICE_FWD_TO_Q ||
3503              fi->fltr_act == ICE_FWD_TO_QGRP)) {
3504                 /* Setting LB for prune actions will result in replicated
3505                  * packets to the internal switch that will be dropped.
3506                  */
3507                 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
3508                         fi->lb_en = true;
3509
3510                 /* Set lan_en to TRUE if
3511                  * 1. The switch is a VEB AND
3512                  * 2
3513                  * 2.1 The lookup is a directional lookup like ethertype,
3514                  * promiscuous, ethertype-MAC, promiscuous-VLAN
3515                  * and default-port OR
3516                  * 2.2 The lookup is VLAN, OR
3517                  * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
3518                  * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3519                  *
3520                  * OR
3521                  *
3522                  * The switch is a VEPA.
3523                  *
3524                  * In all other cases, the LAN enable has to be set to false.
3525                  */
3526                 if (hw->evb_veb) {
3527                         if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3528                             fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3529                             fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3530                             fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3531                             fi->lkup_type == ICE_SW_LKUP_DFLT ||
3532                             fi->lkup_type == ICE_SW_LKUP_VLAN ||
3533                             (fi->lkup_type == ICE_SW_LKUP_MAC &&
3534                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3535                             (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3536                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3537                                 fi->lan_en = true;
3538                 } else {
3539                         fi->lan_en = true;
3540                 }
3541         }
3542 }
3543
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	/* vlan_id > ICE_MAX_VLAN_ID acts as a "no VLAN" sentinel below */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	u16 vlan_tpid = ICE_ETH_P_8021Q;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	/* rule removal needs only the rule index; no header or action */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
	ice_fill_sw_info(hw, f_info);

	/* translate the forwarding action into the rule's action word */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is encoded as log2 of the group size */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ice_ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		/* unknown action: leave s_rule unmodified */
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* pick the header fields (DA / VLAN / ethertype) to program based
	 * on the lookup type
	 */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->l_data.vlan.tpid_valid)
			vlan_tpid = f_info->l_data.vlan.tpid;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		/* fall-through */
	case ICE_SW_LKUP_ETHERTYPE:
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		/* fall-through */
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	if (daddr)
		ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
			   ICE_NONDMA_TO_NONDMA);

	/* program VLAN ID and TPID only when a valid VLAN ID was selected */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = CPU_TO_BE16(vlan_id);
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = CPU_TO_BE16(vlan_tpid);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
}
3682
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	/* markers are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* the lookup rule lives immediately after the large action */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* record the large action binding in the management entry */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	ice_free(hw, lg_act);
	return status;
}
3786
/**
 * ice_add_counter_act - add/update filter rule with counter action
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which counter needs to be added
 * @counter_id: VLAN counter ID returned as part of allocate resource
 * @l_id: large action resource ID
 */
static enum ice_status
ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		    u16 counter_id, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act;
	struct ice_aqc_sw_rules_elem *rx_tx;
	enum ice_status status;
	/* 2 actions will be added while adding a large action counter */
	const int num_acts = 2;
	u16 lg_act_size;
	u16 rules_size;
	u16 f_rule_id;
	u32 act;
	u16 id;

	/* counters are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 * 1. Large Action
	 * 2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* the lookup rule lives immediately after the large action */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ?  m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action counter ID */
	act = ICE_LG_ACT_STAT_COUNT;
	act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
		ICE_LG_ACT_STAT_COUNT_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* point the lookup rule's action at the large action just built */
	act = ICE_SINGLE_ACT_PTR;
	act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
	rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	f_rule_id = m_ent->fltr_info.fltr_rule_id;
	rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* record the large action/counter binding on success */
		m_ent->lg_act_idx = l_id;
		m_ent->counter_index = counter_id;
	}

	ice_free(hw, lg_act);
	return status;
}
3874
3875 /**
3876  * ice_create_vsi_list_map
3877  * @hw: pointer to the hardware structure
3878  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3879  * @num_vsi: number of VSI handles in the array
3880  * @vsi_list_id: VSI list ID generated as part of allocate resource
3881  *
3882  * Helper function to create a new entry of VSI list ID to VSI mapping
3883  * using the given VSI list ID
3884  */
3885 static struct ice_vsi_list_map_info *
3886 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3887                         u16 vsi_list_id)
3888 {
3889         struct ice_switch_info *sw = hw->switch_info;
3890         struct ice_vsi_list_map_info *v_map;
3891         int i;
3892
3893         v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
3894         if (!v_map)
3895                 return NULL;
3896
3897         v_map->vsi_list_id = vsi_list_id;
3898         v_map->ref_cnt = 1;
3899         for (i = 0; i < num_vsi; i++)
3900                 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3901
3902         LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3903         return v_map;
3904 }
3905
/**
 * ice_update_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 *
 * Call AQ command to add a new switch rule or update existing switch rule
 * using the given VSI list ID
 */
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	u16 s_rule_size;
	u16 rule_type;
	int i;

	if (!num_vsi)
		return ICE_ERR_PARAM;

	/* most lookup types use VSI-list set/clear rules; VLAN lookups use
	 * prune-list rules instead
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
	    lkup_type == ICE_SW_LKUP_LAST)
		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)
		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
	else
		return ICE_ERR_PARAM;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	for (i = 0; i < num_vsi; i++) {
		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
			status = ICE_ERR_PARAM;
			goto exit;
		}
		/* AQ call requires hw_vsi_id(s) */
		s_rule->pdata.vsi_list.vsi[i] =
			CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
	}

	s_rule->type = CPU_TO_LE16(rule_type);
	s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
	s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);

exit:
	ice_free(hw, s_rule);
	return status;
}
3972
3973 /**
3974  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3975  * @hw: pointer to the HW struct
3976  * @vsi_handle_arr: array of VSI handles to form a VSI list
3977  * @num_vsi: number of VSI handles in the array
3978  * @vsi_list_id: stores the ID of the VSI list to be created
3979  * @lkup_type: switch rule filter's lookup type
3980  */
3981 static enum ice_status
3982 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3983                          u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3984 {
3985         enum ice_status status;
3986
3987         status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3988                                             ice_aqc_opc_alloc_res);
3989         if (status)
3990                 return status;
3991
3992         /* Update the newly created VSI list to include the specified VSIs */
3993         return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3994                                         *vsi_list_id, false,
3995                                         ice_aqc_opc_add_sw_rules, lkup_type);
3996 }
3997
/**
 * ice_create_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @recp_list: corresponding filter management list
 * @f_entry: entry containing packet forwarding information
 *
 * Create switch rule with given filter information and add an entry
 * to the corresponding filter management list to track this switch rule
 * and VSI mapping
 */
static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
			struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;

	s_rule = (struct ice_aqc_sw_rules_elem *)
		ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	fm_entry = (struct ice_fltr_mgmt_list_entry *)
		   ice_malloc(hw, sizeof(*fm_entry));
	if (!fm_entry) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_pkt_fwd_rule_exit;
	}

	fm_entry->fltr_info = f_entry->fltr_info;

	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;

	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);

	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
	if (status) {
		/* AQ failed: free the management entry so it is not leaked */
		ice_free(hw, fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	}

	/* propagate the firmware-assigned rule ID to both the caller's
	 * entry and the tracked management entry
	 */
	f_entry->fltr_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
	fm_entry->fltr_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);

	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);

ice_create_pkt_fwd_rule_exit:
	ice_free(hw, s_rule);
	return status;
}
4059
4060 /**
4061  * ice_update_pkt_fwd_rule
4062  * @hw: pointer to the hardware structure
4063  * @f_info: filter information for switch rule
4064  *
4065  * Call AQ command to update a previously created switch rule with a
4066  * VSI list ID
4067  */
4068 static enum ice_status
4069 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
4070 {
4071         struct ice_aqc_sw_rules_elem *s_rule;
4072         enum ice_status status;
4073
4074         s_rule = (struct ice_aqc_sw_rules_elem *)
4075                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4076         if (!s_rule)
4077                 return ICE_ERR_NO_MEMORY;
4078
4079         ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
4080
4081         s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
4082
4083         /* Update switch rule with new rule set to forward VSI list */
4084         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4085                                  ice_aqc_opc_update_sw_rules, NULL);
4086
4087         ice_free(hw, s_rule);
4088         return status;
4089 }
4090
4091 /**
4092  * ice_update_sw_rule_bridge_mode
4093  * @hw: pointer to the HW struct
4094  *
4095  * Updates unicast switch filter rules based on VEB/VEPA mode
4096  */
4097 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
4098 {
4099         struct ice_switch_info *sw = hw->switch_info;
4100         struct ice_fltr_mgmt_list_entry *fm_entry;
4101         enum ice_status status = ICE_SUCCESS;
4102         struct LIST_HEAD_TYPE *rule_head;
4103         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4104
4105         rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4106         rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
4107
4108         ice_acquire_lock(rule_lock);
4109         LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
4110                             list_entry) {
4111                 struct ice_fltr_info *fi = &fm_entry->fltr_info;
4112                 u8 *addr = fi->l_data.mac.mac_addr;
4113
4114                 /* Update unicast Tx rules to reflect the selected
4115                  * VEB/VEPA mode
4116                  */
4117                 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4118                     (fi->fltr_act == ICE_FWD_TO_VSI ||
4119                      fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4120                      fi->fltr_act == ICE_FWD_TO_Q ||
4121                      fi->fltr_act == ICE_FWD_TO_QGRP)) {
4122                         status = ice_update_pkt_fwd_rule(hw, fi);
4123                         if (status)
4124                                 break;
4125                 }
4126         }
4127
4128         ice_release_lock(rule_lock);
4129
4130         return status;
4131 }
4132
4133 /**
4134  * ice_add_update_vsi_list
4135  * @hw: pointer to the hardware structure
4136  * @m_entry: pointer to current filter management list entry
4137  * @cur_fltr: filter information from the book keeping entry
4138  * @new_fltr: filter information with the new VSI to be added
4139  *
4140  * Call AQ command to add or update previously created VSI list with new VSI.
4141  *
4142  * Helper function to do book keeping associated with adding filter information
4143  * The algorithm to do the book keeping is described below :
4144  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
4145  *      if only one VSI has been added till now
4146  *              Allocate a new VSI list and add two VSIs
4147  *              to this list using switch rule command
4148  *              Update the previously created switch rule with the
4149  *              newly created VSI list ID
4150  *      if a VSI list was previously created
4151  *              Add the new VSI to the previously created VSI list set
4152  *              using the update switch rule command
4153  */
4154 static enum ice_status
4155 ice_add_update_vsi_list(struct ice_hw *hw,
4156                         struct ice_fltr_mgmt_list_entry *m_entry,
4157                         struct ice_fltr_info *cur_fltr,
4158                         struct ice_fltr_info *new_fltr)
4159 {
4160         enum ice_status status = ICE_SUCCESS;
4161         u16 vsi_list_id = 0;
4162
4163         if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
4164              cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
4165                 return ICE_ERR_NOT_IMPL;
4166
4167         if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
4168              new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
4169             (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
4170              cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
4171                 return ICE_ERR_NOT_IMPL;
4172
4173         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
4174                 /* Only one entry existed in the mapping and it was not already
4175                  * a part of a VSI list. So, create a VSI list with the old and
4176                  * new VSIs.
4177                  */
4178                 struct ice_fltr_info tmp_fltr;
4179                 u16 vsi_handle_arr[2];
4180
4181                 /* A rule already exists with the new VSI being added */
4182                 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
4183                         return ICE_ERR_ALREADY_EXISTS;
4184
4185                 vsi_handle_arr[0] = cur_fltr->vsi_handle;
4186                 vsi_handle_arr[1] = new_fltr->vsi_handle;
4187                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4188                                                   &vsi_list_id,
4189                                                   new_fltr->lkup_type);
4190                 if (status)
4191                         return status;
4192
4193                 tmp_fltr = *new_fltr;
4194                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
4195                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4196                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4197                 /* Update the previous switch rule of "MAC forward to VSI" to
4198                  * "MAC fwd to VSI list"
4199                  */
4200                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4201                 if (status)
4202                         return status;
4203
4204                 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
4205                 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4206                 m_entry->vsi_list_info =
4207                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4208                                                 vsi_list_id);
4209
4210                 if (!m_entry->vsi_list_info)
4211                         return ICE_ERR_NO_MEMORY;
4212
4213                 /* If this entry was large action then the large action needs
4214                  * to be updated to point to FWD to VSI list
4215                  */
4216                 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
4217                         status =
4218                             ice_add_marker_act(hw, m_entry,
4219                                                m_entry->sw_marker_id,
4220                                                m_entry->lg_act_idx);
4221         } else {
4222                 u16 vsi_handle = new_fltr->vsi_handle;
4223                 enum ice_adminq_opc opcode;
4224
4225                 if (!m_entry->vsi_list_info)
4226                         return ICE_ERR_CFG;
4227
4228                 /* A rule already exists with the new VSI being added */
4229                 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
4230                         return ICE_SUCCESS;
4231
4232                 /* Update the previously created VSI list set with
4233                  * the new VSI ID passed in
4234                  */
4235                 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
4236                 opcode = ice_aqc_opc_update_sw_rules;
4237
4238                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
4239                                                   vsi_list_id, false, opcode,
4240                                                   new_fltr->lkup_type);
4241                 /* update VSI list mapping info with new VSI ID */
4242                 if (!status)
4243                         ice_set_bit(vsi_handle,
4244                                     m_entry->vsi_list_info->vsi_map);
4245         }
4246         if (!status)
4247                 m_entry->vsi_count++;
4248         return status;
4249 }
4250
4251 /**
4252  * ice_find_rule_entry - Search a rule entry
4253  * @list_head: head of rule list
4254  * @f_info: rule information
4255  *
4256  * Helper function to search for a given rule entry
4257  * Returns pointer to entry storing the rule if found
4258  */
4259 static struct ice_fltr_mgmt_list_entry *
4260 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4261                     struct ice_fltr_info *f_info)
4262 {
4263         struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
4264
4265         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4266                             list_entry) {
4267                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4268                             sizeof(f_info->l_data)) &&
4269                     f_info->flag == list_itr->fltr_info.flag) {
4270                         ret = list_itr;
4271                         break;
4272                 }
4273         }
4274         return ret;
4275 }
4276
4277 /**
4278  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4279  * @recp_list: VSI lists needs to be searched
4280  * @vsi_handle: VSI handle to be found in VSI list
4281  * @vsi_list_id: VSI list ID found containing vsi_handle
4282  *
4283  * Helper function to search a VSI list with single entry containing given VSI
4284  * handle element. This can be extended further to search VSI list with more
4285  * than 1 vsi_count. Returns pointer to VSI list entry if found.
4286  */
4287 static struct ice_vsi_list_map_info *
4288 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
4289                         u16 *vsi_list_id)
4290 {
4291         struct ice_vsi_list_map_info *map_info = NULL;
4292         struct LIST_HEAD_TYPE *list_head;
4293
4294         list_head = &recp_list->filt_rules;
4295         if (recp_list->adv_rule) {
4296                 struct ice_adv_fltr_mgmt_list_entry *list_itr;
4297
4298                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4299                                     ice_adv_fltr_mgmt_list_entry,
4300                                     list_entry) {
4301                         if (list_itr->vsi_list_info) {
4302                                 map_info = list_itr->vsi_list_info;
4303                                 if (ice_is_bit_set(map_info->vsi_map,
4304                                                    vsi_handle)) {
4305                                         *vsi_list_id = map_info->vsi_list_id;
4306                                         return map_info;
4307                                 }
4308                         }
4309                 }
4310         } else {
4311                 struct ice_fltr_mgmt_list_entry *list_itr;
4312
4313                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4314                                     ice_fltr_mgmt_list_entry,
4315                                     list_entry) {
4316                         if (list_itr->vsi_count == 1 &&
4317                             list_itr->vsi_list_info) {
4318                                 map_info = list_itr->vsi_list_info;
4319                                 if (ice_is_bit_set(map_info->vsi_map,
4320                                                    vsi_handle)) {
4321                                         *vsi_list_id = map_info->vsi_list_id;
4322                                         return map_info;
4323                                 }
4324                         }
4325                 }
4326         }
4327         return NULL;
4328 }
4329
4330 /**
4331  * ice_add_rule_internal - add rule for a given lookup type
4332  * @hw: pointer to the hardware structure
4333  * @recp_list: recipe list for which rule has to be added
4334  * @lport: logic port number on which function add rule
4335  * @f_entry: structure containing MAC forwarding information
4336  *
4337  * Adds or updates the rule lists for a given recipe
4338  */
4339 static enum ice_status
4340 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4341                       u8 lport, struct ice_fltr_list_entry *f_entry)
4342 {
4343         struct ice_fltr_info *new_fltr, *cur_fltr;
4344         struct ice_fltr_mgmt_list_entry *m_entry;
4345         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4346         enum ice_status status = ICE_SUCCESS;
4347
4348         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4349                 return ICE_ERR_PARAM;
4350
4351         /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4352         if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4353                 f_entry->fltr_info.fwd_id.hw_vsi_id =
4354                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4355
4356         rule_lock = &recp_list->filt_rule_lock;
4357
4358         ice_acquire_lock(rule_lock);
4359         new_fltr = &f_entry->fltr_info;
4360         if (new_fltr->flag & ICE_FLTR_RX)
4361                 new_fltr->src = lport;
4362         else if (new_fltr->flag & ICE_FLTR_TX)
4363                 new_fltr->src =
4364                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4365
4366         m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4367         if (!m_entry) {
4368                 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4369                 goto exit_add_rule_internal;
4370         }
4371
4372         cur_fltr = &m_entry->fltr_info;
4373         status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4374
4375 exit_add_rule_internal:
4376         ice_release_lock(rule_lock);
4377         return status;
4378 }
4379
4380 /**
4381  * ice_remove_vsi_list_rule
4382  * @hw: pointer to the hardware structure
4383  * @vsi_list_id: VSI list ID generated as part of allocate resource
4384  * @lkup_type: switch rule filter lookup type
4385  *
4386  * The VSI list should be emptied before this function is called to remove the
4387  * VSI list.
4388  */
4389 static enum ice_status
4390 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4391                          enum ice_sw_lkup_type lkup_type)
4392 {
4393         /* Free the vsi_list resource that we allocated. It is assumed that the
4394          * list is empty at this point.
4395          */
4396         return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4397                                             ice_aqc_opc_free_res);
4398 }
4399
4400 /**
4401  * ice_rem_update_vsi_list
4402  * @hw: pointer to the hardware structure
4403  * @vsi_handle: VSI handle of the VSI to remove
4404  * @fm_list: filter management entry for which the VSI list management needs to
4405  *           be done
4406  */
4407 static enum ice_status
4408 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
4409                         struct ice_fltr_mgmt_list_entry *fm_list)
4410 {
4411         enum ice_sw_lkup_type lkup_type;
4412         enum ice_status status = ICE_SUCCESS;
4413         u16 vsi_list_id;
4414
4415         if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
4416             fm_list->vsi_count == 0)
4417                 return ICE_ERR_PARAM;
4418
4419         /* A rule with the VSI being removed does not exist */
4420         if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
4421                 return ICE_ERR_DOES_NOT_EXIST;
4422
4423         lkup_type = fm_list->fltr_info.lkup_type;
4424         vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
4425         status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
4426                                           ice_aqc_opc_update_sw_rules,
4427                                           lkup_type);
4428         if (status)
4429                 return status;
4430
4431         fm_list->vsi_count--;
4432         ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
4433
4434         if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
4435                 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
4436                 struct ice_vsi_list_map_info *vsi_list_info =
4437                         fm_list->vsi_list_info;
4438                 u16 rem_vsi_handle;
4439
4440                 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
4441                                                     ICE_MAX_VSI);
4442                 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
4443                         return ICE_ERR_OUT_OF_RANGE;
4444
4445                 /* Make sure VSI list is empty before removing it below */
4446                 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
4447                                                   vsi_list_id, true,
4448                                                   ice_aqc_opc_update_sw_rules,
4449                                                   lkup_type);
4450                 if (status)
4451                         return status;
4452
4453                 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
4454                 tmp_fltr_info.fwd_id.hw_vsi_id =
4455                         ice_get_hw_vsi_num(hw, rem_vsi_handle);
4456                 tmp_fltr_info.vsi_handle = rem_vsi_handle;
4457                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
4458                 if (status) {
4459                         ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
4460                                   tmp_fltr_info.fwd_id.hw_vsi_id, status);
4461                         return status;
4462                 }
4463
4464                 fm_list->fltr_info = tmp_fltr_info;
4465         }
4466
4467         if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
4468             (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
4469                 struct ice_vsi_list_map_info *vsi_list_info =
4470                         fm_list->vsi_list_info;
4471
4472                 /* Remove the VSI list since it is no longer used */
4473                 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
4474                 if (status) {
4475                         ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
4476                                   vsi_list_id, status);
4477                         return status;
4478                 }
4479
4480                 LIST_DEL(&vsi_list_info->list_entry);
4481                 ice_free(hw, vsi_list_info);
4482                 fm_list->vsi_list_info = NULL;
4483         }
4484
4485         return status;
4486 }
4487
4488 /**
4489  * ice_remove_rule_internal - Remove a filter rule of a given type
4490  *
4491  * @hw: pointer to the hardware structure
4492  * @recp_list: recipe list for which the rule needs to removed
4493  * @f_entry: rule entry containing filter information
4494  */
4495 static enum ice_status
4496 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4497                          struct ice_fltr_list_entry *f_entry)
4498 {
4499         struct ice_fltr_mgmt_list_entry *list_elem;
4500         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4501         enum ice_status status = ICE_SUCCESS;
4502         bool remove_rule = false;
4503         u16 vsi_handle;
4504
4505         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4506                 return ICE_ERR_PARAM;
4507         f_entry->fltr_info.fwd_id.hw_vsi_id =
4508                 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4509
4510         rule_lock = &recp_list->filt_rule_lock;
4511         ice_acquire_lock(rule_lock);
4512         list_elem = ice_find_rule_entry(&recp_list->filt_rules,
4513                                         &f_entry->fltr_info);
4514         if (!list_elem) {
4515                 status = ICE_ERR_DOES_NOT_EXIST;
4516                 goto exit;
4517         }
4518
4519         if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
4520                 remove_rule = true;
4521         } else if (!list_elem->vsi_list_info) {
4522                 status = ICE_ERR_DOES_NOT_EXIST;
4523                 goto exit;
4524         } else if (list_elem->vsi_list_info->ref_cnt > 1) {
4525                 /* a ref_cnt > 1 indicates that the vsi_list is being
4526                  * shared by multiple rules. Decrement the ref_cnt and
4527                  * remove this rule, but do not modify the list, as it
4528                  * is in-use by other rules.
4529                  */
4530                 list_elem->vsi_list_info->ref_cnt--;
4531                 remove_rule = true;
4532         } else {
4533                 /* a ref_cnt of 1 indicates the vsi_list is only used
4534                  * by one rule. However, the original removal request is only
4535                  * for a single VSI. Update the vsi_list first, and only
4536                  * remove the rule if there are no further VSIs in this list.
4537                  */
4538                 vsi_handle = f_entry->fltr_info.vsi_handle;
4539                 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4540                 if (status)
4541                         goto exit;
4542                 /* if VSI count goes to zero after updating the VSI list */
4543                 if (list_elem->vsi_count == 0)
4544                         remove_rule = true;
4545         }
4546
4547         if (remove_rule) {
4548                 /* Remove the lookup rule */
4549                 struct ice_aqc_sw_rules_elem *s_rule;
4550
4551                 s_rule = (struct ice_aqc_sw_rules_elem *)
4552                         ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4553                 if (!s_rule) {
4554                         status = ICE_ERR_NO_MEMORY;
4555                         goto exit;
4556                 }
4557
4558                 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4559                                  ice_aqc_opc_remove_sw_rules);
4560
4561                 status = ice_aq_sw_rules(hw, s_rule,
4562                                          ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4563                                          ice_aqc_opc_remove_sw_rules, NULL);
4564
4565                 /* Remove a book keeping from the list */
4566                 ice_free(hw, s_rule);
4567
4568                 if (status)
4569                         goto exit;
4570
4571                 LIST_DEL(&list_elem->list_entry);
4572                 ice_free(hw, list_elem);
4573         }
4574 exit:
4575         ice_release_lock(rule_lock);
4576         return status;
4577 }
4578
4579 /**
4580  * ice_aq_get_res_alloc - get allocated resources
4581  * @hw: pointer to the HW struct
4582  * @num_entries: pointer to u16 to store the number of resource entries returned
4583  * @buf: pointer to buffer
4584  * @buf_size: size of buf
4585  * @cd: pointer to command details structure or NULL
4586  *
4587  * The caller-supplied buffer must be large enough to store the resource
4588  * information for all resource types. Each resource type is an
4589  * ice_aqc_get_res_resp_elem structure.
4590  */
4591 enum ice_status
4592 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4593                      struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4594                      struct ice_sq_cd *cd)
4595 {
4596         struct ice_aqc_get_res_alloc *resp;
4597         enum ice_status status;
4598         struct ice_aq_desc desc;
4599
4600         if (!buf)
4601                 return ICE_ERR_BAD_PTR;
4602
4603         if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4604                 return ICE_ERR_INVAL_SIZE;
4605
4606         resp = &desc.params.get_res;
4607
4608         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4609         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4610
4611         if (!status && num_entries)
4612                 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4613
4614         return status;
4615 }
4616
4617 /**
4618  * ice_aq_get_res_descs - get allocated resource descriptors
4619  * @hw: pointer to the hardware structure
4620  * @num_entries: number of resource entries in buffer
4621  * @buf: structure to hold response data buffer
4622  * @buf_size: size of buffer
4623  * @res_type: resource type
4624  * @res_shared: is resource shared
4625  * @desc_id: input - first desc ID to start; output - next desc ID
4626  * @cd: pointer to command details structure or NULL
4627  */
4628 enum ice_status
4629 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4630                      struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4631                      bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4632 {
4633         struct ice_aqc_get_allocd_res_desc *cmd;
4634         struct ice_aq_desc desc;
4635         enum ice_status status;
4636
4637         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4638
4639         cmd = &desc.params.get_res_desc;
4640
4641         if (!buf)
4642                 return ICE_ERR_PARAM;
4643
4644         if (buf_size != (num_entries * sizeof(*buf)))
4645                 return ICE_ERR_PARAM;
4646
4647         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4648
4649         cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4650                                          ICE_AQC_RES_TYPE_M) | (res_shared ?
4651                                         ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4652         cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4653
4654         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4655         if (!status)
4656                 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4657
4658         return status;
4659 }
4660
4661 /**
4662  * ice_add_mac_rule - Add a MAC address based filter rule
4663  * @hw: pointer to the hardware structure
4664  * @m_list: list of MAC addresses and forwarding information
4665  * @sw: pointer to switch info struct for which function add rule
4666  * @lport: logic port number on which function add rule
4667  *
4668  * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4669  * multiple unicast addresses, the function assumes that all the
4670  * addresses are unique in a given add_mac call. It doesn't
4671  * check for duplicates in this case, removing duplicates from a given
4672  * list should be taken care of in the caller of this function.
4673  */
4674 static enum ice_status
4675 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4676                  struct ice_switch_info *sw, u8 lport)
4677 {
4678         struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4679         struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4680         struct ice_fltr_list_entry *m_list_itr;
4681         struct LIST_HEAD_TYPE *rule_head;
4682         u16 total_elem_left, s_rule_size;
4683         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4684         enum ice_status status = ICE_SUCCESS;
4685         u16 num_unicast = 0;
4686         u8 elem_sent;
4687
4688         s_rule = NULL;
4689         rule_lock = &recp_list->filt_rule_lock;
4690         rule_head = &recp_list->filt_rules;
4691
4692         LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4693                             list_entry) {
4694                 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4695                 u16 vsi_handle;
4696                 u16 hw_vsi_id;
4697
4698                 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4699                 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4700                 if (!ice_is_vsi_valid(hw, vsi_handle))
4701                         return ICE_ERR_PARAM;
4702                 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4703                 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4704                 /* update the src in case it is VSI num */
4705                 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4706                         return ICE_ERR_PARAM;
4707                 m_list_itr->fltr_info.src = hw_vsi_id;
4708                 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4709                     IS_ZERO_ETHER_ADDR(add))
4710                         return ICE_ERR_PARAM;
4711                 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4712                         /* Don't overwrite the unicast address */
4713                         ice_acquire_lock(rule_lock);
4714                         if (ice_find_rule_entry(rule_head,
4715                                                 &m_list_itr->fltr_info)) {
4716                                 ice_release_lock(rule_lock);
4717                                 return ICE_ERR_ALREADY_EXISTS;
4718                         }
4719                         ice_release_lock(rule_lock);
4720                         num_unicast++;
4721                 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4722                            (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
4723                         m_list_itr->status =
4724                                 ice_add_rule_internal(hw, recp_list, lport,
4725                                                       m_list_itr);
4726                         if (m_list_itr->status)
4727                                 return m_list_itr->status;
4728                 }
4729         }
4730
4731         ice_acquire_lock(rule_lock);
4732         /* Exit if no suitable entries were found for adding bulk switch rule */
4733         if (!num_unicast) {
4734                 status = ICE_SUCCESS;
4735                 goto ice_add_mac_exit;
4736         }
4737
4738         /* Allocate switch rule buffer for the bulk update for unicast */
4739         s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4740         s_rule = (struct ice_aqc_sw_rules_elem *)
4741                 ice_calloc(hw, num_unicast, s_rule_size);
4742         if (!s_rule) {
4743                 status = ICE_ERR_NO_MEMORY;
4744                 goto ice_add_mac_exit;
4745         }
4746
4747         r_iter = s_rule;
4748         LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4749                             list_entry) {
4750                 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4751                 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4752
4753                 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4754                         ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4755                                          ice_aqc_opc_add_sw_rules);
4756                         r_iter = (struct ice_aqc_sw_rules_elem *)
4757                                 ((u8 *)r_iter + s_rule_size);
4758                 }
4759         }
4760
4761         /* Call AQ bulk switch rule update for all unicast addresses */
4762         r_iter = s_rule;
4763         /* Call AQ switch rule in AQ_MAX chunk */
4764         for (total_elem_left = num_unicast; total_elem_left > 0;
4765              total_elem_left -= elem_sent) {
4766                 struct ice_aqc_sw_rules_elem *entry = r_iter;
4767
4768                 elem_sent = MIN_T(u8, total_elem_left,
4769                                   (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4770                 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4771                                          elem_sent, ice_aqc_opc_add_sw_rules,
4772                                          NULL);
4773                 if (status)
4774                         goto ice_add_mac_exit;
4775                 r_iter = (struct ice_aqc_sw_rules_elem *)
4776                         ((u8 *)r_iter + (elem_sent * s_rule_size));
4777         }
4778
4779         /* Fill up rule ID based on the value returned from FW */
4780         r_iter = s_rule;
4781         LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4782                             list_entry) {
4783                 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4784                 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4785                 struct ice_fltr_mgmt_list_entry *fm_entry;
4786
4787                 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4788                         f_info->fltr_rule_id =
4789                                 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4790                         f_info->fltr_act = ICE_FWD_TO_VSI;
4791                         /* Create an entry to track this MAC address */
4792                         fm_entry = (struct ice_fltr_mgmt_list_entry *)
4793                                 ice_malloc(hw, sizeof(*fm_entry));
4794                         if (!fm_entry) {
4795                                 status = ICE_ERR_NO_MEMORY;
4796                                 goto ice_add_mac_exit;
4797                         }
4798                         fm_entry->fltr_info = *f_info;
4799                         fm_entry->vsi_count = 1;
4800                         /* The book keeping entries will get removed when
4801                          * base driver calls remove filter AQ command
4802                          */
4803
4804                         LIST_ADD(&fm_entry->list_entry, rule_head);
4805                         r_iter = (struct ice_aqc_sw_rules_elem *)
4806                                 ((u8 *)r_iter + s_rule_size);
4807                 }
4808         }
4809
4810 ice_add_mac_exit:
4811         ice_release_lock(rule_lock);
4812         if (s_rule)
4813                 ice_free(hw, s_rule);
4814         return status;
4815 }
4816
4817 /**
4818  * ice_add_mac - Add a MAC address based filter rule
4819  * @hw: pointer to the hardware structure
4820  * @m_list: list of MAC addresses and forwarding information
4821  *
4822  * Function add MAC rule for logical port from HW struct
4823  */
4824 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4825 {
4826         if (!m_list || !hw)
4827                 return ICE_ERR_PARAM;
4828
4829         return ice_add_mac_rule(hw, m_list, hw->switch_info,
4830                                 hw->port_info->lport);
4831 }
4832
4833 /**
4834  * ice_add_vlan_internal - Add one VLAN based filter rule
4835  * @hw: pointer to the hardware structure
4836  * @recp_list: recipe list for which rule has to be added
4837  * @f_entry: filter entry containing one VLAN information
4838  */
4839 static enum ice_status
4840 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4841                       struct ice_fltr_list_entry *f_entry)
4842 {
4843         struct ice_fltr_mgmt_list_entry *v_list_itr;
4844         struct ice_fltr_info *new_fltr, *cur_fltr;
4845         enum ice_sw_lkup_type lkup_type;
4846         u16 vsi_list_id = 0, vsi_handle;
4847         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4848         enum ice_status status = ICE_SUCCESS;
4849
4850         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4851                 return ICE_ERR_PARAM;
4852
4853         f_entry->fltr_info.fwd_id.hw_vsi_id =
4854                 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4855         new_fltr = &f_entry->fltr_info;
4856
4857         /* VLAN ID should only be 12 bits */
4858         if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4859                 return ICE_ERR_PARAM;
4860
4861         if (new_fltr->src_id != ICE_SRC_ID_VSI)
4862                 return ICE_ERR_PARAM;
4863
4864         new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4865         lkup_type = new_fltr->lkup_type;
4866         vsi_handle = new_fltr->vsi_handle;
4867         rule_lock = &recp_list->filt_rule_lock;
4868         ice_acquire_lock(rule_lock);
4869         v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4870         if (!v_list_itr) {
4871                 struct ice_vsi_list_map_info *map_info = NULL;
4872
4873                 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4874                         /* All VLAN pruning rules use a VSI list. Check if
4875                          * there is already a VSI list containing VSI that we
4876                          * want to add. If found, use the same vsi_list_id for
4877                          * this new VLAN rule or else create a new list.
4878                          */
4879                         map_info = ice_find_vsi_list_entry(recp_list,
4880                                                            vsi_handle,
4881                                                            &vsi_list_id);
4882                         if (!map_info) {
4883                                 status = ice_create_vsi_list_rule(hw,
4884                                                                   &vsi_handle,
4885                                                                   1,
4886                                                                   &vsi_list_id,
4887                                                                   lkup_type);
4888                                 if (status)
4889                                         goto exit;
4890                         }
4891                         /* Convert the action to forwarding to a VSI list. */
4892                         new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4893                         new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4894                 }
4895
4896                 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4897                 if (!status) {
4898                         v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4899                                                          new_fltr);
4900                         if (!v_list_itr) {
4901                                 status = ICE_ERR_DOES_NOT_EXIST;
4902                                 goto exit;
4903                         }
4904                         /* reuse VSI list for new rule and increment ref_cnt */
4905                         if (map_info) {
4906                                 v_list_itr->vsi_list_info = map_info;
4907                                 map_info->ref_cnt++;
4908                         } else {
4909                                 v_list_itr->vsi_list_info =
4910                                         ice_create_vsi_list_map(hw, &vsi_handle,
4911                                                                 1, vsi_list_id);
4912                         }
4913                 }
4914         } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4915                 /* Update existing VSI list to add new VSI ID only if it used
4916                  * by one VLAN rule.
4917                  */
4918                 cur_fltr = &v_list_itr->fltr_info;
4919                 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4920                                                  new_fltr);
4921         } else {
4922                 /* If VLAN rule exists and VSI list being used by this rule is
4923                  * referenced by more than 1 VLAN rule. Then create a new VSI
4924                  * list appending previous VSI with new VSI and update existing
4925                  * VLAN rule to point to new VSI list ID
4926                  */
4927                 struct ice_fltr_info tmp_fltr;
4928                 u16 vsi_handle_arr[2];
4929                 u16 cur_handle;
4930
4931                 /* Current implementation only supports reusing VSI list with
4932                  * one VSI count. We should never hit below condition
4933                  */
4934                 if (v_list_itr->vsi_count > 1 &&
4935                     v_list_itr->vsi_list_info->ref_cnt > 1) {
4936                         ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4937                         status = ICE_ERR_CFG;
4938                         goto exit;
4939                 }
4940
4941                 cur_handle =
4942                         ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4943                                            ICE_MAX_VSI);
4944
4945                 /* A rule already exists with the new VSI being added */
4946                 if (cur_handle == vsi_handle) {
4947                         status = ICE_ERR_ALREADY_EXISTS;
4948                         goto exit;
4949                 }
4950
4951                 vsi_handle_arr[0] = cur_handle;
4952                 vsi_handle_arr[1] = vsi_handle;
4953                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4954                                                   &vsi_list_id, lkup_type);
4955                 if (status)
4956                         goto exit;
4957
4958                 tmp_fltr = v_list_itr->fltr_info;
4959                 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4960                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4961                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4962                 /* Update the previous switch rule to a new VSI list which
4963                  * includes current VSI that is requested
4964                  */
4965                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4966                 if (status)
4967                         goto exit;
4968
4969                 /* before overriding VSI list map info. decrement ref_cnt of
4970                  * previous VSI list
4971                  */
4972                 v_list_itr->vsi_list_info->ref_cnt--;
4973
4974                 /* now update to newly created list */
4975                 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4976                 v_list_itr->vsi_list_info =
4977                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4978                                                 vsi_list_id);
4979                 v_list_itr->vsi_count++;
4980         }
4981
4982 exit:
4983         ice_release_lock(rule_lock);
4984         return status;
4985 }
4986
4987 /**
4988  * ice_add_vlan_rule - Add VLAN based filter rule
4989  * @hw: pointer to the hardware structure
4990  * @v_list: list of VLAN entries and forwarding information
4991  * @sw: pointer to switch info struct for which function add rule
4992  */
4993 static enum ice_status
4994 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4995                   struct ice_switch_info *sw)
4996 {
4997         struct ice_fltr_list_entry *v_list_itr;
4998         struct ice_sw_recipe *recp_list;
4999
5000         recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
5001         LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
5002                             list_entry) {
5003                 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
5004                         return ICE_ERR_PARAM;
5005                 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
5006                 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
5007                                                            v_list_itr);
5008                 if (v_list_itr->status)
5009                         return v_list_itr->status;
5010         }
5011         return ICE_SUCCESS;
5012 }
5013
5014 /**
5015  * ice_add_vlan - Add a VLAN based filter rule
5016  * @hw: pointer to the hardware structure
5017  * @v_list: list of VLAN and forwarding information
5018  *
5019  * Function add VLAN rule for logical port from HW struct
5020  */
5021 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5022 {
5023         if (!v_list || !hw)
5024                 return ICE_ERR_PARAM;
5025
5026         return ice_add_vlan_rule(hw, v_list, hw->switch_info);
5027 }
5028
5029 /**
5030  * ice_add_mac_vlan_rule - Add MAC and VLAN pair based filter rule
5031  * @hw: pointer to the hardware structure
5032  * @mv_list: list of MAC and VLAN filters
5033  * @sw: pointer to switch info struct for which function add rule
5034  * @lport: logic port number on which function add rule
5035  *
5036  * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
5037  * pruning bits enabled, then it is the responsibility of the caller to make
5038  * sure to add a VLAN only filter on the same VSI. Packets belonging to that
5039  * VLAN won't be received on that VSI otherwise.
5040  */
5041 static enum ice_status
5042 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
5043                       struct ice_switch_info *sw, u8 lport)
5044 {
5045         struct ice_fltr_list_entry *mv_list_itr;
5046         struct ice_sw_recipe *recp_list;
5047
5048         if (!mv_list || !hw)
5049                 return ICE_ERR_PARAM;
5050
5051         recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
5052         LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
5053                             list_entry) {
5054                 enum ice_sw_lkup_type l_type =
5055                         mv_list_itr->fltr_info.lkup_type;
5056
5057                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5058                         return ICE_ERR_PARAM;
5059                 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
5060                 mv_list_itr->status =
5061                         ice_add_rule_internal(hw, recp_list, lport,
5062                                               mv_list_itr);
5063                 if (mv_list_itr->status)
5064                         return mv_list_itr->status;
5065         }
5066         return ICE_SUCCESS;
5067 }
5068
5069 /**
5070  * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
5071  * @hw: pointer to the hardware structure
5072  * @mv_list: list of MAC VLAN addresses and forwarding information
5073  *
5074  * Function add MAC VLAN rule for logical port from HW struct
5075  */
5076 enum ice_status
5077 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5078 {
5079         if (!mv_list || !hw)
5080                 return ICE_ERR_PARAM;
5081
5082         return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
5083                                      hw->port_info->lport);
5084 }
5085
5086 /**
5087  * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
5088  * @hw: pointer to the hardware structure
5089  * @em_list: list of ether type MAC filter, MAC is optional
5090  * @sw: pointer to switch info struct for which function add rule
5091  * @lport: logic port number on which function add rule
5092  *
5093  * This function requires the caller to populate the entries in
5094  * the filter list with the necessary fields (including flags to
5095  * indicate Tx or Rx rules).
5096  */
5097 static enum ice_status
5098 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5099                      struct ice_switch_info *sw, u8 lport)
5100 {
5101         struct ice_fltr_list_entry *em_list_itr;
5102
5103         LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
5104                             list_entry) {
5105                 struct ice_sw_recipe *recp_list;
5106                 enum ice_sw_lkup_type l_type;
5107
5108                 l_type = em_list_itr->fltr_info.lkup_type;
5109                 recp_list = &sw->recp_list[l_type];
5110
5111                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5112                     l_type != ICE_SW_LKUP_ETHERTYPE)
5113                         return ICE_ERR_PARAM;
5114
5115                 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
5116                                                             lport,
5117                                                             em_list_itr);
5118                 if (em_list_itr->status)
5119                         return em_list_itr->status;
5120         }
5121         return ICE_SUCCESS;
5122 }
5123
5124 /**
5125  * ice_add_eth_mac - Add a ethertype based filter rule
5126  * @hw: pointer to the hardware structure
5127  * @em_list: list of ethertype and forwarding information
5128  *
5129  * Function add ethertype rule for logical port from HW struct
5130  */
5131 enum ice_status
5132 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5133 {
5134         if (!em_list || !hw)
5135                 return ICE_ERR_PARAM;
5136
5137         return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5138                                     hw->port_info->lport);
5139 }
5140
5141 /**
5142  * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
5143  * @hw: pointer to the hardware structure
5144  * @em_list: list of ethertype or ethertype MAC entries
5145  * @sw: pointer to switch info struct for which function add rule
5146  */
5147 static enum ice_status
5148 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5149                         struct ice_switch_info *sw)
5150 {
5151         struct ice_fltr_list_entry *em_list_itr, *tmp;
5152
5153         LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
5154                                  list_entry) {
5155                 struct ice_sw_recipe *recp_list;
5156                 enum ice_sw_lkup_type l_type;
5157
5158                 l_type = em_list_itr->fltr_info.lkup_type;
5159
5160                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5161                     l_type != ICE_SW_LKUP_ETHERTYPE)
5162                         return ICE_ERR_PARAM;
5163
5164                 recp_list = &sw->recp_list[l_type];
5165                 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5166                                                                em_list_itr);
5167                 if (em_list_itr->status)
5168                         return em_list_itr->status;
5169         }
5170         return ICE_SUCCESS;
5171 }
5172
5173 /**
5174  * ice_remove_eth_mac - remove a ethertype based filter rule
5175  * @hw: pointer to the hardware structure
5176  * @em_list: list of ethertype and forwarding information
5177  *
5178  */
5179 enum ice_status
5180 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5181 {
5182         if (!em_list || !hw)
5183                 return ICE_ERR_PARAM;
5184
5185         return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
5186 }
5187
5188 /**
5189  * ice_rem_sw_rule_info
5190  * @hw: pointer to the hardware structure
5191  * @rule_head: pointer to the switch list structure that we want to delete
5192  */
5193 static void
5194 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5195 {
5196         if (!LIST_EMPTY(rule_head)) {
5197                 struct ice_fltr_mgmt_list_entry *entry;
5198                 struct ice_fltr_mgmt_list_entry *tmp;
5199
5200                 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
5201                                          ice_fltr_mgmt_list_entry, list_entry) {
5202                         LIST_DEL(&entry->list_entry);
5203                         ice_free(hw, entry);
5204                 }
5205         }
5206 }
5207
5208 /**
5209  * ice_rem_adv_rule_info
5210  * @hw: pointer to the hardware structure
5211  * @rule_head: pointer to the switch list structure that we want to delete
5212  */
5213 static void
5214 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5215 {
5216         struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
5217         struct ice_adv_fltr_mgmt_list_entry *lst_itr;
5218
5219         if (LIST_EMPTY(rule_head))
5220                 return;
5221
5222         LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
5223                                  ice_adv_fltr_mgmt_list_entry, list_entry) {
5224                 LIST_DEL(&lst_itr->list_entry);
5225                 ice_free(hw, lst_itr->lkups);
5226                 ice_free(hw, lst_itr);
5227         }
5228 }
5229
5230 /**
5231  * ice_rem_all_sw_rules_info
5232  * @hw: pointer to the hardware structure
5233  */
5234 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
5235 {
5236         struct ice_switch_info *sw = hw->switch_info;
5237         u8 i;
5238
5239         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5240                 struct LIST_HEAD_TYPE *rule_head;
5241
5242                 rule_head = &sw->recp_list[i].filt_rules;
5243                 if (!sw->recp_list[i].adv_rule)
5244                         ice_rem_sw_rule_info(hw, rule_head);
5245                 else
5246                         ice_rem_adv_rule_info(hw, rule_head);
5247                 if (sw->recp_list[i].adv_rule &&
5248                     LIST_EMPTY(&sw->recp_list[i].filt_rules))
5249                         sw->recp_list[i].adv_rule = false;
5250         }
5251 }
5252
5253 /**
5254  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
5255  * @pi: pointer to the port_info structure
5256  * @vsi_handle: VSI handle to set as default
5257  * @set: true to add the above mentioned switch rule, false to remove it
5258  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
5259  *
5260  * add filter rule to set/unset given VSI as default VSI for the switch
5261  * (represented by swid)
5262  */
5263 enum ice_status
5264 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
5265                  u8 direction)
5266 {
5267         struct ice_aqc_sw_rules_elem *s_rule;
5268         struct ice_fltr_info f_info;
5269         struct ice_hw *hw = pi->hw;
5270         enum ice_adminq_opc opcode;
5271         enum ice_status status;
5272         u16 s_rule_size;
5273         u16 hw_vsi_id;
5274
5275         if (!ice_is_vsi_valid(hw, vsi_handle))
5276                 return ICE_ERR_PARAM;
5277         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5278
5279         s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
5280                 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
5281
5282         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
5283         if (!s_rule)
5284                 return ICE_ERR_NO_MEMORY;
5285
5286         ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
5287
5288         f_info.lkup_type = ICE_SW_LKUP_DFLT;
5289         f_info.flag = direction;
5290         f_info.fltr_act = ICE_FWD_TO_VSI;
5291         f_info.fwd_id.hw_vsi_id = hw_vsi_id;
5292
5293         if (f_info.flag & ICE_FLTR_RX) {
5294                 f_info.src = pi->lport;
5295                 f_info.src_id = ICE_SRC_ID_LPORT;
5296                 if (!set)
5297                         f_info.fltr_rule_id =
5298                                 pi->dflt_rx_vsi_rule_id;
5299         } else if (f_info.flag & ICE_FLTR_TX) {
5300                 f_info.src_id = ICE_SRC_ID_VSI;
5301                 f_info.src = hw_vsi_id;
5302                 if (!set)
5303                         f_info.fltr_rule_id =
5304                                 pi->dflt_tx_vsi_rule_id;
5305         }
5306
5307         if (set)
5308                 opcode = ice_aqc_opc_add_sw_rules;
5309         else
5310                 opcode = ice_aqc_opc_remove_sw_rules;
5311
5312         ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
5313
5314         status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
5315         if (status || !(f_info.flag & ICE_FLTR_TX_RX))
5316                 goto out;
5317         if (set) {
5318                 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5319
5320                 if (f_info.flag & ICE_FLTR_TX) {
5321                         pi->dflt_tx_vsi_num = hw_vsi_id;
5322                         pi->dflt_tx_vsi_rule_id = index;
5323                 } else if (f_info.flag & ICE_FLTR_RX) {
5324                         pi->dflt_rx_vsi_num = hw_vsi_id;
5325                         pi->dflt_rx_vsi_rule_id = index;
5326                 }
5327         } else {
5328                 if (f_info.flag & ICE_FLTR_TX) {
5329                         pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
5330                         pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
5331                 } else if (f_info.flag & ICE_FLTR_RX) {
5332                         pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
5333                         pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
5334                 }
5335         }
5336
5337 out:
5338         ice_free(hw, s_rule);
5339         return status;
5340 }
5341
5342 /**
5343  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
5344  * @list_head: head of rule list
5345  * @f_info: rule information
5346  *
5347  * Helper function to search for a unicast rule entry - this is to be used
5348  * to remove unicast MAC filter that is not shared with other VSIs on the
5349  * PF switch.
5350  *
5351  * Returns pointer to entry storing the rule if found
5352  */
5353 static struct ice_fltr_mgmt_list_entry *
5354 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
5355                           struct ice_fltr_info *f_info)
5356 {
5357         struct ice_fltr_mgmt_list_entry *list_itr;
5358
5359         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
5360                             list_entry) {
5361                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
5362                             sizeof(f_info->l_data)) &&
5363                     f_info->fwd_id.hw_vsi_id ==
5364                     list_itr->fltr_info.fwd_id.hw_vsi_id &&
5365                     f_info->flag == list_itr->fltr_info.flag)
5366                         return list_itr;
5367         }
5368         return NULL;
5369 }
5370
5371 /**
5372  * ice_remove_mac_rule - remove a MAC based filter rule
5373  * @hw: pointer to the hardware structure
5374  * @m_list: list of MAC addresses and forwarding information
5375  * @recp_list: list from which function remove MAC address
5376  *
5377  * This function removes either a MAC filter rule or a specific VSI from a
5378  * VSI list for a multicast MAC address.
5379  *
5380  * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
5381  * ice_add_mac. Caller should be aware that this call will only work if all
5382  * the entries passed into m_list were added previously. It will not attempt to
5383  * do a partial remove of entries that were found.
5384  */
5385 static enum ice_status
5386 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
5387                     struct ice_sw_recipe *recp_list)
5388 {
5389         struct ice_fltr_list_entry *list_itr, *tmp;
5390         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5391
5392         if (!m_list)
5393                 return ICE_ERR_PARAM;
5394
5395         rule_lock = &recp_list->filt_rule_lock;
5396         LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
5397                                  list_entry) {
5398                 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
5399                 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
5400                 u16 vsi_handle;
5401
5402                 if (l_type != ICE_SW_LKUP_MAC)
5403                         return ICE_ERR_PARAM;
5404
5405                 vsi_handle = list_itr->fltr_info.vsi_handle;
5406                 if (!ice_is_vsi_valid(hw, vsi_handle))
5407                         return ICE_ERR_PARAM;
5408
5409                 list_itr->fltr_info.fwd_id.hw_vsi_id =
5410                                         ice_get_hw_vsi_num(hw, vsi_handle);
5411                 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
5412                         /* Don't remove the unicast address that belongs to
5413                          * another VSI on the switch, since it is not being
5414                          * shared...
5415                          */
5416                         ice_acquire_lock(rule_lock);
5417                         if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
5418                                                        &list_itr->fltr_info)) {
5419                                 ice_release_lock(rule_lock);
5420                                 return ICE_ERR_DOES_NOT_EXIST;
5421                         }
5422                         ice_release_lock(rule_lock);
5423                 }
5424                 list_itr->status = ice_remove_rule_internal(hw, recp_list,
5425                                                             list_itr);
5426                 if (list_itr->status)
5427                         return list_itr->status;
5428         }
5429         return ICE_SUCCESS;
5430 }
5431
5432 /**
5433  * ice_remove_mac - remove a MAC address based filter rule
5434  * @hw: pointer to the hardware structure
5435  * @m_list: list of MAC addresses and forwarding information
5436  *
5437  */
5438 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5439 {
5440         struct ice_sw_recipe *recp_list;
5441
5442         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5443         return ice_remove_mac_rule(hw, m_list, recp_list);
5444 }
5445
5446 /**
5447  * ice_remove_vlan_rule - Remove VLAN based filter rule
5448  * @hw: pointer to the hardware structure
5449  * @v_list: list of VLAN entries and forwarding information
5450  * @recp_list: list from which function remove VLAN
5451  */
5452 static enum ice_status
5453 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5454                      struct ice_sw_recipe *recp_list)
5455 {
5456         struct ice_fltr_list_entry *v_list_itr, *tmp;
5457
5458         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5459                                  list_entry) {
5460                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5461
5462                 if (l_type != ICE_SW_LKUP_VLAN)
5463                         return ICE_ERR_PARAM;
5464                 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5465                                                               v_list_itr);
5466                 if (v_list_itr->status)
5467                         return v_list_itr->status;
5468         }
5469         return ICE_SUCCESS;
5470 }
5471
5472 /**
5473  * ice_remove_vlan - remove a VLAN address based filter rule
5474  * @hw: pointer to the hardware structure
5475  * @v_list: list of VLAN and forwarding information
5476  *
5477  */
5478 enum ice_status
5479 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5480 {
5481         struct ice_sw_recipe *recp_list;
5482
5483         if (!v_list || !hw)
5484                 return ICE_ERR_PARAM;
5485
5486         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5487         return ice_remove_vlan_rule(hw, v_list, recp_list);
5488 }
5489
5490 /**
5491  * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5492  * @hw: pointer to the hardware structure
5493  * @v_list: list of MAC VLAN entries and forwarding information
5494  * @recp_list: list from which function remove MAC VLAN
5495  */
5496 static enum ice_status
5497 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5498                          struct ice_sw_recipe *recp_list)
5499 {
5500         struct ice_fltr_list_entry *v_list_itr, *tmp;
5501
5502         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5503         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5504                                  list_entry) {
5505                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5506
5507                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5508                         return ICE_ERR_PARAM;
5509                 v_list_itr->status =
5510                         ice_remove_rule_internal(hw, recp_list,
5511                                                  v_list_itr);
5512                 if (v_list_itr->status)
5513                         return v_list_itr->status;
5514         }
5515         return ICE_SUCCESS;
5516 }
5517
5518 /**
5519  * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5520  * @hw: pointer to the hardware structure
5521  * @mv_list: list of MAC VLAN and forwarding information
5522  */
5523 enum ice_status
5524 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5525 {
5526         struct ice_sw_recipe *recp_list;
5527
5528         if (!mv_list || !hw)
5529                 return ICE_ERR_PARAM;
5530
5531         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5532         return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5533 }
5534
5535 /**
5536  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5537  * @fm_entry: filter entry to inspect
5538  * @vsi_handle: VSI handle to compare with filter info
5539  */
5540 static bool
5541 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5542 {
5543         return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5544                  fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5545                 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5546                  fm_entry->vsi_list_info &&
5547                  (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5548                                  vsi_handle))));
5549 }
5550
5551 /**
5552  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5553  * @hw: pointer to the hardware structure
5554  * @vsi_handle: VSI handle to remove filters from
5555  * @vsi_list_head: pointer to the list to add entry to
5556  * @fi: pointer to fltr_info of filter entry to copy & add
5557  *
5558  * Helper function, used when creating a list of filters to remove from
5559  * a specific VSI. The entry added to vsi_list_head is a COPY of the
5560  * original filter entry, with the exception of fltr_info.fltr_act and
5561  * fltr_info.fwd_id fields. These are set such that later logic can
5562  * extract which VSI to remove the fltr from, and pass on that information.
5563  */
5564 static enum ice_status
5565 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5566                                struct LIST_HEAD_TYPE *vsi_list_head,
5567                                struct ice_fltr_info *fi)
5568 {
5569         struct ice_fltr_list_entry *tmp;
5570
5571         /* this memory is freed up in the caller function
5572          * once filters for this VSI are removed
5573          */
5574         tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5575         if (!tmp)
5576                 return ICE_ERR_NO_MEMORY;
5577
5578         tmp->fltr_info = *fi;
5579
5580         /* Overwrite these fields to indicate which VSI to remove filter from,
5581          * so find and remove logic can extract the information from the
5582          * list entries. Note that original entries will still have proper
5583          * values.
5584          */
5585         tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5586         tmp->fltr_info.vsi_handle = vsi_handle;
5587         tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5588
5589         LIST_ADD(&tmp->list_entry, vsi_list_head);
5590
5591         return ICE_SUCCESS;
5592 }
5593
5594 /**
5595  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5596  * @hw: pointer to the hardware structure
5597  * @vsi_handle: VSI handle to remove filters from
5598  * @lkup_list_head: pointer to the list that has certain lookup type filters
5599  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5600  *
5601  * Locates all filters in lkup_list_head that are used by the given VSI,
5602  * and adds COPIES of those entries to vsi_list_head (intended to be used
5603  * to remove the listed filters).
5604  * Note that this means all entries in vsi_list_head must be explicitly
5605  * deallocated by the caller when done with list.
5606  */
5607 static enum ice_status
5608 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5609                          struct LIST_HEAD_TYPE *lkup_list_head,
5610                          struct LIST_HEAD_TYPE *vsi_list_head)
5611 {
5612         struct ice_fltr_mgmt_list_entry *fm_entry;
5613         enum ice_status status = ICE_SUCCESS;
5614
5615         /* check to make sure VSI ID is valid and within boundary */
5616         if (!ice_is_vsi_valid(hw, vsi_handle))
5617                 return ICE_ERR_PARAM;
5618
5619         LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5620                             ice_fltr_mgmt_list_entry, list_entry) {
5621                 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
5622                         continue;
5623
5624                 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5625                                                         vsi_list_head,
5626                                                         &fm_entry->fltr_info);
5627                 if (status)
5628                         return status;
5629         }
5630         return status;
5631 }
5632
5633 /**
5634  * ice_determine_promisc_mask
5635  * @fi: filter info to parse
5636  *
5637  * Helper function to determine which ICE_PROMISC_ mask corresponds
5638  * to given filter into.
5639  */
5640 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5641 {
5642         u16 vid = fi->l_data.mac_vlan.vlan_id;
5643         u8 *macaddr = fi->l_data.mac.mac_addr;
5644         bool is_tx_fltr = false;
5645         u8 promisc_mask = 0;
5646
5647         if (fi->flag == ICE_FLTR_TX)
5648                 is_tx_fltr = true;
5649
5650         if (IS_BROADCAST_ETHER_ADDR(macaddr))
5651                 promisc_mask |= is_tx_fltr ?
5652                         ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5653         else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5654                 promisc_mask |= is_tx_fltr ?
5655                         ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5656         else if (IS_UNICAST_ETHER_ADDR(macaddr))
5657                 promisc_mask |= is_tx_fltr ?
5658                         ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
5659         if (vid)
5660                 promisc_mask |= is_tx_fltr ?
5661                         ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5662
5663         return promisc_mask;
5664 }
5665
5666 /**
5667  * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5668  * @hw: pointer to the hardware structure
5669  * @vsi_handle: VSI handle to retrieve info from
5670  * @promisc_mask: pointer to mask to be filled in
5671  * @vid: VLAN ID of promisc VLAN VSI
5672  * @sw: pointer to switch info struct for which function add rule
5673  */
5674 static enum ice_status
5675 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5676                      u16 *vid, struct ice_switch_info *sw)
5677 {
5678         struct ice_fltr_mgmt_list_entry *itr;
5679         struct LIST_HEAD_TYPE *rule_head;
5680         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5681
5682         if (!ice_is_vsi_valid(hw, vsi_handle))
5683                 return ICE_ERR_PARAM;
5684
5685         *vid = 0;
5686         *promisc_mask = 0;
5687         rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5688         rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5689
5690         ice_acquire_lock(rule_lock);
5691         LIST_FOR_EACH_ENTRY(itr, rule_head,
5692                             ice_fltr_mgmt_list_entry, list_entry) {
5693                 /* Continue if this filter doesn't apply to this VSI or the
5694                  * VSI ID is not in the VSI map for this filter
5695                  */
5696                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5697                         continue;
5698
5699                 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5700         }
5701         ice_release_lock(rule_lock);
5702
5703         return ICE_SUCCESS;
5704 }
5705
5706 /**
5707  * ice_get_vsi_promisc - get promiscuous mode of given VSI
5708  * @hw: pointer to the hardware structure
5709  * @vsi_handle: VSI handle to retrieve info from
5710  * @promisc_mask: pointer to mask to be filled in
5711  * @vid: VLAN ID of promisc VLAN VSI
5712  */
5713 enum ice_status
5714 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5715                     u16 *vid)
5716 {
5717         return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5718                                     vid, hw->switch_info);
5719 }
5720
5721 /**
5722  * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5723  * @hw: pointer to the hardware structure
5724  * @vsi_handle: VSI handle to retrieve info from
5725  * @promisc_mask: pointer to mask to be filled in
5726  * @vid: VLAN ID of promisc VLAN VSI
5727  * @sw: pointer to switch info struct for which function add rule
5728  */
5729 static enum ice_status
5730 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5731                           u16 *vid, struct ice_switch_info *sw)
5732 {
5733         struct ice_fltr_mgmt_list_entry *itr;
5734         struct LIST_HEAD_TYPE *rule_head;
5735         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5736
5737         if (!ice_is_vsi_valid(hw, vsi_handle))
5738                 return ICE_ERR_PARAM;
5739
5740         *vid = 0;
5741         *promisc_mask = 0;
5742         rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5743         rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5744
5745         ice_acquire_lock(rule_lock);
5746         LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5747                             list_entry) {
5748                 /* Continue if this filter doesn't apply to this VSI or the
5749                  * VSI ID is not in the VSI map for this filter
5750                  */
5751                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5752                         continue;
5753
5754                 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5755         }
5756         ice_release_lock(rule_lock);
5757
5758         return ICE_SUCCESS;
5759 }
5760
5761 /**
5762  * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5763  * @hw: pointer to the hardware structure
5764  * @vsi_handle: VSI handle to retrieve info from
5765  * @promisc_mask: pointer to mask to be filled in
5766  * @vid: VLAN ID of promisc VLAN VSI
5767  */
5768 enum ice_status
5769 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5770                          u16 *vid)
5771 {
5772         return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5773                                          vid, hw->switch_info);
5774 }
5775
5776 /**
5777  * ice_remove_promisc - Remove promisc based filter rules
5778  * @hw: pointer to the hardware structure
5779  * @recp_id: recipe ID for which the rule needs to removed
5780  * @v_list: list of promisc entries
5781  */
5782 static enum ice_status
5783 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5784                    struct LIST_HEAD_TYPE *v_list)
5785 {
5786         struct ice_fltr_list_entry *v_list_itr, *tmp;
5787         struct ice_sw_recipe *recp_list;
5788
5789         recp_list = &hw->switch_info->recp_list[recp_id];
5790         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5791                                  list_entry) {
5792                 v_list_itr->status =
5793                         ice_remove_rule_internal(hw, recp_list, v_list_itr);
5794                 if (v_list_itr->status)
5795                         return v_list_itr->status;
5796         }
5797         return ICE_SUCCESS;
5798 }
5799
/**
 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 * @sw: pointer to switch info struct for which function add rule
 *
 * Builds a temporary list of COPIES of all promisc rules that apply to
 * @vsi_handle and whose promisc bits are fully covered by @promisc_mask,
 * then removes those rules and frees the copies. Any VLAN_RX/VLAN_TX bit
 * in @promisc_mask routes the scan to the PROMISC_VLAN recipe (matching
 * @vid), otherwise the plain PROMISC recipe is used.
 */
static enum ice_status
_ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		       u16 vid, struct ice_switch_info *sw)
{
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct LIST_HEAD_TYPE remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* VLAN promisc bits select the PROMISC_VLAN recipe */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	/* collect matching rules under the lock; actual removal happens
	 * after the lock is dropped, operating on the copies
	 */
	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		/* for VLAN promisc, only rules matching @vid qualify */
		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			ice_release_lock(rule_lock);
			goto free_fltr_list;
		}
	}
	ice_release_lock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	/* free the copies regardless of success or failure */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
	}

	return status;
}
5874
5875 /**
5876  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5877  * @hw: pointer to the hardware structure
5878  * @vsi_handle: VSI handle to clear mode
5879  * @promisc_mask: mask of promiscuous config bits to clear
5880  * @vid: VLAN ID to clear VLAN promiscuous
5881  */
5882 enum ice_status
5883 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5884                       u8 promisc_mask, u16 vid)
5885 {
5886         return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5887                                       vid, hw->switch_info);
5888 }
5889
/**
 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 * @lport: logical port number to configure promisc mode
 * @sw: pointer to switch info struct for which function add rule
 *
 * Installs one promisc filter rule per direction/packet-type combination
 * present in @promisc_mask. Each loop iteration consumes exactly one
 * UCAST/MCAST/BCAST bit (plus at most one VLAN bit) from the mask; the
 * first failing rule aborts the loop.
 */
static enum ice_status
_ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		     u16 vid, u8 lport, struct ice_switch_info *sw)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	enum ice_status status = ICE_SUCCESS;
	bool is_tx_fltr;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);

	/* any VLAN bit in the mask selects the PROMISC_VLAN recipe and
	 * stamps @vid into every rule built below
	 */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		struct ice_sw_recipe *recp_list;
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		/* consume exactly one packet-type bit per iteration, in
		 * fixed priority order: UCAST, MCAST, BCAST (RX before TX)
		 */
		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
				   ICE_NONDMA_TO_NONDMA);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1;	/* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			/* TX rules match on the source VSI */
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			/* RX rules match on the logical port */
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;
		recp_list = &sw->recp_list[recipe_id];

		status = ice_add_rule_internal(hw, recp_list, lport,
					       &f_list_entry);
		if (status != ICE_SUCCESS)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}
6010
6011 /**
6012  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
6013  * @hw: pointer to the hardware structure
6014  * @vsi_handle: VSI handle to configure
6015  * @promisc_mask: mask of promiscuous config bits
6016  * @vid: VLAN ID to set VLAN promiscuous
6017  */
6018 enum ice_status
6019 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6020                     u16 vid)
6021 {
6022         return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
6023                                     hw->port_info->lport,
6024                                     hw->switch_info);
6025 }
6026
/**
 * _ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 * @lport: logical port number to configure promisc mode
 * @sw: pointer to switch info struct for which function add rule
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s).
 * Collects (copies of) all VLAN filters in use by @vsi_handle under the
 * VLAN recipe lock, then sets or clears promisc mode per VLAN ID. The
 * first failure stops the walk; the copied list entries are always freed.
 */
static enum ice_status
_ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			  bool rm_vlan_promisc, u8 lport,
			  struct ice_switch_info *sw)
{
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct LIST_HEAD_TYPE vsi_list_head;
	struct LIST_HEAD_TYPE *vlan_head;
	struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
	enum ice_status status;
	u16 vlan_id;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	/* snapshot the VLANs used by this VSI while holding the lock */
	ice_acquire_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	ice_release_lock(vlan_lock);
	if (status)
		goto free_fltr_list;

	/* apply (or clear) promisc mode per collected VLAN ID */
	LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
			    list_entry) {
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status =  _ice_clear_vsi_promisc(hw, vsi_handle,
							 promisc_mask,
							 vlan_id, sw);
		else
			status =  _ice_set_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id,
						       lport, sw);
		if (status)
			break;
	}

free_fltr_list:
	/* free the snapshot copies on all paths */
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&list_itr->list_entry);
		ice_free(hw, list_itr);
	}
	return status;
}
6083
6084 /**
6085  * ice_set_vlan_vsi_promisc
6086  * @hw: pointer to the hardware structure
6087  * @vsi_handle: VSI handle to configure
6088  * @promisc_mask: mask of promiscuous config bits
6089  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6090  *
6091  * Configure VSI with all associated VLANs to given promiscuous mode(s)
6092  */
6093 enum ice_status
6094 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6095                          bool rm_vlan_promisc)
6096 {
6097         return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
6098                                          rm_vlan_promisc, hw->port_info->lport,
6099                                          hw->switch_info);
6100 }
6101
/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @recp_list: recipe list from which function remove fltr
 * @lkup: switch rule filter lookup type
 *
 * Collects copies of all @lkup-type filters used by @vsi_handle (under the
 * recipe's rule lock), dispatches the removal to the lookup-type-specific
 * remove routine, and frees the copies on all paths.
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 struct ice_sw_recipe *recp_list,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_fltr_list_entry *fm_entry;
	struct LIST_HEAD_TYPE remove_list_head;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &recp_list[lkup].filt_rule_lock;
	rule_head = &recp_list[lkup].filt_rules;
	/* snapshot matching filters while holding the rule lock */
	ice_acquire_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	ice_release_lock(rule_lock);
	if (status)
		goto free_fltr_list;

	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		/* lkup doubles as the recipe ID for promisc removal */
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		/* NOTE(review): unlike the MAC/VLAN cases, this routes
		 * through the hw->switch_info-based wrapper rather than
		 * using the supplied recp_list — confirm this is intended.
		 */
		ice_remove_mac_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		ice_remove_eth_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_DFLT:
		ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
		break;
	case ICE_SW_LKUP_LAST:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
		break;
	}

free_fltr_list:
	/* free the snapshot copies regardless of outcome */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
	}
}
6164
6165 /**
6166  * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6167  * @hw: pointer to the hardware structure
6168  * @vsi_handle: VSI handle to remove filters from
6169  * @sw: pointer to switch info struct
6170  */
6171 static void
6172 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6173                          struct ice_switch_info *sw)
6174 {
6175         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6176
6177         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6178                                  sw->recp_list, ICE_SW_LKUP_MAC);
6179         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6180                                  sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6181         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6182                                  sw->recp_list, ICE_SW_LKUP_PROMISC);
6183         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6184                                  sw->recp_list, ICE_SW_LKUP_VLAN);
6185         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6186                                  sw->recp_list, ICE_SW_LKUP_DFLT);
6187         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6188                                  sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6189         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6190                                  sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6191         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6192                                  sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6193 }
6194
/**
 * ice_remove_vsi_fltr - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 *
 * Convenience wrapper: removes this VSI's filters of every lookup type
 * from the primary switch info (hw->switch_info).
 */
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
{
	ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
}
6204
6205 /**
6206  * ice_alloc_res_cntr - allocating resource counter
6207  * @hw: pointer to the hardware structure
6208  * @type: type of resource
6209  * @alloc_shared: if set it is shared else dedicated
6210  * @num_items: number of entries requested for FD resource type
6211  * @counter_id: counter index returned by AQ call
6212  */
6213 enum ice_status
6214 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6215                    u16 *counter_id)
6216 {
6217         struct ice_aqc_alloc_free_res_elem *buf;
6218         enum ice_status status;
6219         u16 buf_len;
6220
6221         /* Allocate resource */
6222         buf_len = ice_struct_size(buf, elem, 1);
6223         buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6224         if (!buf)
6225                 return ICE_ERR_NO_MEMORY;
6226
6227         buf->num_elems = CPU_TO_LE16(num_items);
6228         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6229                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
6230
6231         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6232                                        ice_aqc_opc_alloc_res, NULL);
6233         if (status)
6234                 goto exit;
6235
6236         *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6237
6238 exit:
6239         ice_free(hw, buf);
6240         return status;
6241 }
6242
6243 /**
6244  * ice_free_res_cntr - free resource counter
6245  * @hw: pointer to the hardware structure
6246  * @type: type of resource
6247  * @alloc_shared: if set it is shared else dedicated
6248  * @num_items: number of entries to be freed for FD resource type
6249  * @counter_id: counter ID resource which needs to be freed
6250  */
6251 enum ice_status
6252 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6253                   u16 counter_id)
6254 {
6255         struct ice_aqc_alloc_free_res_elem *buf;
6256         enum ice_status status;
6257         u16 buf_len;
6258
6259         /* Free resource */
6260         buf_len = ice_struct_size(buf, elem, 1);
6261         buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6262         if (!buf)
6263                 return ICE_ERR_NO_MEMORY;
6264
6265         buf->num_elems = CPU_TO_LE16(num_items);
6266         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6267                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
6268         buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6269
6270         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6271                                        ice_aqc_opc_free_res, NULL);
6272         if (status)
6273                 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6274
6275         ice_free(hw, buf);
6276         return status;
6277 }
6278
/**
 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
 * @hw: pointer to the hardware structure
 * @counter_id: returns counter index
 *
 * Convenience wrapper: allocates a single dedicated VLAN counter via
 * ice_alloc_res_cntr().
 */
enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
{
	return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
				  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
				  counter_id);
}
6290
/**
 * ice_free_vlan_res_counter - Free counter resource for VLAN type
 * @hw: pointer to the hardware structure
 * @counter_id: counter index to be freed
 *
 * Convenience wrapper: releases a single dedicated VLAN counter via
 * ice_free_res_cntr().
 */
enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
{
	return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
				 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
				 counter_id);
}
6302
/**
 * ice_alloc_res_lg_act - add large action resource
 * @hw: pointer to the hardware structure
 * @l_id: large action ID to fill it in
 * @num_acts: number of actions to hold with a large action entry
 *
 * Allocates one wide-table entry sized for @num_acts actions and returns
 * its index through @l_id.
 */
static enum ice_status
ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	enum ice_status status;
	u16 buf_len;

	if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
		return ICE_ERR_PARAM;

	/* Allocate resource for large action */
	buf_len = ice_struct_size(sw_buf, elem, 1);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
	if (!sw_buf)
		return ICE_ERR_NO_MEMORY;

	sw_buf->num_elems = CPU_TO_LE16(1);

	/* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
	 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
	 * If num_acts is greater than 2, then use
	 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
	 * The num_acts cannot exceed ICE_MAX_LG_ACT. This was ensured at
	 * the beginning of the function.
	 */
	if (num_acts == 1)
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
	else if (num_acts == 2)
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
	else
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	/* On success FW reports the allocated wide-table index */
	if (!status)
		*l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);

	ice_free(hw, sw_buf);
	return status;
}
6349
/**
 * ice_add_mac_with_sw_marker - add filter with sw marker
 * @hw: pointer to the hardware structure
 * @f_info: filter info structure containing the MAC filter information
 * @sw_marker: sw marker to tag the Rx descriptor with
 *
 * Adds a forward-to-VSI MAC filter (if not already present) and attaches a
 * large action that tags matching Rx descriptors with @sw_marker. If the
 * large action cannot be attached and the filter did not exist beforehand,
 * the filter is removed again.
 */
enum ice_status
ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
			   u16 sw_marker)
{
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct ice_sw_recipe *recp_list;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status ret;
	bool entry_exists;
	u16 lg_act_id;

	/* Only ICE_FWD_TO_VSI MAC filters may carry a sw marker */
	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;

	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	if (sw_marker == ICE_INVAL_SW_MARKER_ID)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);

	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	 */

	INIT_LIST_HEAD(&l_head);
	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);

	entry_exists = false;
	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
			       hw->port_info->lport);
	if (ret == ICE_ERR_ALREADY_EXISTS)
		entry_exists = true;
	else if (ret)
		return ret;

	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
	rule_lock = &recp_list->filt_rule_lock;
	ice_acquire_lock(rule_lock);
	/* Get the book keeping entry for the filter */
	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
	if (!m_entry)
		goto exit_error;

	/* If counter action was enabled for this rule then don't enable
	 * sw marker large action
	 */
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_PARAM;
		goto exit_error;
	}

	/* if same marker was added before */
	if (m_entry->sw_marker_id == sw_marker) {
		ret = ICE_ERR_ALREADY_EXISTS;
		goto exit_error;
	}

	/* Allocate a hardware table entry to hold large act. Three actions
	 * for marker based large action
	 */
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
	if (ret)
		goto exit_error;

	/* NOTE(review): ret is still success on this path, so if the index
	 * is invalid the function can return success after undoing the
	 * filter add — confirm whether an explicit error code was intended.
	 */
	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
		goto exit_error;

	/* Update the switch rule to add the marker action */
	ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
	if (!ret) {
		ice_release_lock(rule_lock);
		return ret;
	}

exit_error:
	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
	if (!entry_exists)
		ret = ice_remove_mac(hw, &l_head);

	return ret;
}
6445
/**
 * ice_add_mac_with_counter - add filter with counter enabled
 * @hw: pointer to the hardware structure
 * @f_info: pointer to filter info structure containing the MAC filter
 *          information
 *
 * Adds a forward-to-VSI MAC filter (if not already present) and attaches a
 * large action that counts matching packets with a newly allocated VLAN
 * counter. If attaching the counter action fails and the filter did not
 * exist beforehand, the filter is removed again.
 */
enum ice_status
ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct ice_sw_recipe *recp_list;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status ret;
	bool entry_exist;
	u16 counter_id;
	u16 lg_act_id;

	/* Only ICE_FWD_TO_VSI MAC filters may carry a counter action */
	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;

	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];

	entry_exist = false;

	rule_lock = &recp_list->filt_rule_lock;

	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	 */
	INIT_LIST_HEAD(&l_head);

	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);

	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
			       hw->port_info->lport);
	if (ret == ICE_ERR_ALREADY_EXISTS)
		entry_exist = true;
	else if (ret)
		return ret;

	ice_acquire_lock(rule_lock);
	/* Get the book keeping entry for the filter */
	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
	if (!m_entry) {
		ret = ICE_ERR_BAD_PTR;
		goto exit_error;
	}

	/* Don't enable counter for a filter for which sw marker was enabled */
	if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
		ret = ICE_ERR_PARAM;
		goto exit_error;
	}

	/* If a counter was already enabled then don't need to add again */
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_ALREADY_EXISTS;
		goto exit_error;
	}

	/* Allocate a hardware table entry to VLAN counter */
	ret = ice_alloc_vlan_res_counter(hw, &counter_id);
	if (ret)
		goto exit_error;

	/* Allocate a hardware table entry to hold large act. Two actions for
	 * counter based large action
	 */
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
	if (ret)
		goto exit_error;

	/* NOTE(review): on the two failure exits below, the VLAN counter
	 * allocated above is not released; also ret is still success when
	 * lg_act_id is invalid — confirm whether that is intended.
	 */
	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
		goto exit_error;

	/* Update the switch rule to add the counter action */
	ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
	if (!ret) {
		ice_release_lock(rule_lock);
		return ret;
	}

exit_error:
	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
	if (!entry_exist)
		ret = ice_remove_mac(hw, &l_head);

	return ret;
}
6544
/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 4 in the actual packet header and src address is at 6, 8, 10
 * Each listed offset is the byte position of one 16-bit match word within
 * the header.
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 0, 2 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_SCTP_IL,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_VXLAN_GPE,	{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
	{ ICE_PFCP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_L2TPV3,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_ESP,		{ 0, 2, 4, 6 } },
	{ ICE_AH,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_NAT_T,		{ 8, 10, 12, 14 } },
	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
	{ ICE_VLAN_EX,		{ 0, 2 } },
	{ ICE_VLAN_IN,		{ 0, 2 } },
};
6584
/* Maps each software protocol type to the hardware protocol ID used when
 * building a recipe's extraction sequence. UDP-encapsulated tunnel headers
 * (VXLAN, GENEVE, VXLAN-GPE, GTP) reuse the UDP protocol IDs, and NVGRE
 * uses the GRE one.
 * NOTE(review): the comment previously here described a "preferred grouping
 * of recipes" policy, which does not match this table — presumably stale
 * text from an earlier layout; verify against driver history.
 */

static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_PFCP,		ICE_UDP_ILOS_HW },
	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
	{ ICE_ESP,		ICE_ESP_HW },
	{ ICE_AH,		ICE_AH_HW },
	{ ICE_NAT_T,		ICE_UDP_ILOS_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
	{ ICE_VLAN_EX,		ICE_VLAN_OF_HW },
	{ ICE_VLAN_IN,		ICE_VLAN_OL_HW },
};
6619
/**
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 * @tun_type: tunnel type the recipe must have been created with
 * @priority: rule priority the recipe must have been created with
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 * A recipe matches when its word count, every protocol/offset/mask word,
 * its tunnel type and its priority all match @lkup_exts and the arguments.
 */
static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
			 enum ice_sw_tunnel_type tun_type, u32 priority)
{
	bool refresh_required = true;
	struct ice_sw_recipe *recp;
	u8 i;

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,
						&refresh_required))
				continue;

		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)
			continue;

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
			struct ice_fv_word *be = lkup_exts->fv_words;
			u16 *cr = recp[i].lkup_exts.field_mask;
			u16 *de = lkup_exts->field_mask;
			bool found = true;
			u8 pe, qr;

			/* ar, cr, and qr are related to the recipe words, while
			 * be, de, and pe are related to the lookup words
			 */
			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
				     qr++) {
					if (ar[qr].off == be[pe].off &&
					    ar[qr].prot_id == be[pe].prot_id &&
					    cr[qr] == de[pe])
						/* Found the "pe"th word in the
						 * given recipe
						 */
						break;
				}
				/* After walking through all the words in the
				 * "i"th recipe if "pe"th word was not found
				 * then this recipe is not what we are looking
				 * for. So break out from this loop and try the
				 * next recipe
				 */
				if (qr >= recp[i].lkup_exts.n_val_words) {
					found = false;
					break;
				}
			}
			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 */
			if (tun_type == recp[i].tun_type && found &&
			    priority == recp[i].priority)
				return i; /* Return the recipe ID */
		}
	}
	return ICE_MAX_NUM_RECIPES;
}
6697
6698 /**
6699  * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
6700  *
6701  * As protocol id for outer vlan is different in dvm and svm, if dvm is
6702  * supported protocol array record for outer vlan has to be modified to
6703  * reflect the value proper for DVM.
6704  */
6705 void ice_change_proto_id_to_dvm(void)
6706 {
6707         u8 i;
6708
6709         for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6710                 if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
6711                     ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
6712                         ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
6713 }
6714
6715 /**
6716  * ice_prot_type_to_id - get protocol ID from protocol type
6717  * @type: protocol type
6718  * @id: pointer to variable that will receive the ID
6719  *
6720  * Returns true if found, false otherwise
6721  */
6722 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6723 {
6724         u8 i;
6725
6726         for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6727                 if (ice_prot_id_tbl[i].type == type) {
6728                         *id = ice_prot_id_tbl[i].protocol_id;
6729                         return true;
6730                 }
6731         return false;
6732 }
6733
/**
 * ice_fill_valid_words - count valid words
 * @rule: advanced rule with lookup information
 * @lkup_exts: byte offset extractions of the words that are valid
 *
 * calculate valid words in a lookup rule using mask value
 *
 * Appends one protocol/offset/mask triple to @lkup_exts for every non-zero
 * 16-bit word of @rule's mask. Returns the number of words added; 0 when
 * the protocol type is unknown or @lkup_exts has no room left.
 */
static u8
ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
		     struct ice_prot_lkup_ext *lkup_exts)
{
	u8 j, word, prot_id, ret_val;

	/* Unknown protocol type: nothing can be extracted */
	if (!ice_prot_type_to_id(rule->type, &prot_id))
		return 0;

	/* Continue filling after words recorded by earlier lookups */
	word = lkup_exts->n_val_words;

	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
		if (((u16 *)&rule->m_u)[j] &&
		    (size_t)rule->type < ARRAY_SIZE(ice_prot_ext)) {
			/* No more space to accommodate */
			if (word >= ICE_MAX_CHAIN_WORDS)
				return 0;
			lkup_exts->fv_words[word].off =
				ice_prot_ext[rule->type].offs[j];
			/* NOTE(review): prot_id looked up above holds the
			 * same table value re-read here — verify.
			 */
			lkup_exts->fv_words[word].prot_id =
				ice_prot_id_tbl[rule->type].protocol_id;
			/* Masks arrive big-endian; store them CPU-ordered */
			lkup_exts->field_mask[word] =
				BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
			word++;
		}

	ret_val = word - lkup_exts->n_val_words;
	lkup_exts->n_val_words = word;

	return ret_val;
}
6772
6773 /**
6774  * ice_create_first_fit_recp_def - Create a recipe grouping
6775  * @hw: pointer to the hardware structure
6776  * @lkup_exts: an array of protocol header extractions
6777  * @rg_list: pointer to a list that stores new recipe groups
6778  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6779  *
6780  * Using first fit algorithm, take all the words that are still not done
6781  * and start grouping them in 4-word groups. Each group makes up one
6782  * recipe.
6783  */
6784 static enum ice_status
6785 ice_create_first_fit_recp_def(struct ice_hw *hw,
6786                               struct ice_prot_lkup_ext *lkup_exts,
6787                               struct LIST_HEAD_TYPE *rg_list,
6788                               u8 *recp_cnt)
6789 {
6790         struct ice_pref_recipe_group *grp = NULL;
6791         u8 j;
6792
6793         *recp_cnt = 0;
6794
6795         if (!lkup_exts->n_val_words) {
6796                 struct ice_recp_grp_entry *entry;
6797
6798                 entry = (struct ice_recp_grp_entry *)
6799                         ice_malloc(hw, sizeof(*entry));
6800                 if (!entry)
6801                         return ICE_ERR_NO_MEMORY;
6802                 LIST_ADD(&entry->l_entry, rg_list);
6803                 grp = &entry->r_group;
6804                 (*recp_cnt)++;
6805                 grp->n_val_pairs = 0;
6806         }
6807
6808         /* Walk through every word in the rule to check if it is not done. If so
6809          * then this word needs to be part of a new recipe.
6810          */
6811         for (j = 0; j < lkup_exts->n_val_words; j++)
6812                 if (!ice_is_bit_set(lkup_exts->done, j)) {
6813                         if (!grp ||
6814                             grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6815                                 struct ice_recp_grp_entry *entry;
6816
6817                                 entry = (struct ice_recp_grp_entry *)
6818                                         ice_malloc(hw, sizeof(*entry));
6819                                 if (!entry)
6820                                         return ICE_ERR_NO_MEMORY;
6821                                 LIST_ADD(&entry->l_entry, rg_list);
6822                                 grp = &entry->r_group;
6823                                 (*recp_cnt)++;
6824                         }
6825
6826                         grp->pairs[grp->n_val_pairs].prot_id =
6827                                 lkup_exts->fv_words[j].prot_id;
6828                         grp->pairs[grp->n_val_pairs].off =
6829                                 lkup_exts->fv_words[j].off;
6830                         grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6831                         grp->n_val_pairs++;
6832                 }
6833
6834         return ICE_SUCCESS;
6835 }
6836
6837 /**
6838  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6839  * @hw: pointer to the hardware structure
6840  * @fv_list: field vector with the extraction sequence information
6841  * @rg_list: recipe groupings with protocol-offset pairs
6842  *
6843  * Helper function to fill in the field vector indices for protocol-offset
6844  * pairs. These indexes are then ultimately programmed into a recipe.
6845  */
6846 static enum ice_status
6847 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6848                        struct LIST_HEAD_TYPE *rg_list)
6849 {
6850         struct ice_sw_fv_list_entry *fv;
6851         struct ice_recp_grp_entry *rg;
6852         struct ice_fv_word *fv_ext;
6853
6854         if (LIST_EMPTY(fv_list))
6855                 return ICE_SUCCESS;
6856
6857         fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6858         fv_ext = fv->fv_ptr->ew;
6859
6860         LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6861                 u8 i;
6862
6863                 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6864                         struct ice_fv_word *pr;
6865                         bool found = false;
6866                         u16 mask;
6867                         u8 j;
6868
6869                         pr = &rg->r_group.pairs[i];
6870                         mask = rg->r_group.mask[i];
6871
6872                         for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6873                                 if (fv_ext[j].prot_id == pr->prot_id &&
6874                                     fv_ext[j].off == pr->off) {
6875                                         found = true;
6876
6877                                         /* Store index of field vector */
6878                                         rg->fv_idx[i] = j;
6879                                         rg->fv_mask[i] = mask;
6880                                         break;
6881                                 }
6882
6883                         /* Protocol/offset could not be found, caller gave an
6884                          * invalid pair
6885                          */
6886                         if (!found)
6887                                 return ICE_ERR_PARAM;
6888                 }
6889         }
6890
6891         return ICE_SUCCESS;
6892 }
6893
6894 /**
6895  * ice_find_free_recp_res_idx - find free result indexes for recipe
6896  * @hw: pointer to hardware structure
6897  * @profiles: bitmap of profiles that will be associated with the new recipe
6898  * @free_idx: pointer to variable to receive the free index bitmap
6899  *
6900  * The algorithm used here is:
6901  *      1. When creating a new recipe, create a set P which contains all
6902  *         Profiles that will be associated with our new recipe
6903  *
6904  *      2. For each Profile p in set P:
6905  *          a. Add all recipes associated with Profile p into set R
6906  *          b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6907  *              [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6908  *              i. Or just assume they all have the same possible indexes:
6909  *                      44, 45, 46, 47
6910  *                      i.e., PossibleIndexes = 0x0000F00000000000
6911  *
6912  *      3. For each Recipe r in set R:
6913  *          a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6914  *          b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6915  *
6916  *      FreeIndexes will contain the bits indicating the indexes free for use,
6917  *      then the code needs to update the recipe[r].used_result_idx_bits to
6918  *      indicate which indexes were selected for use by this recipe.
6919  */
6920 static u16
6921 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6922                            ice_bitmap_t *free_idx)
6923 {
6924         ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6925         ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6926         ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6927         u16 bit;
6928
6929         ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6930         ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6931         ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6932         ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
6933
6934         ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6935
6936         /* For each profile we are going to associate the recipe with, add the
6937          * recipes that are associated with that profile. This will give us
6938          * the set of recipes that our recipe may collide with. Also, determine
6939          * what possible result indexes are usable given this set of profiles.
6940          */
6941         ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6942                 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6943                               ICE_MAX_NUM_RECIPES);
6944                 ice_and_bitmap(possible_idx, possible_idx,
6945                                hw->switch_info->prof_res_bm[bit],
6946                                ICE_MAX_FV_WORDS);
6947         }
6948
6949         /* For each recipe that our new recipe may collide with, determine
6950          * which indexes have been used.
6951          */
6952         ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6953                 ice_or_bitmap(used_idx, used_idx,
6954                               hw->switch_info->recp_list[bit].res_idxs,
6955                               ICE_MAX_FV_WORDS);
6956
6957         ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6958
6959         /* return number of free indexes */
6960         return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6961 }
6962
/**
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @profiles: bitmap of profiles that will be associated.
 *
 * Allocates hardware recipe(s) for the lookup groups in rm->rg_list,
 * programs them through the add-recipe admin queue command, and mirrors the
 * resulting configuration into the software bookkeeping in
 * hw->switch_info->recp_list. When more than one lookup group is required,
 * an extra root "chaining" recipe is created whose lookup words are the
 * result indexes produced by the non-root recipes.
 */
static enum ice_status
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  ice_bitmap_t *profiles)
{
	ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	enum ice_status status;
	u16 free_res_idx;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	 /* check number of free result indices */
	ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		if (rm->n_grp_count > free_res_idx)
			return ICE_ERR_MAX_LIMIT;

		/* account for the extra chaining root recipe */
		rm->n_grp_count++;
	}

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
		return ICE_ERR_MAX_LIMIT;

	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
							    ICE_MAX_NUM_RECIPES,
							    sizeof(*tmp));
	if (!tmp)
		return ICE_ERR_NO_MEMORY;

	buf = (struct ice_aqc_recipe_data_elem *)
		ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
	if (!buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_mem;
	}

	ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	/* tmp[0] is used below as a template element for each new recipe */
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
				   NULL);
	if (status || recipe_count == 0)
		goto err_unroll;

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		u8 i;

		status = ice_alloc_recipe(hw, &entry->rid);
		if (status)
			goto err_unroll;

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		buf[recps].content.rid = 0;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
			   ICE_NONDMA_MEM);

		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 * to be 0
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;
		}

		/* Program the field-vector words this group matches on;
		 * index 0 stays reserved for the switch ID match above.
		 */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] =
				CPU_TO_LE16(entry->fv_mask[i]);
		}

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			 * that can be used.
			 */
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
				status = ICE_ERR_MAX_LIMIT;
				goto err_unroll;
			}

			/* A non-root recipe publishes its hit as a result
			 * word at chain_idx; the root chaining recipe later
			 * matches on that word.
			 */
			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			ice_clear_bit(chain_idx, result_idx_bm);
			chain_idx = ice_find_first_bit(result_idx_bm,
						       ICE_MAX_FV_WORDS);
		}

		/* fill recipe dependencies */
		ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
				ICE_MAX_NUM_RECIPES);
		ice_set_bit(buf[recps].recipe_indx,
			    (ice_bitmap_t *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
		recps++;
	}

	if (rm->n_grp_count == 1) {
		rm->root_rid = buf[0].recipe_indx;
		ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[0].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			status = ICE_ERR_BAD_PTR;
			goto err_unroll;
		}
		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referreing newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
	} else {
		struct ice_recp_grp_entry *last_chain_entry;
		u16 rid, i;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);
		if (status)
			goto err_unroll;

		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
			sizeof(*last_chain_entry));
		if (!last_chain_entry) {
			status = ICE_ERR_NO_MEMORY;
			goto err_unroll;
		}
		last_chain_entry->rid = rid;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
			   ICE_NONDMA_MEM);
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;
		}

		i = 1;
		/* update r_bitmap with the recp that is used for chaining */
		ice_set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		/* The chaining root matches the result word of every non-root
		 * recipe with a full 0xFFFF mask.
		 */
		LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
				    l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
			ice_set_bit(entry->rid, rm->r_bitmap);
		}
		LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[recps].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			status = ICE_ERR_BAD_PTR;
			goto err_unroll;
		}
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

		recps++;
		rm->root_rid = (u8)rid;
	}
	/* Program the recipes into hardware under the change lock. */
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		goto err_unroll;

	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);
	if (status)
		goto err_unroll;

	/* Every recipe that just got created add it to the recipe
	 * book keeping list
	 */
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {
				buf_idx = idx;
				idx_found = true;
			}

		if (!idx_found) {
			status = ICE_ERR_OUT_OF_RANGE;
			goto err_unroll;
		}

		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;

		recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);

		ice_memcpy(&recp->ext_words, entry->r_group.pairs,
			   entry->r_group.n_val_pairs *
			   sizeof(struct ice_fv_word),
			   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
			   sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
		if (!is_root)
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->recp_created = true;
	}
	/* On success, ownership of buf transfers to rm->root_buf. */
	rm->root_buf = buf;
	ice_free(hw, tmp);
	return status;

	/* NOTE(review): err_mem is reached with buf == NULL, so ice_free()
	 * is presumably NULL-tolerant here - confirm against its definition.
	 */
err_unroll:
err_mem:
	ice_free(hw, tmp);
	ice_free(hw, buf);
	return status;
}
7262
7263 /**
7264  * ice_create_recipe_group - creates recipe group
7265  * @hw: pointer to hardware structure
7266  * @rm: recipe management list entry
7267  * @lkup_exts: lookup elements
7268  */
7269 static enum ice_status
7270 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7271                         struct ice_prot_lkup_ext *lkup_exts)
7272 {
7273         enum ice_status status;
7274         u8 recp_count = 0;
7275
7276         rm->n_grp_count = 0;
7277
7278         /* Create recipes for words that are marked not done by packing them
7279          * as best fit.
7280          */
7281         status = ice_create_first_fit_recp_def(hw, lkup_exts,
7282                                                &rm->rg_list, &recp_count);
7283         if (!status) {
7284                 rm->n_grp_count += recp_count;
7285                 rm->n_ext_words = lkup_exts->n_val_words;
7286                 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7287                            sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7288                 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7289                            sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
7290         }
7291
7292         return status;
7293 }
7294
7295 /**
7296  * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7297  * @hw: pointer to hardware structure
7298  * @lkups: lookup elements or match criteria for the advanced recipe, one
7299  *         structure per protocol header
7300  * @lkups_cnt: number of protocols
7301  * @bm: bitmap of field vectors to consider
7302  * @fv_list: pointer to a list that holds the returned field vectors
7303  */
7304 static enum ice_status
7305 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7306            ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7307 {
7308         enum ice_status status;
7309         u8 *prot_ids;
7310         u16 i;
7311
7312         if (!lkups_cnt)
7313                 return ICE_SUCCESS;
7314
7315         prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7316         if (!prot_ids)
7317                 return ICE_ERR_NO_MEMORY;
7318
7319         for (i = 0; i < lkups_cnt; i++)
7320                 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7321                         status = ICE_ERR_CFG;
7322                         goto free_mem;
7323                 }
7324
7325         /* Find field vectors that include all specified protocol types */
7326         status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
7327
7328 free_mem:
7329         ice_free(hw, prot_ids);
7330         return status;
7331 }
7332
7333 /**
7334  * ice_tun_type_match_word - determine if tun type needs a match mask
7335  * @tun_type: tunnel type
7336  * @mask: mask to be used for the tunnel
7337  */
7338 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
7339 {
7340         switch (tun_type) {
7341         case ICE_SW_TUN_VXLAN_GPE:
7342         case ICE_SW_TUN_GENEVE:
7343         case ICE_SW_TUN_VXLAN:
7344         case ICE_SW_TUN_NVGRE:
7345         case ICE_SW_TUN_UDP:
7346         case ICE_ALL_TUNNELS:
7347         case ICE_SW_TUN_AND_NON_TUN_QINQ:
7348         case ICE_NON_TUN_QINQ:
7349         case ICE_SW_TUN_PPPOE_QINQ:
7350         case ICE_SW_TUN_PPPOE_PAY_QINQ:
7351         case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7352         case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7353                 *mask = ICE_TUN_FLAG_MASK;
7354                 return true;
7355
7356         case ICE_SW_TUN_GENEVE_VLAN:
7357         case ICE_SW_TUN_VXLAN_VLAN:
7358                 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
7359                 return true;
7360
7361         default:
7362                 *mask = 0;
7363                 return false;
7364         }
7365 }
7366
7367 /**
7368  * ice_add_special_words - Add words that are not protocols, such as metadata
7369  * @rinfo: other information regarding the rule e.g. priority and action info
7370  * @lkup_exts: lookup word structure
7371  */
7372 static enum ice_status
7373 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7374                       struct ice_prot_lkup_ext *lkup_exts)
7375 {
7376         u16 mask;
7377
7378         /* If this is a tunneled packet, then add recipe index to match the
7379          * tunnel bit in the packet metadata flags.
7380          */
7381         if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
7382                 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
7383                         u8 word = lkup_exts->n_val_words++;
7384
7385                         lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7386                         lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
7387                         lkup_exts->field_mask[word] = mask;
7388                 } else {
7389                         return ICE_ERR_MAX_LIMIT;
7390                 }
7391         }
7392
7393         return ICE_SUCCESS;
7394 }
7395
/**
 * ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
 * @hw: pointer to hardware structure
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @bm: pointer to memory for returning the bitmap of field vectors
 *
 * Tunnel types that map to explicit profile IDs set those bits directly and
 * return; all remaining types select a profile type that is resolved into a
 * bitmap by ice_get_sw_fv_bitmap() at the end.
 */
static void
ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
			 ice_bitmap_t *bm)
{
	enum ice_prof_type prof_type;

	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);

	switch (rinfo->tun_type) {
	case ICE_NON_TUN:
	case ICE_NON_TUN_QINQ:
		prof_type = ICE_PROF_NON_TUN;
		break;
	case ICE_ALL_TUNNELS:
		prof_type = ICE_PROF_TUN_ALL;
		break;
	case ICE_SW_TUN_VXLAN_GPE:
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_GENEVE_VLAN:
	case ICE_SW_TUN_VXLAN:
	case ICE_SW_TUN_VXLAN_VLAN:
	case ICE_SW_TUN_UDP:
	case ICE_SW_TUN_GTP:
		prof_type = ICE_PROF_TUN_UDP;
		break;
	case ICE_SW_TUN_NVGRE:
		prof_type = ICE_PROF_TUN_GRE;
		break;
	case ICE_SW_TUN_PPPOE:
	case ICE_SW_TUN_PPPOE_QINQ:
		prof_type = ICE_PROF_TUN_PPPOE;
		break;
	/* PPPoE variants with a known inner protocol pin explicit profiles;
	 * an unspecified inner L4 enables OTHER/UDP/TCP together.
	 */
	case ICE_SW_TUN_PPPOE_PAY:
	case ICE_SW_TUN_PPPOE_PAY_QINQ:
		ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4:
	case ICE_SW_TUN_PPPOE_IPV4_QINQ:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4_TCP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4_UDP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6:
	case ICE_SW_TUN_PPPOE_IPV6_QINQ:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6_TCP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6_UDP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_ESP:
	case ICE_SW_TUN_IPV6_ESP:
		ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_AH:
	case ICE_SW_TUN_IPV6_AH:
		ice_set_bit(ICE_PROFID_IPV6_AH, bm);
		return;
	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
	case ICE_SW_TUN_IPV6_L2TPV3:
		ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_NAT_T:
	case ICE_SW_TUN_IPV6_NAT_T:
		ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
		ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
		ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
		ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
		ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
		return;
	case ICE_SW_TUN_IPV4_NAT_T:
		ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
		return;
	case ICE_SW_TUN_IPV4_L2TPV3:
		ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
		return;
	case ICE_SW_TUN_IPV4_ESP:
		ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
		return;
	case ICE_SW_TUN_IPV4_AH:
		ice_set_bit(ICE_PROFID_IPV4_AH, bm);
		return;
	case ICE_SW_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
		return;
	case ICE_SW_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
		return;
	case ICE_SW_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
		return;
	case ICE_SW_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
		return;
	/* GTPU without payload matches the TEID-only profile */
	case ICE_SW_TUN_IPV4_GTPU_NO_PAY:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_TEID, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_NO_PAY:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_TEID, bm);
		return;
	/* GTPU with inner headers: an unspecified inner L4 enables the
	 * OTHER/UDP/TCP profiles together; EH variants use the extension
	 * header profile IDs.
	 */
	case ICE_SW_TUN_IPV4_GTPU_IPV4:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV4:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV4:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV4:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV6:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV6:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV6:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV6:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_AND_NON_TUN:
	case ICE_SW_TUN_AND_NON_TUN_QINQ:
	default:
		prof_type = ICE_PROF_ALL;
		break;
	}

	/* Resolve the selected profile type into a field-vector bitmap */
	ice_get_sw_fv_bitmap(hw, prof_type, bm);
}
7616
7617 /**
7618  * ice_is_prof_rule - determine if rule type is a profile rule
7619  * @type: the rule type
7620  *
7621  * if the rule type is a profile rule, that means that there no field value
7622  * match required, in this case just a profile hit is required.
7623  */
7624 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
7625 {
7626         switch (type) {
7627         case ICE_SW_TUN_PROFID_IPV6_ESP:
7628         case ICE_SW_TUN_PROFID_IPV6_AH:
7629         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7630         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7631         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7632         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7633         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7634         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7635                 return true;
7636         default:
7637                 break;
7638         }
7639
7640         return false;
7641 }
7642
7643 /**
7644  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7645  * @hw: pointer to hardware structure
7646  * @lkups: lookup elements or match criteria for the advanced recipe, one
7647  *  structure per protocol header
7648  * @lkups_cnt: number of protocols
7649  * @rinfo: other information regarding the rule e.g. priority and action info
7650  * @rid: return the recipe ID of the recipe created
7651  */
7652 static enum ice_status
7653 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7654                    u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7655 {
7656         ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7657         ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7658         struct ice_prot_lkup_ext *lkup_exts;
7659         struct ice_recp_grp_entry *r_entry;
7660         struct ice_sw_fv_list_entry *fvit;
7661         struct ice_recp_grp_entry *r_tmp;
7662         struct ice_sw_fv_list_entry *tmp;
7663         enum ice_status status = ICE_SUCCESS;
7664         struct ice_sw_recipe *rm;
7665         u8 i;
7666
7667         if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7668                 return ICE_ERR_PARAM;
7669
7670         lkup_exts = (struct ice_prot_lkup_ext *)
7671                 ice_malloc(hw, sizeof(*lkup_exts));
7672         if (!lkup_exts)
7673                 return ICE_ERR_NO_MEMORY;
7674
7675         /* Determine the number of words to be matched and if it exceeds a
7676          * recipe's restrictions
7677          */
7678         for (i = 0; i < lkups_cnt; i++) {
7679                 u16 count;
7680
7681                 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7682                         status = ICE_ERR_CFG;
7683                         goto err_free_lkup_exts;
7684                 }
7685
7686                 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7687                 if (!count) {
7688                         status = ICE_ERR_CFG;
7689                         goto err_free_lkup_exts;
7690                 }
7691         }
7692
7693         rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7694         if (!rm) {
7695                 status = ICE_ERR_NO_MEMORY;
7696                 goto err_free_lkup_exts;
7697         }
7698
7699         /* Get field vectors that contain fields extracted from all the protocol
7700          * headers being programmed.
7701          */
7702         INIT_LIST_HEAD(&rm->fv_list);
7703         INIT_LIST_HEAD(&rm->rg_list);
7704
7705         /* Get bitmap of field vectors (profiles) that are compatible with the
7706          * rule request; only these will be searched in the subsequent call to
7707          * ice_get_fv.
7708          */
7709         ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7710
7711         status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7712         if (status)
7713                 goto err_unroll;
7714
7715         /* Create any special protocol/offset pairs, such as looking at tunnel
7716          * bits by extracting metadata
7717          */
7718         status = ice_add_special_words(rinfo, lkup_exts);
7719         if (status)
7720                 goto err_free_lkup_exts;
7721
7722         /* Group match words into recipes using preferred recipe grouping
7723          * criteria.
7724          */
7725         status = ice_create_recipe_group(hw, rm, lkup_exts);
7726         if (status)
7727                 goto err_unroll;
7728
7729         /* set the recipe priority if specified */
7730         rm->priority = (u8)rinfo->priority;
7731
7732         /* Find offsets from the field vector. Pick the first one for all the
7733          * recipes.
7734          */
7735         status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7736         if (status)
7737                 goto err_unroll;
7738
7739         /* An empty FV list means to use all the profiles returned in the
7740          * profile bitmap
7741          */
7742         if (LIST_EMPTY(&rm->fv_list)) {
7743                 u16 j;
7744
7745                 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7746                         struct ice_sw_fv_list_entry *fvl;
7747
7748                         fvl = (struct ice_sw_fv_list_entry *)
7749                                 ice_malloc(hw, sizeof(*fvl));
7750                         if (!fvl)
7751                                 goto err_unroll;
7752                         fvl->fv_ptr = NULL;
7753                         fvl->profile_id = j;
7754                         LIST_ADD(&fvl->list_entry, &rm->fv_list);
7755                 }
7756         }
7757
7758         /* get bitmap of all profiles the recipe will be associated with */
7759         ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7760         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7761                             list_entry) {
7762                 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7763                 ice_set_bit((u16)fvit->profile_id, profiles);
7764         }
7765
7766         /* Look for a recipe which matches our requested fv / mask list */
7767         *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type, rinfo->priority);
7768         if (*rid < ICE_MAX_NUM_RECIPES)
7769                 /* Success if found a recipe that match the existing criteria */
7770                 goto err_unroll;
7771
7772         rm->tun_type = rinfo->tun_type;
7773         /* Recipe we need does not exist, add a recipe */
7774         status = ice_add_sw_recipe(hw, rm, profiles);
7775         if (status)
7776                 goto err_unroll;
7777
7778         /* Associate all the recipes created with all the profiles in the
7779          * common field vector.
7780          */
7781         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7782                             list_entry) {
7783                 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
7784                 u16 j;
7785
7786                 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7787                                                       (u8 *)r_bitmap, NULL);
7788                 if (status)
7789                         goto err_unroll;
7790
7791                 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7792                               ICE_MAX_NUM_RECIPES);
7793                 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7794                 if (status)
7795                         goto err_unroll;
7796
7797                 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7798                                                       (u8 *)r_bitmap,
7799                                                       NULL);
7800                 ice_release_change_lock(hw);
7801
7802                 if (status)
7803                         goto err_unroll;
7804
7805                 /* Update profile to recipe bitmap array */
7806                 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7807                               ICE_MAX_NUM_RECIPES);
7808
7809                 /* Update recipe to profile bitmap array */
7810                 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7811                         ice_set_bit((u16)fvit->profile_id,
7812                                     recipe_to_profile[j]);
7813         }
7814
7815         *rid = rm->root_rid;
7816         ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7817                    lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
7818 err_unroll:
7819         LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7820                                  ice_recp_grp_entry, l_entry) {
7821                 LIST_DEL(&r_entry->l_entry);
7822                 ice_free(hw, r_entry);
7823         }
7824
7825         LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7826                                  list_entry) {
7827                 LIST_DEL(&fvit->list_entry);
7828                 ice_free(hw, fvit);
7829         }
7830
7831         if (rm->root_buf)
7832                 ice_free(hw, rm->root_buf);
7833
7834         ice_free(hw, rm);
7835
7836 err_free_lkup_exts:
7837         ice_free(hw, lkup_exts);
7838
7839         return status;
7840 }
7841
7842 /**
7843  * ice_find_dummy_packet - find dummy packet by tunnel type
7844  *
7845  * @lkups: lookup elements or match criteria for the advanced recipe, one
7846  *         structure per protocol header
7847  * @lkups_cnt: number of protocols
7848  * @tun_type: tunnel type from the match criteria
7849  * @pkt: dummy packet to fill according to filter match criteria
7850  * @pkt_len: packet length of dummy packet
7851  * @offsets: pointer to receive the pointer to the offsets for the packet
7852  */
7853 static void
7854 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7855                       enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7856                       u16 *pkt_len,
7857                       const struct ice_dummy_pkt_offsets **offsets)
7858 {
7859         bool tcp = false, udp = false, ipv6 = false, vlan = false;
7860         bool gre = false;
7861         u16 i;
7862
7863         for (i = 0; i < lkups_cnt; i++) {
7864                 if (lkups[i].type == ICE_UDP_ILOS)
7865                         udp = true;
7866                 else if (lkups[i].type == ICE_TCP_IL)
7867                         tcp = true;
7868                 else if (lkups[i].type == ICE_IPV6_OFOS)
7869                         ipv6 = true;
7870                 else if (lkups[i].type == ICE_VLAN_OFOS)
7871                         vlan = true;
7872                 else if (lkups[i].type == ICE_IPV4_OFOS &&
7873                          lkups[i].h_u.ipv4_hdr.protocol ==
7874                                 ICE_IPV4_NVGRE_PROTO_ID &&
7875                          lkups[i].m_u.ipv4_hdr.protocol ==
7876                                 0xFF)
7877                         gre = true;
7878                 else if (lkups[i].type == ICE_PPPOE &&
7879                          lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7880                                 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7881                          lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7882                                 0xFFFF)
7883                         ipv6 = true;
7884                 else if (lkups[i].type == ICE_ETYPE_OL &&
7885                          lkups[i].h_u.ethertype.ethtype_id ==
7886                                 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7887                          lkups[i].m_u.ethertype.ethtype_id ==
7888                                         0xFFFF)
7889                         ipv6 = true;
7890                 else if (lkups[i].type == ICE_IPV4_IL &&
7891                          lkups[i].h_u.ipv4_hdr.protocol ==
7892                                 ICE_TCP_PROTO_ID &&
7893                          lkups[i].m_u.ipv4_hdr.protocol ==
7894                                 0xFF)
7895                         tcp = true;
7896         }
7897
7898         if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7899              tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7900                 *pkt = dummy_qinq_ipv6_pkt;
7901                 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7902                 *offsets = dummy_qinq_ipv6_packet_offsets;
7903                 return;
7904         } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7905                            tun_type == ICE_NON_TUN_QINQ) {
7906                 *pkt = dummy_qinq_ipv4_pkt;
7907                 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7908                 *offsets = dummy_qinq_ipv4_packet_offsets;
7909                 return;
7910         }
7911
7912         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7913                 *pkt = dummy_qinq_pppoe_ipv6_packet;
7914                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7915                 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7916                 return;
7917         } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7918                 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7919                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7920                 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7921                 return;
7922         } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ && ipv6) {
7923                 *pkt = dummy_qinq_pppoe_ipv6_packet;
7924                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7925                 *offsets = dummy_qinq_pppoe_packet_offsets;
7926                 return;
7927         } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7928                         tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7929                 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7930                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7931                 *offsets = dummy_qinq_pppoe_packet_offsets;
7932                 return;
7933         }
7934
7935         if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7936                 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7937                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7938                 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7939                 return;
7940         } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7941                 *pkt = dummy_ipv6_gtp_packet;
7942                 *pkt_len = sizeof(dummy_ipv6_gtp_packet);
7943                 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7944                 return;
7945         }
7946
7947         if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7948                 *pkt = dummy_ipv4_esp_pkt;
7949                 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7950                 *offsets = dummy_ipv4_esp_packet_offsets;
7951                 return;
7952         }
7953
7954         if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7955                 *pkt = dummy_ipv6_esp_pkt;
7956                 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7957                 *offsets = dummy_ipv6_esp_packet_offsets;
7958                 return;
7959         }
7960
7961         if (tun_type == ICE_SW_TUN_IPV4_AH) {
7962                 *pkt = dummy_ipv4_ah_pkt;
7963                 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7964                 *offsets = dummy_ipv4_ah_packet_offsets;
7965                 return;
7966         }
7967
7968         if (tun_type == ICE_SW_TUN_IPV6_AH) {
7969                 *pkt = dummy_ipv6_ah_pkt;
7970                 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7971                 *offsets = dummy_ipv6_ah_packet_offsets;
7972                 return;
7973         }
7974
7975         if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7976                 *pkt = dummy_ipv4_nat_pkt;
7977                 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7978                 *offsets = dummy_ipv4_nat_packet_offsets;
7979                 return;
7980         }
7981
7982         if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7983                 *pkt = dummy_ipv6_nat_pkt;
7984                 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7985                 *offsets = dummy_ipv6_nat_packet_offsets;
7986                 return;
7987         }
7988
7989         if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7990                 *pkt = dummy_ipv4_l2tpv3_pkt;
7991                 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7992                 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7993                 return;
7994         }
7995
7996         if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7997                 *pkt = dummy_ipv6_l2tpv3_pkt;
7998                 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7999                 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
8000                 return;
8001         }
8002
8003         if (tun_type == ICE_SW_TUN_GTP) {
8004                 *pkt = dummy_udp_gtp_packet;
8005                 *pkt_len = sizeof(dummy_udp_gtp_packet);
8006                 *offsets = dummy_udp_gtp_packet_offsets;
8007                 return;
8008         }
8009
8010         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4 ||
8011             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4) {
8012                 *pkt = dummy_ipv4_gtpu_ipv4_packet;
8013                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
8014                 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
8015                 return;
8016         }
8017
8018         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_UDP ||
8019             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP) {
8020                 *pkt = dummy_ipv4_gtpu_ipv4_udp_packet;
8021                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet);
8022                 *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets;
8023                 return;
8024         }
8025
8026         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_TCP ||
8027             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP) {
8028                 *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet;
8029                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet);
8030                 *offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets;
8031                 return;
8032         }
8033
8034         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6 ||
8035             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6) {
8036                 *pkt = dummy_ipv4_gtpu_ipv6_packet;
8037                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
8038                 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
8039                 return;
8040         }
8041
8042         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_UDP ||
8043             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP) {
8044                 *pkt = dummy_ipv4_gtpu_ipv6_udp_packet;
8045                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet);
8046                 *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets;
8047                 return;
8048         }
8049
8050         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_TCP ||
8051             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP) {
8052                 *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet;
8053                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet);
8054                 *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets;
8055                 return;
8056         }
8057
8058         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4 ||
8059             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4) {
8060                 *pkt = dummy_ipv6_gtpu_ipv4_packet;
8061                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
8062                 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
8063                 return;
8064         }
8065
8066         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_UDP ||
8067             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP) {
8068                 *pkt = dummy_ipv6_gtpu_ipv4_udp_packet;
8069                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet);
8070                 *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets;
8071                 return;
8072         }
8073
8074         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_TCP ||
8075             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP) {
8076                 *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet;
8077                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet);
8078                 *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets;
8079                 return;
8080         }
8081
8082         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6 ||
8083             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6) {
8084                 *pkt = dummy_ipv6_gtpu_ipv6_packet;
8085                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
8086                 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
8087                 return;
8088         }
8089
8090         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_UDP ||
8091             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP) {
8092                 *pkt = dummy_ipv6_gtpu_ipv6_udp_packet;
8093                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet);
8094                 *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets;
8095                 return;
8096         }
8097
8098         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_TCP ||
8099             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP) {
8100                 *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet;
8101                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet);
8102                 *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets;
8103                 return;
8104         }
8105
8106         if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
8107                 *pkt = dummy_pppoe_ipv6_packet;
8108                 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8109                 *offsets = dummy_pppoe_packet_offsets;
8110                 return;
8111         } else if (tun_type == ICE_SW_TUN_PPPOE ||
8112                 tun_type == ICE_SW_TUN_PPPOE_PAY) {
8113                 *pkt = dummy_pppoe_ipv4_packet;
8114                 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8115                 *offsets = dummy_pppoe_packet_offsets;
8116                 return;
8117         }
8118
8119         if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
8120                 *pkt = dummy_pppoe_ipv4_packet;
8121                 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8122                 *offsets = dummy_pppoe_packet_ipv4_offsets;
8123                 return;
8124         }
8125
8126         if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
8127                 *pkt = dummy_pppoe_ipv4_tcp_packet;
8128                 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
8129                 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
8130                 return;
8131         }
8132
8133         if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
8134                 *pkt = dummy_pppoe_ipv4_udp_packet;
8135                 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
8136                 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
8137                 return;
8138         }
8139
8140         if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
8141                 *pkt = dummy_pppoe_ipv6_packet;
8142                 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8143                 *offsets = dummy_pppoe_packet_ipv6_offsets;
8144                 return;
8145         }
8146
8147         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
8148                 *pkt = dummy_pppoe_ipv6_tcp_packet;
8149                 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
8150                 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
8151                 return;
8152         }
8153
8154         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
8155                 *pkt = dummy_pppoe_ipv6_udp_packet;
8156                 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
8157                 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
8158                 return;
8159         }
8160
8161         if (tun_type == ICE_SW_IPV4_TCP) {
8162                 *pkt = dummy_tcp_packet;
8163                 *pkt_len = sizeof(dummy_tcp_packet);
8164                 *offsets = dummy_tcp_packet_offsets;
8165                 return;
8166         }
8167
8168         if (tun_type == ICE_SW_IPV4_UDP) {
8169                 *pkt = dummy_udp_packet;
8170                 *pkt_len = sizeof(dummy_udp_packet);
8171                 *offsets = dummy_udp_packet_offsets;
8172                 return;
8173         }
8174
8175         if (tun_type == ICE_SW_IPV6_TCP) {
8176                 *pkt = dummy_tcp_ipv6_packet;
8177                 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8178                 *offsets = dummy_tcp_ipv6_packet_offsets;
8179                 return;
8180         }
8181
8182         if (tun_type == ICE_SW_IPV6_UDP) {
8183                 *pkt = dummy_udp_ipv6_packet;
8184                 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8185                 *offsets = dummy_udp_ipv6_packet_offsets;
8186                 return;
8187         }
8188
8189         /* Support GTP tunnel + L3 */
8190         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4 ||
8191             tun_type == ICE_SW_TUN_GTP_IPV4) {
8192                 *pkt = dummy_ipv4_gtpu_ipv4_packet;
8193                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
8194                 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
8195                 return;
8196         }
8197         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6 ||
8198             tun_type == ICE_SW_TUN_GTP_IPV6) {
8199                 *pkt = dummy_ipv4_gtpu_ipv6_packet;
8200                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
8201                 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
8202                 return;
8203         }
8204         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
8205                 *pkt = dummy_ipv6_gtpu_ipv4_packet;
8206                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
8207                 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
8208                 return;
8209         }
8210         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
8211                 *pkt = dummy_ipv6_gtpu_ipv6_packet;
8212                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
8213                 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
8214                 return;
8215         }
8216
8217         if (tun_type == ICE_ALL_TUNNELS) {
8218                 *pkt = dummy_gre_udp_packet;
8219                 *pkt_len = sizeof(dummy_gre_udp_packet);
8220                 *offsets = dummy_gre_udp_packet_offsets;
8221                 return;
8222         }
8223
8224         if (tun_type == ICE_SW_TUN_NVGRE || gre) {
8225                 if (tcp) {
8226                         *pkt = dummy_gre_tcp_packet;
8227                         *pkt_len = sizeof(dummy_gre_tcp_packet);
8228                         *offsets = dummy_gre_tcp_packet_offsets;
8229                         return;
8230                 }
8231
8232                 *pkt = dummy_gre_udp_packet;
8233                 *pkt_len = sizeof(dummy_gre_udp_packet);
8234                 *offsets = dummy_gre_udp_packet_offsets;
8235                 return;
8236         }
8237
8238         if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
8239             tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
8240             tun_type == ICE_SW_TUN_GENEVE_VLAN ||
8241             tun_type == ICE_SW_TUN_VXLAN_VLAN) {
8242                 if (tcp) {
8243                         *pkt = dummy_udp_tun_tcp_packet;
8244                         *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
8245                         *offsets = dummy_udp_tun_tcp_packet_offsets;
8246                         return;
8247                 }
8248
8249                 *pkt = dummy_udp_tun_udp_packet;
8250                 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
8251                 *offsets = dummy_udp_tun_udp_packet_offsets;
8252                 return;
8253         }
8254
8255         if (udp && !ipv6) {
8256                 if (vlan) {
8257                         *pkt = dummy_vlan_udp_packet;
8258                         *pkt_len = sizeof(dummy_vlan_udp_packet);
8259                         *offsets = dummy_vlan_udp_packet_offsets;
8260                         return;
8261                 }
8262                 *pkt = dummy_udp_packet;
8263                 *pkt_len = sizeof(dummy_udp_packet);
8264                 *offsets = dummy_udp_packet_offsets;
8265                 return;
8266         } else if (udp && ipv6) {
8267                 if (vlan) {
8268                         *pkt = dummy_vlan_udp_ipv6_packet;
8269                         *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
8270                         *offsets = dummy_vlan_udp_ipv6_packet_offsets;
8271                         return;
8272                 }
8273                 *pkt = dummy_udp_ipv6_packet;
8274                 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8275                 *offsets = dummy_udp_ipv6_packet_offsets;
8276                 return;
8277         } else if ((tcp && ipv6) || ipv6) {
8278                 if (vlan) {
8279                         *pkt = dummy_vlan_tcp_ipv6_packet;
8280                         *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
8281                         *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
8282                         return;
8283                 }
8284                 *pkt = dummy_tcp_ipv6_packet;
8285                 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8286                 *offsets = dummy_tcp_ipv6_packet_offsets;
8287                 return;
8288         }
8289
8290         if (vlan) {
8291                 *pkt = dummy_vlan_tcp_packet;
8292                 *pkt_len = sizeof(dummy_vlan_tcp_packet);
8293                 *offsets = dummy_vlan_tcp_packet_offsets;
8294         } else {
8295                 *pkt = dummy_tcp_packet;
8296                 *pkt_len = sizeof(dummy_tcp_packet);
8297                 *offsets = dummy_tcp_packet_offsets;
8298         }
8299 }
8300
8301 /**
8302  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
8303  *
8304  * @lkups: lookup elements or match criteria for the advanced recipe, one
8305  *         structure per protocol header
8306  * @lkups_cnt: number of protocols
8307  * @s_rule: stores rule information from the match criteria
8308  * @dummy_pkt: dummy packet to fill according to filter match criteria
8309  * @pkt_len: packet length of dummy packet
8310  * @offsets: offset info for the dummy packet
8311  */
8312 static enum ice_status
8313 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8314                           struct ice_aqc_sw_rules_elem *s_rule,
8315                           const u8 *dummy_pkt, u16 pkt_len,
8316                           const struct ice_dummy_pkt_offsets *offsets)
8317 {
8318         u8 *pkt;
8319         u16 i;
8320
8321         /* Start with a packet with a pre-defined/dummy content. Then, fill
8322          * in the header values to be looked up or matched.
8323          */
8324         pkt = s_rule->pdata.lkup_tx_rx.hdr;
8325
8326         ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
8327
8328         for (i = 0; i < lkups_cnt; i++) {
8329                 enum ice_protocol_type type;
8330                 u16 offset = 0, len = 0, j;
8331                 bool found = false;
8332
8333                 /* find the start of this layer; it should be found since this
8334                  * was already checked when search for the dummy packet
8335                  */
8336                 type = lkups[i].type;
8337                 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
8338                         if (type == offsets[j].type) {
8339                                 offset = offsets[j].offset;
8340                                 found = true;
8341                                 break;
8342                         }
8343                 }
8344                 /* this should never happen in a correct calling sequence */
8345                 if (!found)
8346                         return ICE_ERR_PARAM;
8347
8348                 switch (lkups[i].type) {
8349                 case ICE_MAC_OFOS:
8350                 case ICE_MAC_IL:
8351                         len = sizeof(struct ice_ether_hdr);
8352                         break;
8353                 case ICE_ETYPE_OL:
8354                         len = sizeof(struct ice_ethtype_hdr);
8355                         break;
8356                 case ICE_VLAN_OFOS:
8357                 case ICE_VLAN_EX:
8358                 case ICE_VLAN_IN:
8359                         len = sizeof(struct ice_vlan_hdr);
8360                         break;
8361                 case ICE_IPV4_OFOS:
8362                 case ICE_IPV4_IL:
8363                         len = sizeof(struct ice_ipv4_hdr);
8364                         break;
8365                 case ICE_IPV6_OFOS:
8366                 case ICE_IPV6_IL:
8367                         len = sizeof(struct ice_ipv6_hdr);
8368                         break;
8369                 case ICE_TCP_IL:
8370                 case ICE_UDP_OF:
8371                 case ICE_UDP_ILOS:
8372                         len = sizeof(struct ice_l4_hdr);
8373                         break;
8374                 case ICE_SCTP_IL:
8375                         len = sizeof(struct ice_sctp_hdr);
8376                         break;
8377                 case ICE_NVGRE:
8378                         len = sizeof(struct ice_nvgre);
8379                         break;
8380                 case ICE_VXLAN:
8381                 case ICE_GENEVE:
8382                 case ICE_VXLAN_GPE:
8383                         len = sizeof(struct ice_udp_tnl_hdr);
8384                         break;
8385
8386                 case ICE_GTP:
8387                 case ICE_GTP_NO_PAY:
8388                         len = sizeof(struct ice_udp_gtp_hdr);
8389                         break;
8390                 case ICE_PPPOE:
8391                         len = sizeof(struct ice_pppoe_hdr);
8392                         break;
8393                 case ICE_ESP:
8394                         len = sizeof(struct ice_esp_hdr);
8395                         break;
8396                 case ICE_NAT_T:
8397                         len = sizeof(struct ice_nat_t_hdr);
8398                         break;
8399                 case ICE_AH:
8400                         len = sizeof(struct ice_ah_hdr);
8401                         break;
8402                 case ICE_L2TPV3:
8403                         len = sizeof(struct ice_l2tpv3_sess_hdr);
8404                         break;
8405                 default:
8406                         return ICE_ERR_PARAM;
8407                 }
8408
8409                 /* the length should be a word multiple */
8410                 if (len % ICE_BYTES_PER_WORD)
8411                         return ICE_ERR_CFG;
8412
8413                 /* We have the offset to the header start, the length, the
8414                  * caller's header values and mask. Use this information to
8415                  * copy the data into the dummy packet appropriately based on
8416                  * the mask. Note that we need to only write the bits as
8417                  * indicated by the mask to make sure we don't improperly write
8418                  * over any significant packet data.
8419                  */
8420                 for (j = 0; j < len / sizeof(u16); j++)
8421                         if (((u16 *)&lkups[i].m_u)[j])
8422                                 ((u16 *)(pkt + offset))[j] =
8423                                         (((u16 *)(pkt + offset))[j] &
8424                                          ~((u16 *)&lkups[i].m_u)[j]) |
8425                                         (((u16 *)&lkups[i].h_u)[j] &
8426                                          ((u16 *)&lkups[i].m_u)[j]);
8427         }
8428
8429         s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
8430
8431         return ICE_SUCCESS;
8432 }
8433
8434 /**
8435  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8436  * @hw: pointer to the hardware structure
8437  * @tun_type: tunnel type
8438  * @pkt: dummy packet to fill in
8439  * @offsets: offset info for the dummy packet
8440  */
8441 static enum ice_status
8442 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8443                         u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
8444 {
8445         u16 open_port, i;
8446
8447         switch (tun_type) {
8448         case ICE_SW_TUN_AND_NON_TUN:
8449         case ICE_SW_TUN_VXLAN_GPE:
8450         case ICE_SW_TUN_VXLAN:
8451         case ICE_SW_TUN_VXLAN_VLAN:
8452         case ICE_SW_TUN_UDP:
8453                 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8454                         return ICE_ERR_CFG;
8455                 break;
8456
8457         case ICE_SW_TUN_GENEVE:
8458         case ICE_SW_TUN_GENEVE_VLAN:
8459                 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8460                         return ICE_ERR_CFG;
8461                 break;
8462
8463         default:
8464                 /* Nothing needs to be done for this tunnel type */
8465                 return ICE_SUCCESS;
8466         }
8467
8468         /* Find the outer UDP protocol header and insert the port number */
8469         for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8470                 if (offsets[i].type == ICE_UDP_OF) {
8471                         struct ice_l4_hdr *hdr;
8472                         u16 offset;
8473
8474                         offset = offsets[i].offset;
8475                         hdr = (struct ice_l4_hdr *)&pkt[offset];
8476                         hdr->dst_port = CPU_TO_BE16(open_port);
8477
8478                         return ICE_SUCCESS;
8479                 }
8480         }
8481
8482         return ICE_ERR_CFG;
8483 }
8484
8485 /**
8486  * ice_find_adv_rule_entry - Search a rule entry
8487  * @hw: pointer to the hardware structure
8488  * @lkups: lookup elements or match criteria for the advanced recipe, one
8489  *         structure per protocol header
8490  * @lkups_cnt: number of protocols
8491  * @recp_id: recipe ID for which we are finding the rule
8492  * @rinfo: other information regarding the rule e.g. priority and action info
8493  *
8494  * Helper function to search for a given advance rule entry
8495  * Returns pointer to entry storing the rule if found
8496  */
8497 static struct ice_adv_fltr_mgmt_list_entry *
8498 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8499                         u16 lkups_cnt, u16 recp_id,
8500                         struct ice_adv_rule_info *rinfo)
8501 {
8502         struct ice_adv_fltr_mgmt_list_entry *list_itr;
8503         struct ice_switch_info *sw = hw->switch_info;
8504         int i;
8505
8506         LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8507                             ice_adv_fltr_mgmt_list_entry, list_entry) {
8508                 bool lkups_matched = true;
8509
8510                 if (lkups_cnt != list_itr->lkups_cnt)
8511                         continue;
8512                 for (i = 0; i < list_itr->lkups_cnt; i++)
8513                         if (memcmp(&list_itr->lkups[i], &lkups[i],
8514                                    sizeof(*lkups))) {
8515                                 lkups_matched = false;
8516                                 break;
8517                         }
8518                 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8519                     rinfo->tun_type == list_itr->rule_info.tun_type &&
8520                     lkups_matched)
8521                         return list_itr;
8522         }
8523         return NULL;
8524 }
8525
8526 /**
8527  * ice_adv_add_update_vsi_list
8528  * @hw: pointer to the hardware structure
8529  * @m_entry: pointer to current adv filter management list entry
8530  * @cur_fltr: filter information from the book keeping entry
8531  * @new_fltr: filter information with the new VSI to be added
8532  *
8533  * Call AQ command to add or update previously created VSI list with new VSI.
8534  *
8535  * Helper function to do book keeping associated with adding filter information
8536  * The algorithm to do the booking keeping is described below :
8537  * When a VSI needs to subscribe to a given advanced filter
8538  *      if only one VSI has been added till now
8539  *              Allocate a new VSI list and add two VSIs
8540  *              to this list using switch rule command
8541  *              Update the previously created switch rule with the
8542  *              newly created VSI list ID
8543  *      if a VSI list was previously created
8544  *              Add the new VSI to the previously created VSI list set
8545  *              using the update switch rule command
8546  */
8547 static enum ice_status
8548 ice_adv_add_update_vsi_list(struct ice_hw *hw,
8549                             struct ice_adv_fltr_mgmt_list_entry *m_entry,
8550                             struct ice_adv_rule_info *cur_fltr,
8551                             struct ice_adv_rule_info *new_fltr)
8552 {
8553         enum ice_status status;
8554         u16 vsi_list_id = 0;
8555
8556         if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8557             cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8558             cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
8559                 return ICE_ERR_NOT_IMPL;
8560
8561         if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8562              new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
8563             (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8564              cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
8565                 return ICE_ERR_NOT_IMPL;
8566
8567         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
8568                  /* Only one entry existed in the mapping and it was not already
8569                   * a part of a VSI list. So, create a VSI list with the old and
8570                   * new VSIs.
8571                   */
8572                 struct ice_fltr_info tmp_fltr;
8573                 u16 vsi_handle_arr[2];
8574
8575                 /* A rule already exists with the new VSI being added */
8576                 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
8577                     new_fltr->sw_act.fwd_id.hw_vsi_id)
8578                         return ICE_ERR_ALREADY_EXISTS;
8579
8580                 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
8581                 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
8582                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
8583                                                   &vsi_list_id,
8584                                                   ICE_SW_LKUP_LAST);
8585                 if (status)
8586                         return status;
8587
8588                 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8589                 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
8590                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
8591                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
8592                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
8593                 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
8594
8595                 /* Update the previous switch rule of "forward to VSI" to
8596                  * "fwd to VSI list"
8597                  */
8598                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8599                 if (status)
8600                         return status;
8601
8602                 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
8603                 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
8604                 m_entry->vsi_list_info =
8605                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
8606                                                 vsi_list_id);
8607         } else {
8608                 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
8609
8610                 if (!m_entry->vsi_list_info)
8611                         return ICE_ERR_CFG;
8612
8613                 /* A rule already exists with the new VSI being added */
8614                 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
8615                         return ICE_SUCCESS;
8616
8617                 /* Update the previously created VSI list set with
8618                  * the new VSI ID passed in
8619                  */
8620                 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
8621
8622                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
8623                                                   vsi_list_id, false,
8624                                                   ice_aqc_opc_update_sw_rules,
8625                                                   ICE_SW_LKUP_LAST);
8626                 /* update VSI list mapping info with new VSI ID */
8627                 if (!status)
8628                         ice_set_bit(vsi_handle,
8629                                     m_entry->vsi_list_info->vsi_map);
8630         }
8631         if (!status)
8632                 m_entry->vsi_count++;
8633         return status;
8634 }
8635
8636 /**
8637  * ice_add_adv_rule - helper function to create an advanced switch rule
8638  * @hw: pointer to the hardware structure
8639  * @lkups: information on the words that needs to be looked up. All words
8640  * together makes one recipe
8641  * @lkups_cnt: num of entries in the lkups array
8642  * @rinfo: other information related to the rule that needs to be programmed
8643  * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
8644  *               ignored is case of error.
8645  *
8646  * This function can program only 1 rule at a time. The lkups is used to
8647  * describe the all the words that forms the "lookup" portion of the recipe.
8648  * These words can span multiple protocols. Callers to this function need to
8649  * pass in a list of protocol headers with lookup information along and mask
8650  * that determines which words are valid from the given protocol header.
8651  * rinfo describes other information related to this rule such as forwarding
8652  * IDs, priority of this rule, etc.
8653  */
8654 enum ice_status
8655 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8656                  u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
8657                  struct ice_rule_query_data *added_entry)
8658 {
8659         struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
8660         u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
8661         const struct ice_dummy_pkt_offsets *pkt_offsets;
8662         struct ice_aqc_sw_rules_elem *s_rule = NULL;
8663         struct LIST_HEAD_TYPE *rule_head;
8664         struct ice_switch_info *sw;
8665         enum ice_status status;
8666         const u8 *pkt = NULL;
8667         bool prof_rule;
8668         u16 word_cnt;
8669         u32 act = 0;
8670         u8 q_rgn;
8671
8672         /* Initialize profile to result index bitmap */
8673         if (!hw->switch_info->prof_res_bm_init) {
8674                 hw->switch_info->prof_res_bm_init = 1;
8675                 ice_init_prof_result_bm(hw);
8676         }
8677
8678         prof_rule = ice_is_prof_rule(rinfo->tun_type);
8679         if (!prof_rule && !lkups_cnt)
8680                 return ICE_ERR_PARAM;
8681
8682         /* get # of words we need to match */
8683         word_cnt = 0;
8684         for (i = 0; i < lkups_cnt; i++) {
8685                 u16 j, *ptr;
8686
8687                 ptr = (u16 *)&lkups[i].m_u;
8688                 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
8689                         if (ptr[j] != 0)
8690                                 word_cnt++;
8691         }
8692
8693         if (prof_rule) {
8694                 if (word_cnt > ICE_MAX_CHAIN_WORDS)
8695                         return ICE_ERR_PARAM;
8696         } else {
8697                 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
8698                         return ICE_ERR_PARAM;
8699         }
8700
8701         /* make sure that we can locate a dummy packet */
8702         ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
8703                               &pkt_offsets);
8704         if (!pkt) {
8705                 status = ICE_ERR_PARAM;
8706                 goto err_ice_add_adv_rule;
8707         }
8708
8709         if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8710               rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
8711               rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8712               rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
8713                 return ICE_ERR_CFG;
8714
8715         vsi_handle = rinfo->sw_act.vsi_handle;
8716         if (!ice_is_vsi_valid(hw, vsi_handle))
8717                 return ICE_ERR_PARAM;
8718
8719         if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8720                 rinfo->sw_act.fwd_id.hw_vsi_id =
8721                         ice_get_hw_vsi_num(hw, vsi_handle);
8722         if (rinfo->sw_act.flag & ICE_FLTR_TX)
8723                 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
8724
8725         status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8726         if (status)
8727                 return status;
8728         m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8729         if (m_entry) {
8730                 /* we have to add VSI to VSI_LIST and increment vsi_count.
8731                  * Also Update VSI list so that we can change forwarding rule
8732                  * if the rule already exists, we will check if it exists with
8733                  * same vsi_id, if not then add it to the VSI list if it already
8734                  * exists if not then create a VSI list and add the existing VSI
8735                  * ID and the new VSI ID to the list
8736                  * We will add that VSI to the list
8737                  */
8738                 status = ice_adv_add_update_vsi_list(hw, m_entry,
8739                                                      &m_entry->rule_info,
8740                                                      rinfo);
8741                 if (added_entry) {
8742                         added_entry->rid = rid;
8743                         added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8744                         added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8745                 }
8746                 return status;
8747         }
8748         rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8749         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8750         if (!s_rule)
8751                 return ICE_ERR_NO_MEMORY;
8752         act |= ICE_SINGLE_ACT_LAN_ENABLE;
8753         switch (rinfo->sw_act.fltr_act) {
8754         case ICE_FWD_TO_VSI:
8755                 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8756                         ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8757                 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8758                 break;
8759         case ICE_FWD_TO_Q:
8760                 act |= ICE_SINGLE_ACT_TO_Q;
8761                 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8762                        ICE_SINGLE_ACT_Q_INDEX_M;
8763                 break;
8764         case ICE_FWD_TO_QGRP:
8765                 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8766                         (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8767                 act |= ICE_SINGLE_ACT_TO_Q;
8768                 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8769                        ICE_SINGLE_ACT_Q_INDEX_M;
8770                 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8771                        ICE_SINGLE_ACT_Q_REGION_M;
8772                 break;
8773         case ICE_DROP_PACKET:
8774                 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8775                        ICE_SINGLE_ACT_VALID_BIT;
8776                 break;
8777         default:
8778                 status = ICE_ERR_CFG;
8779                 goto err_ice_add_adv_rule;
8780         }
8781
8782         /* set the rule LOOKUP type based on caller specified 'RX'
8783          * instead of hardcoding it to be either LOOKUP_TX/RX
8784          *
8785          * for 'RX' set the source to be the port number
8786          * for 'TX' set the source to be the source HW VSI number (determined
8787          * by caller)
8788          */
8789         if (rinfo->rx) {
8790                 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8791                 s_rule->pdata.lkup_tx_rx.src =
8792                         CPU_TO_LE16(hw->port_info->lport);
8793         } else {
8794                 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8795                 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8796         }
8797
8798         s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8799         s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
8800
8801         status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8802                                            pkt_len, pkt_offsets);
8803         if (status)
8804                 goto err_ice_add_adv_rule;
8805
8806         if (rinfo->tun_type != ICE_NON_TUN &&
8807             rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8808                 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8809                                                  s_rule->pdata.lkup_tx_rx.hdr,
8810                                                  pkt_offsets);
8811                 if (status)
8812                         goto err_ice_add_adv_rule;
8813         }
8814
8815         status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8816                                  rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8817                                  NULL);
8818         if (status)
8819                 goto err_ice_add_adv_rule;
8820         adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8821                 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8822         if (!adv_fltr) {
8823                 status = ICE_ERR_NO_MEMORY;
8824                 goto err_ice_add_adv_rule;
8825         }
8826
8827         adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8828                 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8829                            ICE_NONDMA_TO_NONDMA);
8830         if (!adv_fltr->lkups && !prof_rule) {
8831                 status = ICE_ERR_NO_MEMORY;
8832                 goto err_ice_add_adv_rule;
8833         }
8834
8835         adv_fltr->lkups_cnt = lkups_cnt;
8836         adv_fltr->rule_info = *rinfo;
8837         adv_fltr->rule_info.fltr_rule_id =
8838                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
8839         sw = hw->switch_info;
8840         sw->recp_list[rid].adv_rule = true;
8841         rule_head = &sw->recp_list[rid].filt_rules;
8842
8843         if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8844                 adv_fltr->vsi_count = 1;
8845
8846         /* Add rule entry to book keeping list */
8847         LIST_ADD(&adv_fltr->list_entry, rule_head);
8848         if (added_entry) {
8849                 added_entry->rid = rid;
8850                 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
8851                 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8852         }
8853 err_ice_add_adv_rule:
8854         if (status && adv_fltr) {
8855                 ice_free(hw, adv_fltr->lkups);
8856                 ice_free(hw, adv_fltr);
8857         }
8858
8859         ice_free(hw, s_rule);
8860
8861         return status;
8862 }
8863
8864 /**
8865  * ice_adv_rem_update_vsi_list
8866  * @hw: pointer to the hardware structure
8867  * @vsi_handle: VSI handle of the VSI to remove
8868  * @fm_list: filter management entry for which the VSI list management needs to
8869  *           be done
8870  */
8871 static enum ice_status
8872 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
8873                             struct ice_adv_fltr_mgmt_list_entry *fm_list)
8874 {
8875         struct ice_vsi_list_map_info *vsi_list_info;
8876         enum ice_sw_lkup_type lkup_type;
8877         enum ice_status status;
8878         u16 vsi_list_id;
8879
8880         if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
8881             fm_list->vsi_count == 0)
8882                 return ICE_ERR_PARAM;
8883
8884         /* A rule with the VSI being removed does not exist */
8885         if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
8886                 return ICE_ERR_DOES_NOT_EXIST;
8887
8888         lkup_type = ICE_SW_LKUP_LAST;
8889         vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
8890         status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
8891                                           ice_aqc_opc_update_sw_rules,
8892                                           lkup_type);
8893         if (status)
8894                 return status;
8895
8896         fm_list->vsi_count--;
8897         ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
8898         vsi_list_info = fm_list->vsi_list_info;
8899         if (fm_list->vsi_count == 1) {
8900                 struct ice_fltr_info tmp_fltr;
8901                 u16 rem_vsi_handle;
8902
8903                 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
8904                                                     ICE_MAX_VSI);
8905                 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
8906                         return ICE_ERR_OUT_OF_RANGE;
8907
8908                 /* Make sure VSI list is empty before removing it below */
8909                 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
8910                                                   vsi_list_id, true,
8911                                                   ice_aqc_opc_update_sw_rules,
8912                                                   lkup_type);
8913                 if (status)
8914                         return status;
8915
8916                 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8917                 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
8918                 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
8919                 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
8920                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
8921                 tmp_fltr.fwd_id.hw_vsi_id =
8922                         ice_get_hw_vsi_num(hw, rem_vsi_handle);
8923                 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
8924                         ice_get_hw_vsi_num(hw, rem_vsi_handle);
8925                 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
8926
8927                 /* Update the previous switch rule of "MAC forward to VSI" to
8928                  * "MAC fwd to VSI list"
8929                  */
8930                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8931                 if (status) {
8932                         ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
8933                                   tmp_fltr.fwd_id.hw_vsi_id, status);
8934                         return status;
8935                 }
8936                 fm_list->vsi_list_info->ref_cnt--;
8937
8938                 /* Remove the VSI list since it is no longer used */
8939                 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
8940                 if (status) {
8941                         ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
8942                                   vsi_list_id, status);
8943                         return status;
8944                 }
8945
8946                 LIST_DEL(&vsi_list_info->list_entry);
8947                 ice_free(hw, vsi_list_info);
8948                 fm_list->vsi_list_info = NULL;
8949         }
8950
8951         return status;
8952 }
8953
8954 /**
8955  * ice_rem_adv_rule - removes existing advanced switch rule
8956  * @hw: pointer to the hardware structure
8957  * @lkups: information on the words that needs to be looked up. All words
8958  *         together makes one recipe
8959  * @lkups_cnt: num of entries in the lkups array
8960  * @rinfo: Its the pointer to the rule information for the rule
8961  *
8962  * This function can be used to remove 1 rule at a time. The lkups is
8963  * used to describe all the words that forms the "lookup" portion of the
8964  * rule. These words can span multiple protocols. Callers to this function
8965  * need to pass in a list of protocol headers with lookup information along
8966  * and mask that determines which words are valid from the given protocol
8967  * header. rinfo describes other information related to this rule such as
8968  * forwarding IDs, priority of this rule, etc.
8969  */
8970 enum ice_status
8971 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8972                  u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
8973 {
8974         struct ice_adv_fltr_mgmt_list_entry *list_elem;
8975         struct ice_prot_lkup_ext lkup_exts;
8976         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
8977         enum ice_status status = ICE_SUCCESS;
8978         bool remove_rule = false;
8979         u16 i, rid, vsi_handle;
8980
8981         ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
8982         for (i = 0; i < lkups_cnt; i++) {
8983                 u16 count;
8984
8985                 if (lkups[i].type >= ICE_PROTOCOL_LAST)
8986                         return ICE_ERR_CFG;
8987
8988                 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8989                 if (!count)
8990                         return ICE_ERR_CFG;
8991         }
8992
8993         /* Create any special protocol/offset pairs, such as looking at tunnel
8994          * bits by extracting metadata
8995          */
8996         status = ice_add_special_words(rinfo, &lkup_exts);
8997         if (status)
8998                 return status;
8999
9000         rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type, rinfo->priority);
9001         /* If did not find a recipe that match the existing criteria */
9002         if (rid == ICE_MAX_NUM_RECIPES)
9003                 return ICE_ERR_PARAM;
9004
9005         rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
9006         list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
9007         /* the rule is already removed */
9008         if (!list_elem)
9009                 return ICE_SUCCESS;
9010         ice_acquire_lock(rule_lock);
9011         if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
9012                 remove_rule = true;
9013         } else if (list_elem->vsi_count > 1) {
9014                 remove_rule = false;
9015                 vsi_handle = rinfo->sw_act.vsi_handle;
9016                 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
9017         } else {
9018                 vsi_handle = rinfo->sw_act.vsi_handle;
9019                 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
9020                 if (status) {
9021                         ice_release_lock(rule_lock);
9022                         return status;
9023                 }
9024                 if (list_elem->vsi_count == 0)
9025                         remove_rule = true;
9026         }
9027         ice_release_lock(rule_lock);
9028         if (remove_rule) {
9029                 struct ice_aqc_sw_rules_elem *s_rule;
9030                 u16 rule_buf_sz;
9031
9032                 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
9033                 s_rule = (struct ice_aqc_sw_rules_elem *)
9034                         ice_malloc(hw, rule_buf_sz);
9035                 if (!s_rule)
9036                         return ICE_ERR_NO_MEMORY;
9037                 s_rule->pdata.lkup_tx_rx.act = 0;
9038                 s_rule->pdata.lkup_tx_rx.index =
9039                         CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
9040                 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
9041                 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
9042                                          rule_buf_sz, 1,
9043                                          ice_aqc_opc_remove_sw_rules, NULL);
9044                 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
9045                         struct ice_switch_info *sw = hw->switch_info;
9046
9047                         ice_acquire_lock(rule_lock);
9048                         LIST_DEL(&list_elem->list_entry);
9049                         ice_free(hw, list_elem->lkups);
9050                         ice_free(hw, list_elem);
9051                         ice_release_lock(rule_lock);
9052                         if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
9053                                 sw->recp_list[rid].adv_rule = false;
9054                 }
9055                 ice_free(hw, s_rule);
9056         }
9057         return status;
9058 }
9059
9060 /**
9061  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
9062  * @hw: pointer to the hardware structure
9063  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
9064  *
9065  * This function is used to remove 1 rule at a time. The removal is based on
9066  * the remove_entry parameter. This function will remove rule for a given
9067  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
9068  */
9069 enum ice_status
9070 ice_rem_adv_rule_by_id(struct ice_hw *hw,
9071                        struct ice_rule_query_data *remove_entry)
9072 {
9073         struct ice_adv_fltr_mgmt_list_entry *list_itr;
9074         struct LIST_HEAD_TYPE *list_head;
9075         struct ice_adv_rule_info rinfo;
9076         struct ice_switch_info *sw;
9077
9078         sw = hw->switch_info;
9079         if (!sw->recp_list[remove_entry->rid].recp_created)
9080                 return ICE_ERR_PARAM;
9081         list_head = &sw->recp_list[remove_entry->rid].filt_rules;
9082         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
9083                             list_entry) {
9084                 if (list_itr->rule_info.fltr_rule_id ==
9085                     remove_entry->rule_id) {
9086                         rinfo = list_itr->rule_info;
9087                         rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
9088                         return ice_rem_adv_rule(hw, list_itr->lkups,
9089                                                 list_itr->lkups_cnt, &rinfo);
9090                 }
9091         }
9092         /* either list is empty or unable to find rule */
9093         return ICE_ERR_DOES_NOT_EXIST;
9094 }
9095
9096 /**
9097  * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
9098  *                       given VSI handle
9099  * @hw: pointer to the hardware structure
9100  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
9101  *
9102  * This function is used to remove all the rules for a given VSI and as soon
9103  * as removing a rule fails, it will return immediately with the error code,
9104  * else it will return ICE_SUCCESS
9105  */
9106 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
9107 {
9108         struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
9109         struct ice_vsi_list_map_info *map_info;
9110         struct LIST_HEAD_TYPE *list_head;
9111         struct ice_adv_rule_info rinfo;
9112         struct ice_switch_info *sw;
9113         enum ice_status status;
9114         u8 rid;
9115
9116         sw = hw->switch_info;
9117         for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
9118                 if (!sw->recp_list[rid].recp_created)
9119                         continue;
9120                 if (!sw->recp_list[rid].adv_rule)
9121                         continue;
9122
9123                 list_head = &sw->recp_list[rid].filt_rules;
9124                 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
9125                                          ice_adv_fltr_mgmt_list_entry,
9126                                          list_entry) {
9127                         rinfo = list_itr->rule_info;
9128
9129                         if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
9130                                 map_info = list_itr->vsi_list_info;
9131                                 if (!map_info)
9132                                         continue;
9133
9134                                 if (!ice_is_bit_set(map_info->vsi_map,
9135                                                     vsi_handle))
9136                                         continue;
9137                         } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
9138                                 continue;
9139                         }
9140
9141                         rinfo.sw_act.vsi_handle = vsi_handle;
9142                         status = ice_rem_adv_rule(hw, list_itr->lkups,
9143                                                   list_itr->lkups_cnt, &rinfo);
9144
9145                         if (status)
9146                                 return status;
9147                 }
9148         }
9149         return ICE_SUCCESS;
9150 }
9151
9152 /**
9153  * ice_replay_fltr - Replay all the filters stored by a specific list head
9154  * @hw: pointer to the hardware structure
9155  * @list_head: list for which filters needs to be replayed
9156  * @recp_id: Recipe ID for which rules need to be replayed
9157  */
9158 static enum ice_status
9159 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
9160 {
9161         struct ice_fltr_mgmt_list_entry *itr;
9162         enum ice_status status = ICE_SUCCESS;
9163         struct ice_sw_recipe *recp_list;
9164         u8 lport = hw->port_info->lport;
9165         struct LIST_HEAD_TYPE l_head;
9166
9167         if (LIST_EMPTY(list_head))
9168                 return status;
9169
9170         recp_list = &hw->switch_info->recp_list[recp_id];
9171         /* Move entries from the given list_head to a temporary l_head so that
9172          * they can be replayed. Otherwise when trying to re-add the same
9173          * filter, the function will return already exists
9174          */
9175         LIST_REPLACE_INIT(list_head, &l_head);
9176
9177         /* Mark the given list_head empty by reinitializing it so filters
9178          * could be added again by *handler
9179          */
9180         LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
9181                             list_entry) {
9182                 struct ice_fltr_list_entry f_entry;
9183                 u16 vsi_handle;
9184
9185                 f_entry.fltr_info = itr->fltr_info;
9186                 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
9187                         status = ice_add_rule_internal(hw, recp_list, lport,
9188                                                        &f_entry);
9189                         if (status != ICE_SUCCESS)
9190                                 goto end;
9191                         continue;
9192                 }
9193
9194                 /* Add a filter per VSI separately */
9195                 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
9196                                      ICE_MAX_VSI) {
9197                         if (!ice_is_vsi_valid(hw, vsi_handle))
9198                                 break;
9199
9200                         ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9201                         f_entry.fltr_info.vsi_handle = vsi_handle;
9202                         f_entry.fltr_info.fwd_id.hw_vsi_id =
9203                                 ice_get_hw_vsi_num(hw, vsi_handle);
9204                         f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9205                         if (recp_id == ICE_SW_LKUP_VLAN)
9206                                 status = ice_add_vlan_internal(hw, recp_list,
9207                                                                &f_entry);
9208                         else
9209                                 status = ice_add_rule_internal(hw, recp_list,
9210                                                                lport,
9211                                                                &f_entry);
9212                         if (status != ICE_SUCCESS)
9213                                 goto end;
9214                 }
9215         }
9216 end:
9217         /* Clear the filter management list */
9218         ice_rem_sw_rule_info(hw, &l_head);
9219         return status;
9220 }
9221
9222 /**
9223  * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
9224  * @hw: pointer to the hardware structure
9225  *
9226  * NOTE: This function does not clean up partially added filters on error.
9227  * It is up to caller of the function to issue a reset or fail early.
9228  */
9229 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
9230 {
9231         struct ice_switch_info *sw = hw->switch_info;
9232         enum ice_status status = ICE_SUCCESS;
9233         u8 i;
9234
9235         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9236                 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
9237
9238                 status = ice_replay_fltr(hw, i, head);
9239                 if (status != ICE_SUCCESS)
9240                         return status;
9241         }
9242         return status;
9243 }
9244
9245 /**
9246  * ice_replay_vsi_fltr - Replay filters for requested VSI
9247  * @hw: pointer to the hardware structure
9248  * @pi: pointer to port information structure
9249  * @sw: pointer to switch info struct for which function replays filters
9250  * @vsi_handle: driver VSI handle
9251  * @recp_id: Recipe ID for which rules need to be replayed
9252  * @list_head: list for which filters need to be replayed
9253  *
9254  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
9255  * It is required to pass valid VSI handle.
9256  */
9257 static enum ice_status
9258 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9259                     struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
9260                     struct LIST_HEAD_TYPE *list_head)
9261 {
9262         struct ice_fltr_mgmt_list_entry *itr;
9263         enum ice_status status = ICE_SUCCESS;
9264         struct ice_sw_recipe *recp_list;
9265         u16 hw_vsi_id;
9266
9267         if (LIST_EMPTY(list_head))
9268                 return status;
9269         recp_list = &sw->recp_list[recp_id];
9270         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
9271
9272         LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
9273                             list_entry) {
9274                 struct ice_fltr_list_entry f_entry;
9275
9276                 f_entry.fltr_info = itr->fltr_info;
9277                 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
9278                     itr->fltr_info.vsi_handle == vsi_handle) {
9279                         /* update the src in case it is VSI num */
9280                         if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9281                                 f_entry.fltr_info.src = hw_vsi_id;
9282                         status = ice_add_rule_internal(hw, recp_list,
9283                                                        pi->lport,
9284                                                        &f_entry);
9285                         if (status != ICE_SUCCESS)
9286                                 goto end;
9287                         continue;
9288                 }
9289                 if (!itr->vsi_list_info ||
9290                     !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
9291                         continue;
9292                 /* Clearing it so that the logic can add it back */
9293                 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9294                 f_entry.fltr_info.vsi_handle = vsi_handle;
9295                 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9296                 /* update the src in case it is VSI num */
9297                 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9298                         f_entry.fltr_info.src = hw_vsi_id;
9299                 if (recp_id == ICE_SW_LKUP_VLAN)
9300                         status = ice_add_vlan_internal(hw, recp_list, &f_entry);
9301                 else
9302                         status = ice_add_rule_internal(hw, recp_list,
9303                                                        pi->lport,
9304                                                        &f_entry);
9305                 if (status != ICE_SUCCESS)
9306                         goto end;
9307         }
9308 end:
9309         return status;
9310 }
9311
9312 /**
9313  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9314  * @hw: pointer to the hardware structure
9315  * @vsi_handle: driver VSI handle
9316  * @list_head: list for which filters need to be replayed
9317  *
9318  * Replay the advanced rule for the given VSI.
9319  */
9320 static enum ice_status
9321 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9322                         struct LIST_HEAD_TYPE *list_head)
9323 {
9324         struct ice_rule_query_data added_entry = { 0 };
9325         struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9326         enum ice_status status = ICE_SUCCESS;
9327
9328         if (LIST_EMPTY(list_head))
9329                 return status;
9330         LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9331                             list_entry) {
9332                 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9333                 u16 lk_cnt = adv_fltr->lkups_cnt;
9334
9335                 if (vsi_handle != rinfo->sw_act.vsi_handle)
9336                         continue;
9337                 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9338                                           &added_entry);
9339                 if (status)
9340                         break;
9341         }
9342         return status;
9343 }
9344
9345 /**
9346  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9347  * @hw: pointer to the hardware structure
9348  * @pi: pointer to port information structure
9349  * @vsi_handle: driver VSI handle
9350  *
9351  * Replays filters for requested VSI via vsi_handle.
9352  */
9353 enum ice_status
9354 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9355                         u16 vsi_handle)
9356 {
9357         struct ice_switch_info *sw = hw->switch_info;
9358         enum ice_status status;
9359         u8 i;
9360
9361         /* Update the recipes that were created */
9362         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9363                 struct LIST_HEAD_TYPE *head;
9364
9365                 head = &sw->recp_list[i].filt_replay_rules;
9366                 if (!sw->recp_list[i].adv_rule)
9367                         status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9368                                                      head);
9369                 else
9370                         status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
9371                 if (status != ICE_SUCCESS)
9372                         return status;
9373         }
9374
9375         return ICE_SUCCESS;
9376 }
9377
9378 /**
9379  * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
9380  * @hw: pointer to the HW struct
9381  * @sw: pointer to switch info struct for which function removes filters
9382  *
9383  * Deletes the filter replay rules for given switch
9384  */
9385 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9386 {
9387         u8 i;
9388
9389         if (!sw)
9390                 return;
9391
9392         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9393                 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9394                         struct LIST_HEAD_TYPE *l_head;
9395
9396                         l_head = &sw->recp_list[i].filt_replay_rules;
9397                         if (!sw->recp_list[i].adv_rule)
9398                                 ice_rem_sw_rule_info(hw, l_head);
9399                         else
9400                                 ice_rem_adv_rule_info(hw, l_head);
9401                 }
9402         }
9403 }
9404
9405 /**
9406  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
9407  * @hw: pointer to the HW struct
9408  *
9409  * Deletes the filter replay rules.
9410  */
9411 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
9412 {
9413         ice_rm_sw_replay_rule_info(hw, hw->switch_info);
9414 }