net/hns3: fix return value for unsupported tuple
[dpdk.git] / drivers / net / ice / base / ice_switch.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2021 Intel Corporation
3  */
4
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
7 #include "ice_flow.h"
8
/* Byte offsets into the dummy Ethernet header defined below */
#define ICE_ETH_DA_OFFSET               0
#define ICE_ETH_ETHTYPE_OFFSET          12
#define ICE_ETH_VLAN_TCI_OFFSET         14
#define ICE_MAX_VLAN_ID                 0xFFF  /* VLAN ID field is 12 bits */
#define ICE_IPV6_ETHER_ID               0x86DD /* Ethertype: IPv6 */
#define ICE_IPV4_NVGRE_PROTO_ID         0x002F /* IP protocol number 47 = GRE */
#define ICE_PPP_IPV6_PROTO_ID           0x0057 /* PPP protocol field: IPv6 */
#define ICE_TCP_PROTO_ID                0x06   /* IP protocol number 6 = TCP */
#define ICE_GTPU_PROFILE                24     /* NOTE(review): profile index for GTP-U — confirm against profile table */
#define ICE_ETH_P_8021Q                 0x8100 /* Ethertype: 802.1Q VLAN tag */
#define ICE_MPLS_ETHER_ID               0x8847 /* Ethertype: MPLS unicast */
20
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *      In case of VLAN filter first two bytes defines ether type (0x8100)
 *      and remaining two bytes are placeholder for programming a given VLAN ID
 *      In case of Ether type filter it is treated as header without VLAN tag
 *      and byte 12 and 13 is used to program a given Ether type instead
 */
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
                                                        0x2, 0, 0, 0, 0, 0,
                                                        0x81, 0, 0, 0}; /* 0x8100 tag, VLAN ID placeholder */
39
/* One entry of a dummy-packet layout table: maps a protocol header type to
 * its byte offset within the corresponding dummy packet array below.
 */
struct ice_dummy_pkt_offsets {
        enum ice_protocol_type type;
        u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};
44
/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_NVGRE,            34 },
        { ICE_MAC_IL,           42 },
        { ICE_IPV4_IL,          56 },
        { ICE_TCP_IL,           76 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + IPv4 + TCP */
static const u8 dummy_gre_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 (IPv4) */

        0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x2F, 0x00, 0x00, /* protocol 47 = GRE */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34: key present, proto 0x6558 */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x06, 0x00, 0x00, /* protocol 6 = TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x02, 0x20, 0x00, /* data offset 5, SYN set */
        0x00, 0x00, 0x00, 0x00
};
89
/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_NVGRE,            34 },
        { ICE_MAC_IL,           42 },
        { ICE_IPV4_IL,          56 },
        { ICE_UDP_ILOS,         76 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + IPv4 + UDP */
static const u8 dummy_gre_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 (IPv4) */

        0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x2F, 0x00, 0x00, /* protocol 47 = GRE */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34: key present, proto 0x6558 */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
        0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */
};
131
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * inner MAC + IPv4 + TCP dummy packet
 */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_VXLAN,            42 },
        { ICE_GENEVE,           42 },
        { ICE_VXLAN_GPE,        42 },
        { ICE_MAC_IL,           50 },
        { ICE_IPV4_IL,          64 },
        { ICE_TCP_IL,           84 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + IPv4 + TCP */
static const u8 dummy_udp_tun_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 (IPv4) */

        0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
        0x00, 0x01, 0x00, 0x00,
        0x40, 0x11, 0x00, 0x00, /* protocol 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34: dst port 4789 (VXLAN) */
        0x00, 0x46, 0x00, 0x00,

        0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
        0x00, 0x01, 0x00, 0x00,
        0x40, 0x06, 0x00, 0x00, /* protocol 6 = TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x02, 0x20, 0x00, /* data offset 5, SYN set */
        0x00, 0x00, 0x00, 0x00
};
182
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * inner MAC + IPv4 + UDP dummy packet
 */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_VXLAN,            42 },
        { ICE_GENEVE,           42 },
        { ICE_VXLAN_GPE,        42 },
        { ICE_MAC_IL,           50 },
        { ICE_IPV4_IL,          64 },
        { ICE_UDP_ILOS,         84 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + IPv4 + UDP */
static const u8 dummy_udp_tun_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 (IPv4) */

        0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34: dst port 4789 (VXLAN) */
        0x00, 0x3a, 0x00, 0x00,

        0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
        0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */
};
230
/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_ILOS,         34 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + UDP */
static const u8 dummy_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 (IPv4) */

        0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
        0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
259
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_VLAN_OFOS,        12 },
        { ICE_ETYPE_OL,         16 },
        { ICE_IPV4_OFOS,        18 },
        { ICE_UDP_ILOS,         38 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* C-tag (802.1Q), IPv4:UDP dummy packet */
static const u8 dummy_vlan_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12: TPID 0x8100, TCI placeholder */

        0x08, 0x00,             /* ICE_ETYPE_OL 16 (IPv4) */

        0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
        0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
291
/* offset info for MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_TCP_IL,           34 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + TCP */
static const u8 dummy_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 (IPv4) */

        0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x06, 0x00, 0x00, /* protocol 6 = TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
323
/* offset info for MAC + MPLS dummy packet */
static const struct ice_dummy_pkt_offsets dummy_mpls_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + MPLS */
static const u8 dummy_mpls_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x88, 0x47,             /* ICE_ETYPE_OL 12: MPLS unicast */
        0x00, 0x00, 0x01, 0x00, /* label 0, bottom-of-stack set */

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
342
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_VLAN_OFOS,        12 },
        { ICE_ETYPE_OL,         16 },
        { ICE_IPV4_OFOS,        18 },
        { ICE_TCP_IL,           38 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* C-tag (802.1Q), IPv4:TCP dummy packet */
static const u8 dummy_vlan_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12: TPID 0x8100, TCI placeholder */

        0x08, 0x00,             /* ICE_ETYPE_OL 16 (IPv4) */

        0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x06, 0x00, 0x00, /* protocol 6 = TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
377
/* offset info for MAC + IPv6 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_TCP_IL,           54 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv6 + TCP */
static const u8 dummy_tcp_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x86, 0xDD,             /* ICE_ETYPE_OL 12 (IPv6) */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
412
/* C-tag (802.1Q): IPv6 + TCP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_tcp_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_VLAN_OFOS,        12 },
        { ICE_ETYPE_OL,         16 },
        { ICE_IPV6_OFOS,        18 },
        { ICE_TCP_IL,           58 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* C-tag (802.1Q), IPv6 + TCP dummy packet */
static const u8 dummy_vlan_tcp_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12: TPID 0x8100, TCI placeholder */

        0x86, 0xDD,             /* ICE_ETYPE_OL 16 (IPv6) */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
        0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
453
/* IPv6 + UDP */
static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_ILOS,         54 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* IPv6 + UDP dummy packet */
static const u8 dummy_udp_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x86, 0xDD,             /* ICE_ETYPE_OL 12 (IPv6) */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x10, 0x11, 0x00, /* Next header UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
        0x00, 0x10, 0x00, 0x00, /* UDP length 16: header + 8 payload bytes */

        0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
490
/* C-tag (802.1Q): IPv6 + UDP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_udp_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_VLAN_OFOS,        12 },
        { ICE_ETYPE_OL,         16 },
        { ICE_IPV6_OFOS,        18 },
        { ICE_UDP_ILOS,         58 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* C-tag (802.1Q), IPv6 + UDP dummy packet */
static const u8 dummy_vlan_udp_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12: TPID 0x8100, TCI placeholder */

        0x86, 0xDD,             /* ICE_ETYPE_OL 16 (IPv6) */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
        0x00, 0x08, 0x11, 0x00, /* Next header UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
        0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
528
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_GTP,              42 },
        { ICE_IPV4_IL,          62 },
        { ICE_TCP_IL,           82 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for outer IPv4 + UDP + GTP-U + inner IPv4 + TCP */
static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x58, /* IP 14 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* UDP 34: dst port 2152 (GTP-U) */
        0x00, 0x44, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85, /* next ext hdr: PDU session container */

        0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
        0x00, 0x00, 0x00, 0x00,

        0x45, 0x00, 0x00, 0x28, /* IP 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x06, 0x00, 0x00, /* protocol 6 = TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* TCP 82 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
576
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_GTP,              42 },
        { ICE_IPV4_IL,          62 },
        { ICE_UDP_ILOS,         82 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for outer IPv4 + UDP + GTP-U + inner IPv4 + UDP */
static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x4c, /* IP 14 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* UDP 34: dst port 2152 (GTP-U) */
        0x00, 0x38, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85, /* next ext hdr: PDU session container */

        0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
        0x00, 0x00, 0x00, 0x00,

        0x45, 0x00, 0x00, 0x1c, /* IP 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* UDP 82 */
        0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
621
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_GTP,              42 },
        { ICE_IPV6_IL,          62 },
        { ICE_TCP_IL,           102 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for outer IPv4 + UDP + GTP-U + inner IPv6 + TCP */
static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x6c, /* IP 14 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* UDP 34: dst port 2152 (GTP-U) */
        0x00, 0x58, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85, /* next ext hdr: PDU session container */

        0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
        0x00, 0x00, 0x00, 0x00,

        0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
        0x00, 0x14, 0x06, 0x00, /* next header 6 = TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* TCP 102 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
674
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_GTP,              42 },
        { ICE_IPV6_IL,          62 },
        { ICE_UDP_ILOS,         102 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for outer IPv4 + UDP + GTP-U + inner IPv6 + UDP */
static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x60, /* IP 14 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* UDP 34: dst port 2152 (GTP-U) */
        0x00, 0x4c, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85, /* next ext hdr: PDU session container */

        0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
        0x00, 0x00, 0x00, 0x00,

        0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
        0x00, 0x08, 0x11, 0x00, /* next header 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* UDP 102 */
        0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
723
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_OF,           54 },
        { ICE_GTP,              62 },
        { ICE_IPV4_IL,          82 },
        { ICE_TCP_IL,           102 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for outer IPv6 + UDP + GTP-U + inner IPv4 + TCP */
static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xdd,

        0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
        0x00, 0x44, 0x11, 0x00, /* next header 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* UDP 54: dst port 2152 (GTP-U) */
        0x00, 0x44, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85, /* next ext hdr: PDU session container */

        0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
        0x00, 0x00, 0x00, 0x00,

        0x45, 0x00, 0x00, 0x28, /* IP 82 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x06, 0x00, 0x00, /* protocol 6 = TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* TCP 102 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
775
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_OF,           54 },
        { ICE_GTP,              62 },
        { ICE_IPV4_IL,          82 },
        { ICE_UDP_ILOS,         102 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for outer IPv6 + UDP + GTP-U + inner IPv4 + UDP */
static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xdd,

        0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
        0x00, 0x38, 0x11, 0x00, /* next header 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* UDP 54: dst port 2152 (GTP-U) */
        0x00, 0x38, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85, /* next ext hdr: PDU session container */

        0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
        0x00, 0x00, 0x00, 0x00,

        0x45, 0x00, 0x00, 0x1c, /* IP 82 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* UDP 102 */
        0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
824
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_OF,           54 },
        { ICE_GTP,              62 },
        { ICE_IPV6_IL,          82 },
        { ICE_TCP_IL,           122 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for outer IPv6 + UDP + GTP-U + inner IPv6 + TCP */
static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xdd,

        0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
        0x00, 0x58, 0x11, 0x00, /* next header 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* UDP 54: dst port 2152 (GTP-U) */
        0x00, 0x58, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85, /* next ext hdr: PDU session container */

        0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
        0x00, 0x00, 0x00, 0x00,

        0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
        0x00, 0x14, 0x06, 0x00, /* next header 6 = TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* TCP 122 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
881
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_OF,           54 },
        { ICE_GTP,              62 },
        { ICE_IPV6_IL,          82 },
        { ICE_UDP_ILOS,         122 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for outer IPv6 + UDP + GTP-U + inner IPv6 + UDP */
static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xdd,

        0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
        0x00, 0x4c, 0x11, 0x00, /* next header 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* UDP 54: dst port 2152 (GTP-U) */
        0x00, 0x4c, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85, /* next ext hdr: PDU session container */

        0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
        0x00, 0x00, 0x00, 0x00,

        0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
        0x00, 0x08, 0x11, 0x00, /* next header 17 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* UDP 122 */
        0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
935
/* Header offsets for the IPv4 GTP-U packet below carrying an inner IPv4 */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/IPv4/UDP/GTP-U(+PDU session ext)/IPv4 training packet */
static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00, /* protocol UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34, src/dst port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
974
/* Header offsets for the IPv4 GTP-U packet below carrying an inner IPv6 */
static const
struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/IPv4/UDP/GTP-U(+PDU session ext)/IPv6 training packet */
static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00, /* protocol UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34, src/dst port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
	0x00, 0x00, 0x3b, 0x00, /* next header: no next header */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
1020
/* Header offsets for the IPv6 GTP-U packet below carrying an inner IPv4 */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/IPv6/UDP/GTP-U(+PDU session ext)/IPv4 training packet */
static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54, src/dst port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
1066
/* Header offsets for the IPv6 GTP-U packet below carrying an inner IPv6 */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/IPv6/UDP/GTP-U(+PDU session ext)/IPv6 training packet */
static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54, src/dst port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 82 */
	0x00, 0x00, 0x3b, 0x00, /* next header: no next header */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
1117
/* Header offsets for the IPv4 GTP-U packet below with no inner payload */
static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/IPv4/UDP/GTP-U(+PDU session ext) training packet, no payload */
static const u8 dummy_udp_gtp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34, dst port 2152 (GTP-U) */
	0x00, 0x1c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

};
1149
/* Header offsets for a GTP-U-over-IPv4 match with no payload after GTP */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP_NO_PAY,	42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Header offsets for a GTP-U-over-IPv6 match with no payload after GTP */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP_NO_PAY,	62 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1166
/* Dummy MAC/IPv6/UDP/GTP-U training packet, no GTP payload; used with
 * dummy_ipv6_gtp_no_pay_packet_offsets above
 */
static const u8 dummy_ipv6_gtp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54, src/dst port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x30, 0x00, 0x00, 0x28,  /* ICE_GTP_NO_PAY 62 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
1192
/* Header offsets for a bare PPPoE session packet (no network payload) */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_PPPOE,		18 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Header offsets for a PPPoE session packet carrying IPv4 */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV4_OFOS,	26 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1209
/* Dummy MAC/VLAN/PPPoE/IPv4 training packet */
static const u8 dummy_pppoe_ipv4_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

	0x88, 0x64,		/* ICE_ETYPE_OL 16, PPPoE session */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 24, protocol IPv4 */

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 26 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1232
/* Header offsets for a PPPoE session packet carrying IPv4/TCP */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV4_OFOS,	26 },
	{ ICE_TCP_IL,		46 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/VLAN/PPPoE/IPv4/TCP training packet */
static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

	0x88, 0x64,		/* ICE_ETYPE_OL 16, PPPoE session */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 24, protocol IPv4 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00, /* protocol TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00, /* data offset 5 words */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1272
/* Header offsets for a PPPoE session packet carrying IPv4/UDP */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV4_OFOS,	26 },
	{ ICE_UDP_ILOS,		46 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/VLAN/PPPoE/IPv4/UDP training packet */
static const u8 dummy_pppoe_ipv4_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

	0x88, 0x64,		/* ICE_ETYPE_OL 16, PPPoE session */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 24, protocol IPv4 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1309
/* Header offsets for a PPPoE session packet carrying IPv6 */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV6_OFOS,	26 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/VLAN/PPPoE/IPv6 training packet */
static const u8 dummy_pppoe_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

	0x88, 0x64,		/* ICE_ETYPE_OL 16, PPPoE session */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 24, protocol IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x00, 0x3b, 0x00, /* next header: no next header */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1346
/* Header offsets for a PPPoE session packet carrying IPv6/TCP */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV6_OFOS,	26 },
	{ ICE_TCP_IL,		66 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/VLAN/PPPoE/IPv6/TCP training packet */
static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

	0x88, 0x64,		/* ICE_ETYPE_OL 16, PPPoE session */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 24, protocol IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00, /* data offset 5 words */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1391
/* Header offsets for a PPPoE session packet carrying IPv6/UDP */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV6_OFOS,	26 },
	{ ICE_UDP_ILOS,		66 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/VLAN/PPPoE/IPv6/UDP training packet */
static const u8 dummy_pppoe_ipv6_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

	0x88, 0x64,		/* ICE_ETYPE_OL 16, PPPoE session */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 24, protocol IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1433
/* Header offsets for an IPv4 ESP (IPsec) packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_ESP,			34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/IPv4/ESP training packet */
static const u8 dummy_ipv4_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x32, 0x00, 0x00, /* protocol 0x32 = ESP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1457
/* Header offsets for an IPv6 ESP (IPsec) packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_ESP,			54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/IPv6/ESP training packet */
static const u8 dummy_ipv6_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x08, 0x32, 0x00, /* Next header ESP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1486
/* Header offsets for an IPv4 AH (IPsec authentication header) packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_AH,			34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/IPv4/AH training packet */
static const u8 dummy_ipv4_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x33, 0x00, 0x00, /* protocol 0x33 = AH */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1511
/* Header offsets for an IPv6 AH (IPsec authentication header) packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_AH,			54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/IPv6/AH training packet */
static const u8 dummy_ipv6_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x33, 0x00, /* Next header AH */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1541
/* Header offsets for an IPv4 NAT-T (ESP-in-UDP, port 4500) packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_NAT_T,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/IPv4/UDP/NAT-T training packet */
static const u8 dummy_ipv4_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00, /* protocol UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x11, 0x94, /* ICE_UDP_ILOS 34, dst port 4500 (NAT-T) */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_NAT_T 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1569
/* Header offsets for an IPv6 NAT-T (ESP-in-UDP, port 4500) packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_NAT_T,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/IPv6/UDP/NAT-T training packet */
static const u8 dummy_ipv6_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP (carries NAT-T) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x11, 0x94, /* ICE_UDP_ILOS 54, dst port 4500 (NAT-T) */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_NAT_T 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */

};
1603
/* Header offsets for an IPv4 L2TPv3-over-IP packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_L2TPV3,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/IPv4/L2TPv3 training packet */
static const u8 dummy_ipv4_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00, /* protocol 0x73 = L2TPv3 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1628
/* Header offsets for an IPv6 L2TPv3-over-IP packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_L2TPV3,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/IPv6/L2TPv3 training packet */
static const u8 dummy_ipv6_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x73, 0x40, /* next header 0x73 = L2TPv3 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1658
/* Header offsets for a double-VLAN (QinQ) packet carrying IPv4 */
static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		12 },
	{ ICE_VLAN_IN,		16 },
	{ ICE_ETYPE_OL,		20 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/QinQ/IPv4 training packet */
static const u8 dummy_qinq_ipv4_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
	0x08, 0x00,		/* ICE_ETYPE_OL 20, IPv4 */

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
1685
/* Header offsets for a double-VLAN (QinQ) packet carrying IPv4/UDP */
static const
struct ice_dummy_pkt_offsets dummy_qinq_ipv4_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		12 },
	{ ICE_VLAN_IN,		16 },
	{ ICE_ETYPE_OL,		20 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_UDP_ILOS,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/QinQ/IPv4/UDP training packet */
static const u8 dummy_qinq_ipv4_udp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
	0x08, 0x00,		/* ICE_ETYPE_OL 20, IPv4 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
1717
/* Header offsets for a double-VLAN (QinQ) packet carrying IPv4/TCP */
static const
struct ice_dummy_pkt_offsets dummy_qinq_ipv4_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		12 },
	{ ICE_VLAN_IN,		16 },
	{ ICE_ETYPE_OL,		20 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_TCP_IL,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/QinQ/IPv4/TCP training packet */
static const u8 dummy_qinq_ipv4_tcp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
	0x08, 0x00,		/* ICE_ETYPE_OL 20, IPv4 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00, /* protocol TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00, /* data offset 5 words */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
1752
/* Header offsets for a double-VLAN (QinQ) packet carrying IPv6 */
static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		12 },
	{ ICE_VLAN_IN,		16 },
	{ ICE_ETYPE_OL,		20 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/QinQ/IPv6 training packet */
static const u8 dummy_qinq_ipv6_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
	0x86, 0xDD,		/* ICE_ETYPE_OL 20, IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x00, 0x3b, 0x00, /* next header: no next header */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
1784
/* Header offsets for a double-VLAN (QinQ) packet carrying IPv6/UDP */
static const
struct ice_dummy_pkt_offsets dummy_qinq_ipv6_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		12 },
	{ ICE_VLAN_IN,		16 },
	{ ICE_ETYPE_OL,		20 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_UDP_ILOS,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy MAC/QinQ/IPv6/UDP training packet */
static const u8 dummy_qinq_ipv6_udp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
	0x86, 0xDD,		/* ICE_ETYPE_OL 20, IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
1821
/* Per-protocol byte offsets into dummy_qinq_ipv6_tcp_pkt; the list is
 * terminated by ICE_PROTOCOL_LAST.
 */
static const
struct ice_dummy_pkt_offsets dummy_qinq_ipv6_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		12 },
	{ ICE_VLAN_IN,		16 },
	{ ICE_ETYPE_OL,		20 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_TCP_IL,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1832
/* Dummy QinQ IPv6/TCP training packet: outer tag 0x9100, inner tag 0x8100,
 * ethertype 0x86DD. IPv6 payload length is 20 (0x14, the bare TCP header)
 * and the TCP data offset nibble (0x50) encodes the same 20-byte header.
 */
static const u8 dummy_qinq_ipv6_tcp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
	0x86, 0xDD,             /* ICE_ETYPE_OL 20 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x14, 0x06, 0x00, /* Next header TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00, /* data offset = 5 words (20 bytes) */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
1861
/* Per-protocol byte offsets for the QinQ PPPoE dummy packet (header only,
 * no inner IP); the list is terminated by ICE_PROTOCOL_LAST.
 */
static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		12 },
	{ ICE_VLAN_IN,		16 },
	{ ICE_ETYPE_OL,		20 },
	{ ICE_PPPOE,		22 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1870
/* Per-protocol byte offsets into dummy_qinq_pppoe_ipv4_pkt; the list is
 * terminated by ICE_PROTOCOL_LAST.
 */
static const
struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		12 },
	{ ICE_VLAN_IN,		16 },
	{ ICE_ETYPE_OL,		20 },
	{ ICE_PPPOE,		22 },
	{ ICE_IPV4_OFOS,	30 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1881
/* Dummy QinQ PPPoE/IPv4 training packet: outer tag 0x9100, inner tag 0x8100,
 * ethertype 0x8864 (PPPoE session). The PPPoE payload length 0x0016 (22)
 * covers the 2-byte PPP protocol field (0x0021 = IPv4) plus the 20-byte
 * IPv4 header.
 */
static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
	0x88, 0x64,             /* ICE_ETYPE_OL 20 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
	0x00, 0x16,

	0x00, 0x21,             /* PPP Link Layer 28 */

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 30 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
1904
/* Per-protocol byte offsets into dummy_qinq_pppoe_ipv6_packet; the list is
 * terminated by ICE_PROTOCOL_LAST.
 */
static const
struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		12 },
	{ ICE_VLAN_IN,		16 },
	{ ICE_ETYPE_OL,		20 },
	{ ICE_PPPOE,		22 },
	{ ICE_IPV6_OFOS,	30 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1915
/* Dummy QinQ PPPoE/IPv6 training packet: outer tag 0x9100, inner tag 0x8100,
 * ethertype 0x8864 (PPPoE session). The PPPoE payload length 0x002a (42)
 * covers the 2-byte PPP protocol field (0x0057 = IPv6) plus the 40-byte
 * IPv6 header; next header is 0x3b ("No Next Header"), so no L4 payload.
 */
static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
	0x88, 0x64,             /* ICE_ETYPE_OL 20 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
	0x00, 0x2a,

	0x00, 0x57,             /* PPP Link Layer 28*/

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
	0x00, 0x00, 0x3b, 0x00, /* payload len 0, next header 0x3b (none) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1943
/* Recipe-to-profile association bitmap: indexed by recipe ID, each bitmap
 * records which hardware profiles the recipe is mapped to. Populated by
 * ice_get_recp_to_prof_map().
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* Profile-to-recipe association bitmap: indexed by profile ID, each bitmap
 * records which recipes are associated with the profile (the inverse of
 * recipe_to_profile).
 */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);

/* Forward declaration: needed by ice_get_recp_frm_fw() below. */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1953
1954 /**
1955  * ice_collect_result_idx - copy result index values
1956  * @buf: buffer that contains the result index
1957  * @recp: the recipe struct to copy data into
1958  */
1959 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1960                                    struct ice_sw_recipe *recp)
1961 {
1962         if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1963                 ice_set_bit(buf->content.result_indx &
1964                             ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1965 }
1966
/* Mapping from GTP-U hardware profile IDs to the switch tunnel type each one
 * represents; scanned linearly by ice_get_tun_type_for_recipe() to refine a
 * generic ICE_SW_TUN_GTP classification.
 */
static struct ice_prof_type_entry ice_prof_type_tbl[ICE_GTPU_PROFILE] = {
	{ ICE_PROFID_IPV4_GTPU_IPV4_OTHER,    ICE_SW_TUN_IPV4_GTPU_IPV4},
	{ ICE_PROFID_IPV4_GTPU_IPV4_UDP,      ICE_SW_TUN_IPV4_GTPU_IPV4_UDP},
	{ ICE_PROFID_IPV4_GTPU_IPV4_TCP,      ICE_SW_TUN_IPV4_GTPU_IPV4_TCP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV4},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP},
	{ ICE_PROFID_IPV4_GTPU_IPV6_OTHER,    ICE_SW_TUN_IPV4_GTPU_IPV6},
	{ ICE_PROFID_IPV4_GTPU_IPV6_UDP,      ICE_SW_TUN_IPV4_GTPU_IPV6_UDP},
	{ ICE_PROFID_IPV4_GTPU_IPV6_TCP,      ICE_SW_TUN_IPV4_GTPU_IPV6_TCP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV6},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP},
	{ ICE_PROFID_IPV6_GTPU_IPV4_OTHER,    ICE_SW_TUN_IPV6_GTPU_IPV4},
	{ ICE_PROFID_IPV6_GTPU_IPV4_UDP,      ICE_SW_TUN_IPV6_GTPU_IPV4_UDP},
	{ ICE_PROFID_IPV6_GTPU_IPV4_TCP,      ICE_SW_TUN_IPV6_GTPU_IPV4_TCP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV4},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP},
	{ ICE_PROFID_IPV6_GTPU_IPV6_OTHER,    ICE_SW_TUN_IPV6_GTPU_IPV6},
	{ ICE_PROFID_IPV6_GTPU_IPV6_UDP,      ICE_SW_TUN_IPV6_GTPU_IPV6_UDP},
	{ ICE_PROFID_IPV6_GTPU_IPV6_TCP,      ICE_SW_TUN_IPV6_GTPU_IPV6_TCP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV6},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP},
};
1993
/**
 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
 * @rid: recipe ID that we are populating
 * @vlan: true if the recipe matches on the double-VLAN metadata flag, in
 *	  which case the result is promoted to its QinQ variant at the end
 *
 * Classify a firmware recipe's tunnel type from the set of hardware profiles
 * it is associated with (recipe_to_profile[rid]). Each profile ID is tested
 * against hard-coded per-tunnel profile ID lists; the combination of which
 * lists matched decides the tunnel type. When the recipe maps to exactly one
 * profile, the type is refined to a profile-specific value.
 */
static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
{
	/* Hard-coded profile ID groups for each tunnel class.
	 * NOTE(review): these numeric IDs are assumed to track the
	 * ICE_PROFID_* definitions elsewhere in the driver — confirm when
	 * profile IDs change.
	 */
	u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
	u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
	u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
	u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
	enum ice_sw_tunnel_type tun_type;
	u16 i, j, k, profile_num = 0;
	bool non_tun_valid = false;
	bool pppoe_valid = false;
	bool vxlan_valid = false;
	bool gre_valid = false;
	bool gtp_valid = false;
	bool flag_valid = false;

	/* Scan every profile associated with this recipe and record which
	 * tunnel classes are represented.
	 */
	for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
		if (!ice_is_bit_set(recipe_to_profile[rid], j))
			continue;
		else
			profile_num++;

		for (i = 0; i < 12; i++) {
			if (gre_profile[i] == j)
				gre_valid = true;
		}

		for (i = 0; i < 12; i++) {
			if (vxlan_profile[i] == j)
				vxlan_valid = true;
		}

		for (i = 0; i < 7; i++) {
			if (pppoe_profile[i] == j)
				pppoe_valid = true;
		}

		for (i = 0; i < 6; i++) {
			if (non_tun_profile[i] == j)
				non_tun_valid = true;
		}

		/* GTP-U profiles occupy a contiguous ID range */
		if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
		    j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
			gtp_valid = true;

		/* ESP/AH/NAT-T/PFCP and GTP TEID-only profiles; used below to
		 * trigger single-profile refinement.
		 */
		if ((j >= ICE_PROFID_IPV4_ESP &&
		     j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
		    (j >= ICE_PROFID_IPV4_GTPC_TEID &&
		     j <= ICE_PROFID_IPV6_GTPU_TEID))
			flag_valid = true;
	}

	/* Pick the tunnel type: a pure tunnel class when only that class is
	 * present, a combined type when tunnel and non-tunnel profiles mix,
	 * and ICE_NON_TUN otherwise.
	 */
	if (!non_tun_valid && vxlan_valid)
		tun_type = ICE_SW_TUN_VXLAN;
	else if (!non_tun_valid && gre_valid)
		tun_type = ICE_SW_TUN_NVGRE;
	else if (!non_tun_valid && pppoe_valid)
		tun_type = ICE_SW_TUN_PPPOE;
	else if (!non_tun_valid && gtp_valid)
		tun_type = ICE_SW_TUN_GTP;
	else if (non_tun_valid &&
		 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
		tun_type = ICE_SW_TUN_AND_NON_TUN;
	else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
		 !pppoe_valid)
		tun_type = ICE_NON_TUN;
	else
		tun_type = ICE_NON_TUN;

	/* Multiple PPPoE profiles: narrow to IPv4-only or IPv6-only PPPoE if
	 * exactly one of the two "other" profiles is present.
	 */
	if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
		i = ice_is_bit_set(recipe_to_profile[rid],
				   ICE_PROFID_PPPOE_IPV4_OTHER);
		j = ice_is_bit_set(recipe_to_profile[rid],
				   ICE_PROFID_PPPOE_IPV6_OTHER);
		if (i && !j)
			tun_type = ICE_SW_TUN_PPPOE_IPV4;
		else if (!i && j)
			tun_type = ICE_SW_TUN_PPPOE_IPV6;
	}

	/* Refine a generic GTP result to the first matching entry of the
	 * profile-to-type table.
	 */
	if (tun_type == ICE_SW_TUN_GTP) {
		for (k = 0; k < ARRAY_SIZE(ice_prof_type_tbl); k++)
			if (ice_is_bit_set(recipe_to_profile[rid],
					   ice_prof_type_tbl[k].prof_id)) {
				tun_type = ice_prof_type_tbl[k].type;
				break;
			}
	}

	/* Exactly one profile: map that profile ID directly to its
	 * profile-specific tunnel type where one exists.
	 */
	if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
		for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
			if (ice_is_bit_set(recipe_to_profile[rid], j)) {
				switch (j) {
				case ICE_PROFID_IPV4_TCP:
					tun_type = ICE_SW_IPV4_TCP;
					break;
				case ICE_PROFID_IPV4_UDP:
					tun_type = ICE_SW_IPV4_UDP;
					break;
				case ICE_PROFID_IPV6_TCP:
					tun_type = ICE_SW_IPV6_TCP;
					break;
				case ICE_PROFID_IPV6_UDP:
					tun_type = ICE_SW_IPV6_UDP;
					break;
				case ICE_PROFID_PPPOE_PAY:
					tun_type = ICE_SW_TUN_PPPOE_PAY;
					break;
				case ICE_PROFID_PPPOE_IPV4_TCP:
					tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
					break;
				case ICE_PROFID_PPPOE_IPV4_UDP:
					tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
					break;
				case ICE_PROFID_PPPOE_IPV4_OTHER:
					tun_type = ICE_SW_TUN_PPPOE_IPV4;
					break;
				case ICE_PROFID_PPPOE_IPV6_TCP:
					tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
					break;
				case ICE_PROFID_PPPOE_IPV6_UDP:
					tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
					break;
				case ICE_PROFID_PPPOE_IPV6_OTHER:
					tun_type = ICE_SW_TUN_PPPOE_IPV6;
					break;
				case ICE_PROFID_IPV4_ESP:
					tun_type = ICE_SW_TUN_IPV4_ESP;
					break;
				case ICE_PROFID_IPV6_ESP:
					tun_type = ICE_SW_TUN_IPV6_ESP;
					break;
				case ICE_PROFID_IPV4_AH:
					tun_type = ICE_SW_TUN_IPV4_AH;
					break;
				case ICE_PROFID_IPV6_AH:
					tun_type = ICE_SW_TUN_IPV6_AH;
					break;
				case ICE_PROFID_IPV4_NAT_T:
					tun_type = ICE_SW_TUN_IPV4_NAT_T;
					break;
				case ICE_PROFID_IPV6_NAT_T:
					tun_type = ICE_SW_TUN_IPV6_NAT_T;
					break;
				case ICE_PROFID_IPV4_PFCP_NODE:
					tun_type =
					ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
					break;
				case ICE_PROFID_IPV6_PFCP_NODE:
					tun_type =
					ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
					break;
				case ICE_PROFID_IPV4_PFCP_SESSION:
					tun_type =
					ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
					break;
				case ICE_PROFID_IPV6_PFCP_SESSION:
					tun_type =
					ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
					break;
				case ICE_PROFID_MAC_IPV4_L2TPV3:
					tun_type = ICE_SW_TUN_IPV4_L2TPV3;
					break;
				case ICE_PROFID_MAC_IPV6_L2TPV3:
					tun_type = ICE_SW_TUN_IPV6_L2TPV3;
					break;
				case ICE_PROFID_IPV4_GTPU_TEID:
					tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
					break;
				case ICE_PROFID_IPV6_GTPU_TEID:
					tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
					break;
				default:
					break;
				}

				return tun_type;
			}
		}
	}

	/* Promote to the QinQ variant when the recipe matches the VLAN flag */
	if (vlan && tun_type == ICE_SW_TUN_PPPOE)
		tun_type = ICE_SW_TUN_PPPOE_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
		tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
		tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
		tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
	else if (vlan && tun_type == ICE_NON_TUN)
		tun_type = ICE_NON_TUN_QINQ;

	return tun_type;
}
2194
2195 /**
2196  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
2197  * @hw: pointer to hardware structure
2198  * @recps: struct that we need to populate
2199  * @rid: recipe ID that we are populating
2200  * @refresh_required: true if we should get recipe to profile mapping from FW
2201  *
2202  * This function is used to populate all the necessary entries into our
2203  * bookkeeping so that we have a current list of all the recipes that are
2204  * programmed in the firmware.
2205  */
2206 static enum ice_status
2207 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2208                     bool *refresh_required)
2209 {
2210         ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
2211         struct ice_aqc_recipe_data_elem *tmp;
2212         u16 num_recps = ICE_MAX_NUM_RECIPES;
2213         struct ice_prot_lkup_ext *lkup_exts;
2214         enum ice_status status;
2215         u8 fv_word_idx = 0;
2216         bool vlan = false;
2217         u16 sub_recps;
2218
2219         ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
2220
2221         /* we need a buffer big enough to accommodate all the recipes */
2222         tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
2223                 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
2224         if (!tmp)
2225                 return ICE_ERR_NO_MEMORY;
2226
2227         tmp[0].recipe_indx = rid;
2228         status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2229         /* non-zero status meaning recipe doesn't exist */
2230         if (status)
2231                 goto err_unroll;
2232
2233         /* Get recipe to profile map so that we can get the fv from lkups that
2234          * we read for a recipe from FW. Since we want to minimize the number of
2235          * times we make this FW call, just make one call and cache the copy
2236          * until a new recipe is added. This operation is only required the
2237          * first time to get the changes from FW. Then to search existing
2238          * entries we don't need to update the cache again until another recipe
2239          * gets added.
2240          */
2241         if (*refresh_required) {
2242                 ice_get_recp_to_prof_map(hw);
2243                 *refresh_required = false;
2244         }
2245
2246         /* Start populating all the entries for recps[rid] based on lkups from
2247          * firmware. Note that we are only creating the root recipe in our
2248          * database.
2249          */
2250         lkup_exts = &recps[rid].lkup_exts;
2251
2252         for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2253                 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2254                 struct ice_recp_grp_entry *rg_entry;
2255                 u8 i, prof, idx, prot = 0;
2256                 bool is_root;
2257                 u16 off = 0;
2258
2259                 rg_entry = (struct ice_recp_grp_entry *)
2260                         ice_malloc(hw, sizeof(*rg_entry));
2261                 if (!rg_entry) {
2262                         status = ICE_ERR_NO_MEMORY;
2263                         goto err_unroll;
2264                 }
2265
2266                 idx = root_bufs.recipe_indx;
2267                 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2268
2269                 /* Mark all result indices in this chain */
2270                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2271                         ice_set_bit(root_bufs.content.result_indx &
2272                                     ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
2273
2274                 /* get the first profile that is associated with rid */
2275                 prof = ice_find_first_bit(recipe_to_profile[idx],
2276                                           ICE_MAX_NUM_PROFILES);
2277                 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2278                         u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2279
2280                         rg_entry->fv_idx[i] = lkup_indx;
2281                         rg_entry->fv_mask[i] =
2282                                 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
2283
2284                         /* If the recipe is a chained recipe then all its
2285                          * child recipe's result will have a result index.
2286                          * To fill fv_words we should not use those result
2287                          * index, we only need the protocol ids and offsets.
2288                          * We will skip all the fv_idx which stores result
2289                          * index in them. We also need to skip any fv_idx which
2290                          * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2291                          * valid offset value.
2292                          */
2293                         if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
2294                                            rg_entry->fv_idx[i]) ||
2295                             rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2296                             rg_entry->fv_idx[i] == 0)
2297                                 continue;
2298
2299                         ice_find_prot_off(hw, ICE_BLK_SW, prof,
2300                                           rg_entry->fv_idx[i], &prot, &off);
2301                         lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2302                         lkup_exts->fv_words[fv_word_idx].off = off;
2303                         lkup_exts->field_mask[fv_word_idx] =
2304                                 rg_entry->fv_mask[i];
2305                         if (prot == ICE_META_DATA_ID_HW &&
2306                             off == ICE_TUN_FLAG_MDID_OFF(1))
2307                                 vlan = true;
2308                         fv_word_idx++;
2309                 }
2310                 /* populate rg_list with the data from the child entry of this
2311                  * recipe
2312                  */
2313                 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
2314
2315                 /* Propagate some data to the recipe database */
2316                 recps[idx].is_root = !!is_root;
2317                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2318                 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2319                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2320                         recps[idx].chain_idx = root_bufs.content.result_indx &
2321                                 ~ICE_AQ_RECIPE_RESULT_EN;
2322                         ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2323                 } else {
2324                         recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2325                 }
2326
2327                 if (!is_root)
2328                         continue;
2329
2330                 /* Only do the following for root recipes entries */
2331                 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2332                            sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
2333                 recps[idx].root_rid = root_bufs.content.rid &
2334                         ~ICE_AQ_RECIPE_ID_IS_ROOT;
2335                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2336         }
2337
2338         /* Complete initialization of the root recipe entry */
2339         lkup_exts->n_val_words = fv_word_idx;
2340         recps[rid].big_recp = (num_recps > 1);
2341         recps[rid].n_grp_count = (u8)num_recps;
2342         recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
2343         recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
2344                 ice_memdup(hw, tmp, recps[rid].n_grp_count *
2345                            sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
2346         if (!recps[rid].root_buf)
2347                 goto err_unroll;
2348
2349         /* Copy result indexes */
2350         ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2351         recps[rid].recp_created = true;
2352
2353 err_unroll:
2354         ice_free(hw, tmp);
2355         return status;
2356 }
2357
2358 /**
2359  * ice_get_recp_to_prof_map - updates recipe to profile mapping
2360  * @hw: pointer to hardware structure
2361  *
2362  * This function is used to populate recipe_to_profile matrix where index to
2363  * this array is the recipe ID and the element is the mapping of which profiles
2364  * is this recipe mapped to.
2365  */
2366 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2367 {
2368         ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2369         u16 i;
2370
2371         for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2372                 u16 j;
2373
2374                 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2375                 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2376                 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2377                         continue;
2378                 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
2379                               ICE_MAX_NUM_RECIPES);
2380                 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2381                         ice_set_bit(i, recipe_to_profile[j]);
2382         }
2383 }
2384
2385 /**
2386  * ice_init_def_sw_recp - initialize the recipe book keeping tables
2387  * @hw: pointer to the HW struct
2388  * @recp_list: pointer to sw recipe list
2389  *
2390  * Allocate memory for the entire recipe table and initialize the structures/
2391  * entries corresponding to basic recipes.
2392  */
2393 enum ice_status
2394 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
2395 {
2396         struct ice_sw_recipe *recps;
2397         u8 i;
2398
2399         recps = (struct ice_sw_recipe *)
2400                 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
2401         if (!recps)
2402                 return ICE_ERR_NO_MEMORY;
2403
2404         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2405                 recps[i].root_rid = i;
2406                 INIT_LIST_HEAD(&recps[i].filt_rules);
2407                 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
2408                 INIT_LIST_HEAD(&recps[i].rg_list);
2409                 ice_init_lock(&recps[i].filt_rule_lock);
2410         }
2411
2412         *recp_list = recps;
2413
2414         return ICE_SUCCESS;
2415 }
2416
2417 /**
2418  * ice_aq_get_sw_cfg - get switch configuration
2419  * @hw: pointer to the hardware structure
2420  * @buf: pointer to the result buffer
2421  * @buf_size: length of the buffer available for response
2422  * @req_desc: pointer to requested descriptor
2423  * @num_elems: pointer to number of elements
2424  * @cd: pointer to command details structure or NULL
2425  *
2426  * Get switch configuration (0x0200) to be placed in buf.
2427  * This admin command returns information such as initial VSI/port number
2428  * and switch ID it belongs to.
2429  *
2430  * NOTE: *req_desc is both an input/output parameter.
2431  * The caller of this function first calls this function with *request_desc set
2432  * to 0. If the response from f/w has *req_desc set to 0, all the switch
2433  * configuration information has been returned; if non-zero (meaning not all
2434  * the information was returned), the caller should call this function again
2435  * with *req_desc set to the previous value returned by f/w to get the
2436  * next block of switch configuration information.
2437  *
2438  * *num_elems is output only parameter. This reflects the number of elements
2439  * in response buffer. The caller of this function to use *num_elems while
2440  * parsing the response buffer.
2441  */
2442 static enum ice_status
2443 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
2444                   u16 buf_size, u16 *req_desc, u16 *num_elems,
2445                   struct ice_sq_cd *cd)
2446 {
2447         struct ice_aqc_get_sw_cfg *cmd;
2448         struct ice_aq_desc desc;
2449         enum ice_status status;
2450
2451         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
2452         cmd = &desc.params.get_sw_conf;
2453         cmd->element = CPU_TO_LE16(*req_desc);
2454
2455         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2456         if (!status) {
2457                 *req_desc = LE16_TO_CPU(cmd->element);
2458                 *num_elems = LE16_TO_CPU(cmd->num_elems);
2459         }
2460
2461         return status;
2462 }
2463
2464 /**
2465  * ice_alloc_rss_global_lut - allocate a RSS global LUT
2466  * @hw: pointer to the HW struct
2467  * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2468  * @global_lut_id: output parameter for the RSS global LUT's ID
2469  */
2470 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
2471 {
2472         struct ice_aqc_alloc_free_res_elem *sw_buf;
2473         enum ice_status status;
2474         u16 buf_len;
2475
2476         buf_len = ice_struct_size(sw_buf, elem, 1);
2477         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2478         if (!sw_buf)
2479                 return ICE_ERR_NO_MEMORY;
2480
2481         sw_buf->num_elems = CPU_TO_LE16(1);
2482         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
2483                                        (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2484                                        ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2485
2486         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
2487         if (status) {
2488                 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
2489                           shared_res ? "shared" : "dedicated", status);
2490                 goto ice_alloc_global_lut_exit;
2491         }
2492
2493         *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2494
2495 ice_alloc_global_lut_exit:
2496         ice_free(hw, sw_buf);
2497         return status;
2498 }
2499
2500 /**
2501  * ice_free_rss_global_lut - free a RSS global LUT
2502  * @hw: pointer to the HW struct
2503  * @global_lut_id: ID of the RSS global LUT to free
2504  */
2505 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
2506 {
2507         struct ice_aqc_alloc_free_res_elem *sw_buf;
2508         u16 buf_len, num_elems = 1;
2509         enum ice_status status;
2510
2511         buf_len = ice_struct_size(sw_buf, elem, num_elems);
2512         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2513         if (!sw_buf)
2514                 return ICE_ERR_NO_MEMORY;
2515
2516         sw_buf->num_elems = CPU_TO_LE16(num_elems);
2517         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
2518         sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
2519
2520         status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
2521         if (status)
2522                 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
2523                           global_lut_id, status);
2524
2525         ice_free(hw, sw_buf);
2526         return status;
2527 }
2528
2529 /**
2530  * ice_alloc_sw - allocate resources specific to switch
2531  * @hw: pointer to the HW struct
2532  * @ena_stats: true to turn on VEB stats
2533  * @shared_res: true for shared resource, false for dedicated resource
2534  * @sw_id: switch ID returned
2535  * @counter_id: VEB counter ID returned
2536  *
2537  * allocates switch resources (SWID and VEB counter) (0x0208)
2538  */
2539 enum ice_status
2540 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
2541              u16 *counter_id)
2542 {
2543         struct ice_aqc_alloc_free_res_elem *sw_buf;
2544         struct ice_aqc_res_elem *sw_ele;
2545         enum ice_status status;
2546         u16 buf_len;
2547
2548         buf_len = ice_struct_size(sw_buf, elem, 1);
2549         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2550         if (!sw_buf)
2551                 return ICE_ERR_NO_MEMORY;
2552
2553         /* Prepare buffer for switch ID.
2554          * The number of resource entries in buffer is passed as 1 since only a
2555          * single switch/VEB instance is allocated, and hence a single sw_id
2556          * is requested.
2557          */
2558         sw_buf->num_elems = CPU_TO_LE16(1);
2559         sw_buf->res_type =
2560                 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
2561                             (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2562                             ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2563
2564         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2565                                        ice_aqc_opc_alloc_res, NULL);
2566
2567         if (status)
2568                 goto ice_alloc_sw_exit;
2569
2570         sw_ele = &sw_buf->elem[0];
2571         *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
2572
2573         if (ena_stats) {
2574                 /* Prepare buffer for VEB Counter */
2575                 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
2576                 struct ice_aqc_alloc_free_res_elem *counter_buf;
2577                 struct ice_aqc_res_elem *counter_ele;
2578
2579                 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2580                                 ice_malloc(hw, buf_len);
2581                 if (!counter_buf) {
2582                         status = ICE_ERR_NO_MEMORY;
2583                         goto ice_alloc_sw_exit;
2584                 }
2585
2586                 /* The number of resource entries in buffer is passed as 1 since
2587                  * only a single switch/VEB instance is allocated, and hence a
2588                  * single VEB counter is requested.
2589                  */
2590                 counter_buf->num_elems = CPU_TO_LE16(1);
2591                 counter_buf->res_type =
2592                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
2593                                     ICE_AQC_RES_TYPE_FLAG_DEDICATED);
2594                 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2595                                                opc, NULL);
2596
2597                 if (status) {
2598                         ice_free(hw, counter_buf);
2599                         goto ice_alloc_sw_exit;
2600                 }
2601                 counter_ele = &counter_buf->elem[0];
2602                 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
2603                 ice_free(hw, counter_buf);
2604         }
2605
2606 ice_alloc_sw_exit:
2607         ice_free(hw, sw_buf);
2608         return status;
2609 }
2610
/**
 * ice_free_sw - free resources specific to switch
 * @hw: pointer to the HW struct
 * @sw_id: switch ID returned
 * @counter_id: VEB counter ID returned
 *
 * free switch resources (SWID and VEB counter) (0x0209)
 *
 * NOTE: This function frees multiple resources. It continues
 * releasing other resources even after it encounters error.
 * The error code returned is the last error it encountered.
 */
enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
	enum ice_status status, ret_status;
	u16 buf_len;

	buf_len = ice_struct_size(sw_buf, elem, 1);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
	if (!sw_buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free for switch ID res.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single sw_id
	 * is released.
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);

	ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
					   ice_aqc_opc_free_res, NULL);

	/* Log but deliberately keep going: the VEB counter below must still
	 * be released even if the SWID free failed.
	 */
	if (ret_status)
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	/* Prepare buffer to free for VEB Counter resource */
	counter_buf = (struct ice_aqc_alloc_free_res_elem *)
			ice_malloc(hw, buf_len);
	if (!counter_buf) {
		ice_free(hw, sw_buf);
		return ICE_ERR_NO_MEMORY;
	}

	/* The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single VEB counter
	 * is released
	 */
	counter_buf->num_elems = CPU_TO_LE16(1);
	counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
	counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);

	status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status) {
		ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
		/* per the contract above, the last error wins */
		ret_status = status;
	}

	ice_free(hw, counter_buf);
	ice_free(hw, sw_buf);
	return ret_status;
}
2676
2677 /**
2678  * ice_aq_add_vsi
2679  * @hw: pointer to the HW struct
2680  * @vsi_ctx: pointer to a VSI context struct
2681  * @cd: pointer to command details structure or NULL
2682  *
2683  * Add a VSI context to the hardware (0x0210)
2684  */
2685 enum ice_status
2686 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2687                struct ice_sq_cd *cd)
2688 {
2689         struct ice_aqc_add_update_free_vsi_resp *res;
2690         struct ice_aqc_add_get_update_free_vsi *cmd;
2691         struct ice_aq_desc desc;
2692         enum ice_status status;
2693
2694         cmd = &desc.params.vsi_cmd;
2695         res = &desc.params.add_update_free_vsi_res;
2696
2697         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2698
2699         if (!vsi_ctx->alloc_from_pool)
2700                 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2701                                            ICE_AQ_VSI_IS_VALID);
2702
2703         cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2704
2705         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2706
2707         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2708                                  sizeof(vsi_ctx->info), cd);
2709
2710         if (!status) {
2711                 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2712                 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2713                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2714         }
2715
2716         return status;
2717 }
2718
2719 /**
2720  * ice_aq_free_vsi
2721  * @hw: pointer to the HW struct
2722  * @vsi_ctx: pointer to a VSI context struct
2723  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2724  * @cd: pointer to command details structure or NULL
2725  *
2726  * Free VSI context info from hardware (0x0213)
2727  */
2728 enum ice_status
2729 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2730                 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2731 {
2732         struct ice_aqc_add_update_free_vsi_resp *resp;
2733         struct ice_aqc_add_get_update_free_vsi *cmd;
2734         struct ice_aq_desc desc;
2735         enum ice_status status;
2736
2737         cmd = &desc.params.vsi_cmd;
2738         resp = &desc.params.add_update_free_vsi_res;
2739
2740         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2741
2742         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2743         if (keep_vsi_alloc)
2744                 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2745
2746         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2747         if (!status) {
2748                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2749                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2750         }
2751
2752         return status;
2753 }
2754
2755 /**
2756  * ice_aq_update_vsi
2757  * @hw: pointer to the HW struct
2758  * @vsi_ctx: pointer to a VSI context struct
2759  * @cd: pointer to command details structure or NULL
2760  *
2761  * Update VSI context in the hardware (0x0211)
2762  */
2763 enum ice_status
2764 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2765                   struct ice_sq_cd *cd)
2766 {
2767         struct ice_aqc_add_update_free_vsi_resp *resp;
2768         struct ice_aqc_add_get_update_free_vsi *cmd;
2769         struct ice_aq_desc desc;
2770         enum ice_status status;
2771
2772         cmd = &desc.params.vsi_cmd;
2773         resp = &desc.params.add_update_free_vsi_res;
2774
2775         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2776
2777         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2778
2779         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2780
2781         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2782                                  sizeof(vsi_ctx->info), cd);
2783
2784         if (!status) {
2785                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2786                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2787         }
2788
2789         return status;
2790 }
2791
2792 /**
2793  * ice_is_vsi_valid - check whether the VSI is valid or not
2794  * @hw: pointer to the HW struct
2795  * @vsi_handle: VSI handle
2796  *
2797  * check whether the VSI is valid or not
2798  */
2799 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2800 {
2801         return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2802 }
2803
2804 /**
2805  * ice_get_hw_vsi_num - return the HW VSI number
2806  * @hw: pointer to the HW struct
2807  * @vsi_handle: VSI handle
2808  *
2809  * return the HW VSI number
2810  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2811  */
2812 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2813 {
2814         return hw->vsi_ctx[vsi_handle]->vsi_num;
2815 }
2816
2817 /**
2818  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2819  * @hw: pointer to the HW struct
2820  * @vsi_handle: VSI handle
2821  *
2822  * return the VSI context entry for a given VSI handle
2823  */
2824 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2825 {
2826         return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2827 }
2828
/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	/* Ownership of @vsi transfers to hw->vsi_ctx; it is released by
	 * ice_clear_vsi_ctx(). Caller must pass a valid (in-range) handle.
	 */
	hw->vsi_ctx[vsi_handle] = vsi;
}
2842
2843 /**
2844  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2845  * @hw: pointer to the HW struct
2846  * @vsi_handle: VSI handle
2847  */
2848 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2849 {
2850         struct ice_vsi_ctx *vsi;
2851         u8 i;
2852
2853         vsi = ice_get_vsi_ctx(hw, vsi_handle);
2854         if (!vsi)
2855                 return;
2856         ice_for_each_traffic_class(i) {
2857                 if (vsi->lan_q_ctx[i]) {
2858                         ice_free(hw, vsi->lan_q_ctx[i]);
2859                         vsi->lan_q_ctx[i] = NULL;
2860                 }
2861         }
2862 }
2863
2864 /**
2865  * ice_clear_vsi_ctx - clear the VSI context entry
2866  * @hw: pointer to the HW struct
2867  * @vsi_handle: VSI handle
2868  *
2869  * clear the VSI context entry
2870  */
2871 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2872 {
2873         struct ice_vsi_ctx *vsi;
2874
2875         vsi = ice_get_vsi_ctx(hw, vsi_handle);
2876         if (vsi) {
2877                 ice_clear_vsi_q_ctx(hw, vsi_handle);
2878                 ice_free(hw, vsi);
2879                 hw->vsi_ctx[vsi_handle] = NULL;
2880         }
2881 }
2882
2883 /**
2884  * ice_clear_all_vsi_ctx - clear all the VSI context entries
2885  * @hw: pointer to the HW struct
2886  */
2887 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2888 {
2889         u16 i;
2890
2891         for (i = 0; i < ICE_MAX_VSI; i++)
2892                 ice_clear_vsi_ctx(hw, i);
2893 }
2894
2895 /**
2896  * ice_add_vsi - add VSI context to the hardware and VSI handle list
2897  * @hw: pointer to the HW struct
2898  * @vsi_handle: unique VSI handle provided by drivers
2899  * @vsi_ctx: pointer to a VSI context struct
2900  * @cd: pointer to command details structure or NULL
2901  *
2902  * Add a VSI context to the hardware also add it into the VSI handle list.
2903  * If this function gets called after reset for existing VSIs then update
2904  * with the new HW VSI number in the corresponding VSI handle list entry.
2905  */
2906 enum ice_status
2907 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2908             struct ice_sq_cd *cd)
2909 {
2910         struct ice_vsi_ctx *tmp_vsi_ctx;
2911         enum ice_status status;
2912
2913         if (vsi_handle >= ICE_MAX_VSI)
2914                 return ICE_ERR_PARAM;
2915         status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2916         if (status)
2917                 return status;
2918         tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2919         if (!tmp_vsi_ctx) {
2920                 /* Create a new VSI context */
2921                 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2922                         ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2923                 if (!tmp_vsi_ctx) {
2924                         ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2925                         return ICE_ERR_NO_MEMORY;
2926                 }
2927                 *tmp_vsi_ctx = *vsi_ctx;
2928
2929                 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2930         } else {
2931                 /* update with new HW VSI num */
2932                 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2933         }
2934
2935         return ICE_SUCCESS;
2936 }
2937
2938 /**
2939  * ice_free_vsi- free VSI context from hardware and VSI handle list
2940  * @hw: pointer to the HW struct
2941  * @vsi_handle: unique VSI handle
2942  * @vsi_ctx: pointer to a VSI context struct
2943  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2944  * @cd: pointer to command details structure or NULL
2945  *
2946  * Free VSI context info from hardware as well as from VSI handle list
2947  */
2948 enum ice_status
2949 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2950              bool keep_vsi_alloc, struct ice_sq_cd *cd)
2951 {
2952         enum ice_status status;
2953
2954         if (!ice_is_vsi_valid(hw, vsi_handle))
2955                 return ICE_ERR_PARAM;
2956         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2957         status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2958         if (!status)
2959                 ice_clear_vsi_ctx(hw, vsi_handle);
2960         return status;
2961 }
2962
2963 /**
2964  * ice_update_vsi
2965  * @hw: pointer to the HW struct
2966  * @vsi_handle: unique VSI handle
2967  * @vsi_ctx: pointer to a VSI context struct
2968  * @cd: pointer to command details structure or NULL
2969  *
2970  * Update VSI context in the hardware
2971  */
2972 enum ice_status
2973 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2974                struct ice_sq_cd *cd)
2975 {
2976         if (!ice_is_vsi_valid(hw, vsi_handle))
2977                 return ICE_ERR_PARAM;
2978         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2979         return ice_aq_update_vsi(hw, vsi_ctx, cd);
2980 }
2981
2982 /**
2983  * ice_aq_get_vsi_params
2984  * @hw: pointer to the HW struct
2985  * @vsi_ctx: pointer to a VSI context struct
2986  * @cd: pointer to command details structure or NULL
2987  *
2988  * Get VSI context info from hardware (0x0212)
2989  */
2990 enum ice_status
2991 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2992                       struct ice_sq_cd *cd)
2993 {
2994         struct ice_aqc_add_get_update_free_vsi *cmd;
2995         struct ice_aqc_get_vsi_resp *resp;
2996         struct ice_aq_desc desc;
2997         enum ice_status status;
2998
2999         cmd = &desc.params.vsi_cmd;
3000         resp = &desc.params.get_vsi_resp;
3001
3002         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
3003
3004         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
3005
3006         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
3007                                  sizeof(vsi_ctx->info), cd);
3008         if (!status) {
3009                 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
3010                                         ICE_AQ_VSI_NUM_M;
3011                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
3012                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
3013         }
3014
3015         return status;
3016 }
3017
/**
 * ice_aq_add_update_mir_rule - add/update a mirror rule
 * @hw: pointer to the HW struct
 * @rule_type: Rule Type
 * @dest_vsi: VSI number to which packets will be mirrored
 * @count: length of the list
 * @mr_buf: buffer for list of mirrored VSI numbers
 * @cd: pointer to command details structure or NULL
 * @rule_id: Rule ID (in/out): pass ICE_INVAL_MIRROR_RULE_ID to create a new
 *           rule, or an existing ID to update it; on success the rule ID
 *           assigned by FW is written back here
 *
 * Add/Update Mirror Rule (0x260).
 */
enum ice_status
ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
			   u16 count, struct ice_mir_rule_buf *mr_buf,
			   struct ice_sq_cd *cd, u16 *rule_id)
{
	struct ice_aqc_add_update_mir_rule *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	__le16 *mr_list = NULL;
	u16 buf_size = 0;

	switch (rule_type) {
	case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
	case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
		/* Make sure count and mr_buf are set for these rule_types */
		if (!(count && mr_buf))
			return ICE_ERR_PARAM;

		/* NOTE(review): buf_size is u16, so count > 0x7fff would
		 * truncate the computed size — presumably callers cap count
		 * well below that; confirm at call sites.
		 */
		buf_size = count * sizeof(__le16);
		mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
		if (!mr_list)
			return ICE_ERR_NO_MEMORY;
		break;
	case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
	case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
		/* Make sure count and mr_buf are not set for these
		 * rule_types
		 */
		if (count || mr_buf)
			return ICE_ERR_PARAM;
		break;
	default:
		ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
		return ICE_ERR_OUT_OF_RANGE;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);

	/* Pre-process 'mr_buf' items for add/update of virtual port
	 * ingress/egress mirroring (but not physical port ingress/egress
	 * mirroring)
	 */
	if (mr_buf) {
		int i;

		for (i = 0; i < count; i++) {
			u16 id;

			id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;

			/* Validate specified VSI number, make sure it is less
			 * than ICE_MAX_VSI, if not return with error.
			 */
			if (id >= ICE_MAX_VSI) {
				ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
					  id);
				/* mr_list must not leak on the error path */
				ice_free(hw, mr_list);
				return ICE_ERR_OUT_OF_RANGE;
			}

			/* add VSI to mirror rule */
			if (mr_buf[i].add)
				mr_list[i] =
					CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
			else /* remove VSI from mirror rule */
				mr_list[i] = CPU_TO_LE16(id);
		}
	}

	cmd = &desc.params.add_update_rule;
	/* an existing rule ID means "update"; flag it valid for FW */
	if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
		cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
					   ICE_AQC_RULE_ID_VALID_M);
	cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
	cmd->num_entries = CPU_TO_LE16(count);
	cmd->dest = CPU_TO_LE16(dest_vsi);

	status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
	if (!status)
		*rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;

	/* mr_list is NULL for physical-port rules; ice_free handles NULL */
	ice_free(hw, mr_list);

	return status;
}
3115
3116 /**
3117  * ice_aq_delete_mir_rule - delete a mirror rule
3118  * @hw: pointer to the HW struct
3119  * @rule_id: Mirror rule ID (to be deleted)
3120  * @keep_allocd: if set, the VSI stays part of the PF allocated res,
3121  *               otherwise it is returned to the shared pool
3122  * @cd: pointer to command details structure or NULL
3123  *
3124  * Delete Mirror Rule (0x261).
3125  */
3126 enum ice_status
3127 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
3128                        struct ice_sq_cd *cd)
3129 {
3130         struct ice_aqc_delete_mir_rule *cmd;
3131         struct ice_aq_desc desc;
3132
3133         /* rule_id should be in the range 0...63 */
3134         if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
3135                 return ICE_ERR_OUT_OF_RANGE;
3136
3137         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
3138
3139         cmd = &desc.params.del_rule;
3140         rule_id |= ICE_AQC_RULE_ID_VALID_M;
3141         cmd->rule_id = CPU_TO_LE16(rule_id);
3142
3143         if (keep_allocd)
3144                 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
3145
3146         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3147 }
3148
3149 /**
3150  * ice_aq_alloc_free_vsi_list
3151  * @hw: pointer to the HW struct
3152  * @vsi_list_id: VSI list ID returned or used for lookup
3153  * @lkup_type: switch rule filter lookup type
3154  * @opc: switch rules population command type - pass in the command opcode
3155  *
3156  * allocates or free a VSI list resource
3157  */
3158 static enum ice_status
3159 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
3160                            enum ice_sw_lkup_type lkup_type,
3161                            enum ice_adminq_opc opc)
3162 {
3163         struct ice_aqc_alloc_free_res_elem *sw_buf;
3164         struct ice_aqc_res_elem *vsi_ele;
3165         enum ice_status status;
3166         u16 buf_len;
3167
3168         buf_len = ice_struct_size(sw_buf, elem, 1);
3169         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3170         if (!sw_buf)
3171                 return ICE_ERR_NO_MEMORY;
3172         sw_buf->num_elems = CPU_TO_LE16(1);
3173
3174         if (lkup_type == ICE_SW_LKUP_MAC ||
3175             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3176             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3177             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3178             lkup_type == ICE_SW_LKUP_PROMISC ||
3179             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3180             lkup_type == ICE_SW_LKUP_LAST) {
3181                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
3182         } else if (lkup_type == ICE_SW_LKUP_VLAN) {
3183                 sw_buf->res_type =
3184                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
3185         } else {
3186                 status = ICE_ERR_PARAM;
3187                 goto ice_aq_alloc_free_vsi_list_exit;
3188         }
3189
3190         if (opc == ice_aqc_opc_free_res)
3191                 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
3192
3193         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
3194         if (status)
3195                 goto ice_aq_alloc_free_vsi_list_exit;
3196
3197         if (opc == ice_aqc_opc_alloc_res) {
3198                 vsi_ele = &sw_buf->elem[0];
3199                 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
3200         }
3201
3202 ice_aq_alloc_free_vsi_list_exit:
3203         ice_free(hw, sw_buf);
3204         return status;
3205 }
3206
3207 /**
3208  * ice_aq_set_storm_ctrl - Sets storm control configuration
3209  * @hw: pointer to the HW struct
3210  * @bcast_thresh: represents the upper threshold for broadcast storm control
3211  * @mcast_thresh: represents the upper threshold for multicast storm control
3212  * @ctl_bitmask: storm control knobs
3213  *
3214  * Sets the storm control configuration (0x0280)
3215  */
3216 enum ice_status
3217 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3218                       u32 ctl_bitmask)
3219 {
3220         struct ice_aqc_storm_cfg *cmd;
3221         struct ice_aq_desc desc;
3222
3223         cmd = &desc.params.storm_conf;
3224
3225         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
3226
3227         cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3228         cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3229         cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3230
3231         return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3232 }
3233
3234 /**
3235  * ice_aq_get_storm_ctrl - gets storm control configuration
3236  * @hw: pointer to the HW struct
3237  * @bcast_thresh: represents the upper threshold for broadcast storm control
3238  * @mcast_thresh: represents the upper threshold for multicast storm control
3239  * @ctl_bitmask: storm control knobs
3240  *
3241  * Gets the storm control configuration (0x0281)
3242  */
3243 enum ice_status
3244 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3245                       u32 *ctl_bitmask)
3246 {
3247         enum ice_status status;
3248         struct ice_aq_desc desc;
3249
3250         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3251
3252         status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3253         if (!status) {
3254                 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
3255
3256                 if (bcast_thresh)
3257                         *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3258                                 ICE_AQ_THRESHOLD_M;
3259                 if (mcast_thresh)
3260                         *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3261                                 ICE_AQ_THRESHOLD_M;
3262                 if (ctl_bitmask)
3263                         *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3264         }
3265
3266         return status;
3267 }
3268
/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the HW struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 */
static enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* only the three rule-population opcodes are accepted here */
	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		CPU_TO_LE16(num_rules);
	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
	/* For update/remove, FW answering ENOENT means the targeted rule does
	 * not exist; surface that as a distinct status so callers can tell it
	 * apart from other AQ failures.
	 */
	if (opc != ice_aqc_opc_add_sw_rules &&
	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
		status = ICE_ERR_DOES_NOT_EXIST;

	return status;
}
3306
3307 /**
3308  * ice_aq_add_recipe - add switch recipe
3309  * @hw: pointer to the HW struct
3310  * @s_recipe_list: pointer to switch rule population list
3311  * @num_recipes: number of switch recipes in the list
3312  * @cd: pointer to command details structure or NULL
3313  *
3314  * Add(0x0290)
3315  */
3316 enum ice_status
3317 ice_aq_add_recipe(struct ice_hw *hw,
3318                   struct ice_aqc_recipe_data_elem *s_recipe_list,
3319                   u16 num_recipes, struct ice_sq_cd *cd)
3320 {
3321         struct ice_aqc_add_get_recipe *cmd;
3322         struct ice_aq_desc desc;
3323         u16 buf_size;
3324
3325         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3326         cmd = &desc.params.add_get_recipe;
3327         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3328
3329         cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
3330         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3331
3332         buf_size = num_recipes * sizeof(*s_recipe_list);
3333
3334         return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3335 }
3336
3337 /**
3338  * ice_aq_get_recipe - get switch recipe
3339  * @hw: pointer to the HW struct
3340  * @s_recipe_list: pointer to switch rule population list
3341  * @num_recipes: pointer to the number of recipes (input and output)
3342  * @recipe_root: root recipe number of recipe(s) to retrieve
3343  * @cd: pointer to command details structure or NULL
3344  *
3345  * Get(0x0292)
3346  *
3347  * On input, *num_recipes should equal the number of entries in s_recipe_list.
3348  * On output, *num_recipes will equal the number of entries returned in
3349  * s_recipe_list.
3350  *
3351  * The caller must supply enough space in s_recipe_list to hold all possible
3352  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3353  */
3354 enum ice_status
3355 ice_aq_get_recipe(struct ice_hw *hw,
3356                   struct ice_aqc_recipe_data_elem *s_recipe_list,
3357                   u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3358 {
3359         struct ice_aqc_add_get_recipe *cmd;
3360         struct ice_aq_desc desc;
3361         enum ice_status status;
3362         u16 buf_size;
3363
3364         if (*num_recipes != ICE_MAX_NUM_RECIPES)
3365                 return ICE_ERR_PARAM;
3366
3367         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3368         cmd = &desc.params.add_get_recipe;
3369         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3370
3371         cmd->return_index = CPU_TO_LE16(recipe_root);
3372         cmd->num_sub_recipes = 0;
3373
3374         buf_size = *num_recipes * sizeof(*s_recipe_list);
3375
3376         status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3377         *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3378
3379         return status;
3380 }
3381
3382 /**
3383  * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
3384  * @hw: pointer to the HW struct
3385  * @params: parameters used to update the default recipe
3386  *
3387  * This function only supports updating default recipes and it only supports
3388  * updating a single recipe based on the lkup_idx at a time.
3389  *
3390  * This is done as a read-modify-write operation. First, get the current recipe
3391  * contents based on the recipe's ID. Then modify the field vector index and
3392  * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
3393  * the pre-existing recipe with the modifications.
3394  */
3395 enum ice_status
3396 ice_update_recipe_lkup_idx(struct ice_hw *hw,
3397                            struct ice_update_recipe_lkup_idx_params *params)
3398 {
3399         struct ice_aqc_recipe_data_elem *rcp_list;
3400         u16 num_recps = ICE_MAX_NUM_RECIPES;
3401         enum ice_status status;
3402
3403         rcp_list = (struct ice_aqc_recipe_data_elem *)ice_malloc(hw, num_recps * sizeof(*rcp_list));
3404         if (!rcp_list)
3405                 return ICE_ERR_NO_MEMORY;
3406
3407         /* read current recipe list from firmware */
3408         rcp_list->recipe_indx = params->rid;
3409         status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
3410         if (status) {
3411                 ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
3412                           params->rid, status);
3413                 goto error_out;
3414         }
3415
3416         /* only modify existing recipe's lkup_idx and mask if valid, while
3417          * leaving all other fields the same, then update the recipe firmware
3418          */
3419         rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
3420         if (params->mask_valid)
3421                 rcp_list->content.mask[params->lkup_idx] =
3422                         CPU_TO_LE16(params->mask);
3423
3424         if (params->ignore_valid)
3425                 rcp_list->content.lkup_indx[params->lkup_idx] |=
3426                         ICE_AQ_RECIPE_LKUP_IGNORE;
3427
3428         status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
3429         if (status)
3430                 ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
3431                           params->rid, params->lkup_idx, params->fv_idx,
3432                           params->mask, params->mask_valid ? "true" : "false",
3433                           status);
3434
3435 error_out:
3436         ice_free(hw, rcp_list);
3437         return status;
3438 }
3439
3440 /**
3441  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3442  * @hw: pointer to the HW struct
3443  * @profile_id: package profile ID to associate the recipe with
3444  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3445  * @cd: pointer to command details structure or NULL
3446  * Recipe to profile association (0x0291)
3447  */
3448 enum ice_status
3449 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3450                              struct ice_sq_cd *cd)
3451 {
3452         struct ice_aqc_recipe_to_profile *cmd;
3453         struct ice_aq_desc desc;
3454
3455         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3456         cmd = &desc.params.recipe_to_profile;
3457         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
3458         cmd->profile_id = CPU_TO_LE16(profile_id);
3459         /* Set the recipe ID bit in the bitmask to let the device know which
3460          * profile we are associating the recipe to
3461          */
3462         ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3463                    ICE_NONDMA_TO_NONDMA);
3464
3465         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3466 }
3467
3468 /**
3469  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3470  * @hw: pointer to the HW struct
3471  * @profile_id: package profile ID to associate the recipe with
3472  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3473  * @cd: pointer to command details structure or NULL
3474  * Associate profile ID with given recipe (0x0293)
3475  */
3476 enum ice_status
3477 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3478                              struct ice_sq_cd *cd)
3479 {
3480         struct ice_aqc_recipe_to_profile *cmd;
3481         struct ice_aq_desc desc;
3482         enum ice_status status;
3483
3484         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3485         cmd = &desc.params.recipe_to_profile;
3486         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3487         cmd->profile_id = CPU_TO_LE16(profile_id);
3488
3489         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3490         if (!status)
3491                 ice_memcpy(r_bitmap, cmd->recipe_assoc,
3492                            sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3493
3494         return status;
3495 }
3496
3497 /**
3498  * ice_alloc_recipe - add recipe resource
3499  * @hw: pointer to the hardware structure
3500  * @rid: recipe ID returned as response to AQ call
3501  */
3502 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3503 {
3504         struct ice_aqc_alloc_free_res_elem *sw_buf;
3505         enum ice_status status;
3506         u16 buf_len;
3507
3508         buf_len = ice_struct_size(sw_buf, elem, 1);
3509         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3510         if (!sw_buf)
3511                 return ICE_ERR_NO_MEMORY;
3512
3513         sw_buf->num_elems = CPU_TO_LE16(1);
3514         sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3515                                         ICE_AQC_RES_TYPE_S) |
3516                                         ICE_AQC_RES_TYPE_FLAG_SHARED);
3517         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3518                                        ice_aqc_opc_alloc_res, NULL);
3519         if (!status)
3520                 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3521         ice_free(hw, sw_buf);
3522
3523         return status;
3524 }
3525
3526 /* ice_init_port_info - Initialize port_info with switch configuration data
3527  * @pi: pointer to port_info
3528  * @vsi_port_num: VSI number or port number
3529  * @type: Type of switch element (port or VSI)
3530  * @swid: switch ID of the switch the element is attached to
3531  * @pf_vf_num: PF or VF number
3532  * @is_vf: true if the element is a VF, false otherwise
3533  */
3534 static void
3535 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3536                    u16 swid, u16 pf_vf_num, bool is_vf)
3537 {
3538         switch (type) {
3539         case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3540                 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3541                 pi->sw_id = swid;
3542                 pi->pf_vf_num = pf_vf_num;
3543                 pi->is_vf = is_vf;
3544                 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3545                 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3546                 break;
3547         default:
3548                 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3549                 break;
3550         }
3551 }
3552
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 *
 * Pages through the get-switch-config AQ responses, recording the PF ID for
 * the PF's own VSI (DCF mode) and initializing hw->port_info for each
 * physical/virtual port element found. Fails with ICE_ERR_CFG if firmware
 * reports more ports than expected.
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
	enum ice_status status;
	u8 num_total_ports;   /* upper bound on port elements accepted */
	u16 req_desc = 0;     /* firmware's continuation cookie, 0 = start */
	u16 num_elems;        /* elements returned by the current AQ call */
	u8 j = 0;             /* ports initialized so far */
	u16 i;

	num_total_ports = 1;

	rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
		ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);

	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		struct ice_aqc_get_sw_cfg_resp_elem *ele;

		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 res_type;

			/* vsi_port_num packs both the number (low bits) and
			 * the element type (high bits); split them out.
			 */
			vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = LE16_TO_CPU(ele->swid);

			if (LE16_TO_CPU(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);

			switch (res_type) {
			case ICE_AQC_GET_SW_CONF_RESP_VSI:
				/* In DCF mode, the PF's VSI element tells us
				 * our own PF number.
				 */
				if (hw->dcf_enabled && !is_vf)
					hw->pf_id = pf_vf_num;
				break;
			case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
			case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
				if (j == num_total_ports) {
					ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
					status = ICE_ERR_CFG;
					goto out;
				}
				ice_init_port_info(hw->port_info,
						   vsi_port_num, res_type, swid,
						   pf_vf_num, is_vf);
				j++;
				break;
			default:
				break;
			}
		}
	} while (req_desc && !status);

out:
	ice_free(hw, rbuf);
	return status;
}
3635
3636 /**
3637  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
3638  * @hw: pointer to the hardware structure
3639  * @fi: filter info structure to fill/update
3640  *
3641  * This helper function populates the lb_en and lan_en elements of the provided
3642  * ice_fltr_info struct using the switch's type and characteristics of the
3643  * switch rule being configured.
3644  */
3645 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
3646 {
3647         if ((fi->flag & ICE_FLTR_RX) &&
3648             (fi->fltr_act == ICE_FWD_TO_VSI ||
3649              fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
3650             fi->lkup_type == ICE_SW_LKUP_LAST)
3651                 fi->lan_en = true;
3652         fi->lb_en = false;
3653         fi->lan_en = false;
3654         if ((fi->flag & ICE_FLTR_TX) &&
3655             (fi->fltr_act == ICE_FWD_TO_VSI ||
3656              fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3657              fi->fltr_act == ICE_FWD_TO_Q ||
3658              fi->fltr_act == ICE_FWD_TO_QGRP)) {
3659                 /* Setting LB for prune actions will result in replicated
3660                  * packets to the internal switch that will be dropped.
3661                  */
3662                 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
3663                         fi->lb_en = true;
3664
3665                 /* Set lan_en to TRUE if
3666                  * 1. The switch is a VEB AND
3667                  * 2
3668                  * 2.1 The lookup is a directional lookup like ethertype,
3669                  * promiscuous, ethertype-MAC, promiscuous-VLAN
3670                  * and default-port OR
3671                  * 2.2 The lookup is VLAN, OR
3672                  * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
3673                  * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3674                  *
3675                  * OR
3676                  *
3677                  * The switch is a VEPA.
3678                  *
3679                  * In all other cases, the LAN enable has to be set to false.
3680                  */
3681                 if (hw->evb_veb) {
3682                         if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3683                             fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3684                             fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3685                             fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3686                             fi->lkup_type == ICE_SW_LKUP_DFLT ||
3687                             fi->lkup_type == ICE_SW_LKUP_VLAN ||
3688                             (fi->lkup_type == ICE_SW_LKUP_MAC &&
3689                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3690                             (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3691                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3692                                 fi->lan_en = true;
3693                 } else {
3694                         fi->lan_en = true;
3695                 }
3696         }
3697 }
3698
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 *
 * Builds the lookup Tx/Rx rule payload: action bits derived from the filter
 * action, plus a dummy Ethernet header patched with the MAC/ethertype/VLAN
 * fields relevant to the lookup type.
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	/* vlan_id > ICE_MAX_VLAN_ID acts as "no VLAN to program" sentinel */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	u16 vlan_tpid = ICE_ETH_P_8021Q;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	/* A remove only needs the rule index; no header or action required */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
	ice_fill_sw_info(hw, f_info);

	/* Translate the filter action into single-action bits */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		/* VLAN rules use prune bits instead of VSI forwarding */
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* Queue-group region size is a power of two; encode its log2 */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ice_ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		/* unsupported action: leave s_rule untouched */
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* Pick the header fields (DA / ethertype / VLAN) to patch into the
	 * dummy Ethernet header based on the lookup type.
	 */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->l_data.vlan.tpid_valid)
			vlan_tpid = f_info->l_data.vlan.tpid;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		/* fall-through */
	case ICE_SW_LKUP_ETHERTYPE:
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		/* fall-through */
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	if (daddr)
		ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
			   ICE_NONDMA_TO_NONDMA);

	/* Program VLAN TCI and TPID only if a real VLAN ID was selected */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = CPU_TO_BE16(vlan_id);
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = CPU_TO_BE16(vlan_tpid);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
}
3837
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	/* Markers are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* second rule lives immediately after the large action in the buffer */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* remember the marker bookkeeping only if HW accepted it */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	ice_free(hw, lg_act);
	return status;
}
3941
/**
 * ice_add_counter_act - add/update filter rule with counter action
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which counter needs to be added
 * @counter_id: VLAN counter ID returned as part of allocate resource
 * @l_id: large action resource ID
 */
static enum ice_status
ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		    u16 counter_id, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act;
	struct ice_aqc_sw_rules_elem *rx_tx;
	enum ice_status status;
	/* 2 actions will be added while adding a large action counter */
	const int num_acts = 2;
	u16 lg_act_size;
	u16 rules_size;
	u16 f_rule_id;
	u32 act;
	u16 id;

	/* Counters are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 * 1. Large Action
	 * 2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* second rule lives immediately after the large action in the buffer */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ?  m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action counter ID */
	act = ICE_LG_ACT_STAT_COUNT;
	act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
		ICE_LG_ACT_STAT_COUNT_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Point the lookup rule's action at the large action just built */
	act = ICE_SINGLE_ACT_PTR;
	act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
	rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	f_rule_id = m_ent->fltr_info.fltr_rule_id;
	rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* record the counter bookkeeping only if HW accepted it */
		m_ent->lg_act_idx = l_id;
		m_ent->counter_index = counter_id;
	}

	ice_free(hw, lg_act);
	return status;
}
4029
4030 /**
4031  * ice_create_vsi_list_map
4032  * @hw: pointer to the hardware structure
4033  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
4034  * @num_vsi: number of VSI handles in the array
4035  * @vsi_list_id: VSI list ID generated as part of allocate resource
4036  *
4037  * Helper function to create a new entry of VSI list ID to VSI mapping
4038  * using the given VSI list ID
4039  */
4040 static struct ice_vsi_list_map_info *
4041 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
4042                         u16 vsi_list_id)
4043 {
4044         struct ice_switch_info *sw = hw->switch_info;
4045         struct ice_vsi_list_map_info *v_map;
4046         int i;
4047
4048         v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
4049         if (!v_map)
4050                 return NULL;
4051
4052         v_map->vsi_list_id = vsi_list_id;
4053         v_map->ref_cnt = 1;
4054         for (i = 0; i < num_vsi; i++)
4055                 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
4056
4057         LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
4058         return v_map;
4059 }
4060
4061 /**
4062  * ice_update_vsi_list_rule
4063  * @hw: pointer to the hardware structure
4064  * @vsi_handle_arr: array of VSI handles to form a VSI list
4065  * @num_vsi: number of VSI handles in the array
4066  * @vsi_list_id: VSI list ID generated as part of allocate resource
4067  * @remove: Boolean value to indicate if this is a remove action
4068  * @opc: switch rules population command type - pass in the command opcode
4069  * @lkup_type: lookup type of the filter
4070  *
4071  * Call AQ command to add a new switch rule or update existing switch rule
4072  * using the given VSI list ID
4073  */
4074 static enum ice_status
4075 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
4076                          u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
4077                          enum ice_sw_lkup_type lkup_type)
4078 {
4079         struct ice_aqc_sw_rules_elem *s_rule;
4080         enum ice_status status;
4081         u16 s_rule_size;
4082         u16 rule_type;
4083         int i;
4084
4085         if (!num_vsi)
4086                 return ICE_ERR_PARAM;
4087
4088         if (lkup_type == ICE_SW_LKUP_MAC ||
4089             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
4090             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
4091             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
4092             lkup_type == ICE_SW_LKUP_PROMISC ||
4093             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
4094             lkup_type == ICE_SW_LKUP_LAST)
4095                 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
4096                         ICE_AQC_SW_RULES_T_VSI_LIST_SET;
4097         else if (lkup_type == ICE_SW_LKUP_VLAN)
4098                 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
4099                         ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
4100         else
4101                 return ICE_ERR_PARAM;
4102
4103         s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
4104         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4105         if (!s_rule)
4106                 return ICE_ERR_NO_MEMORY;
4107         for (i = 0; i < num_vsi; i++) {
4108                 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
4109                         status = ICE_ERR_PARAM;
4110                         goto exit;
4111                 }
4112                 /* AQ call requires hw_vsi_id(s) */
4113                 s_rule->pdata.vsi_list.vsi[i] =
4114                         CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
4115         }
4116
4117         s_rule->type = CPU_TO_LE16(rule_type);
4118         s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
4119         s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
4120
4121         status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
4122
4123 exit:
4124         ice_free(hw, s_rule);
4125         return status;
4126 }
4127
4128 /**
4129  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
4130  * @hw: pointer to the HW struct
4131  * @vsi_handle_arr: array of VSI handles to form a VSI list
4132  * @num_vsi: number of VSI handles in the array
4133  * @vsi_list_id: stores the ID of the VSI list to be created
4134  * @lkup_type: switch rule filter's lookup type
4135  */
4136 static enum ice_status
4137 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
4138                          u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
4139 {
4140         enum ice_status status;
4141
4142         status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
4143                                             ice_aqc_opc_alloc_res);
4144         if (status)
4145                 return status;
4146
4147         /* Update the newly created VSI list to include the specified VSIs */
4148         return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
4149                                         *vsi_list_id, false,
4150                                         ice_aqc_opc_add_sw_rules, lkup_type);
4151 }
4152
4153 /**
4154  * ice_create_pkt_fwd_rule
4155  * @hw: pointer to the hardware structure
4156  * @recp_list: corresponding filter management list
4157  * @f_entry: entry containing packet forwarding information
4158  *
4159  * Create switch rule with given filter information and add an entry
4160  * to the corresponding filter management list to track this switch rule
4161  * and VSI mapping
4162  */
4163 static enum ice_status
4164 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4165                         struct ice_fltr_list_entry *f_entry)
4166 {
4167         struct ice_fltr_mgmt_list_entry *fm_entry;
4168         struct ice_aqc_sw_rules_elem *s_rule;
4169         enum ice_status status;
4170
4171         s_rule = (struct ice_aqc_sw_rules_elem *)
4172                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4173         if (!s_rule)
4174                 return ICE_ERR_NO_MEMORY;
4175         fm_entry = (struct ice_fltr_mgmt_list_entry *)
4176                    ice_malloc(hw, sizeof(*fm_entry));
4177         if (!fm_entry) {
4178                 status = ICE_ERR_NO_MEMORY;
4179                 goto ice_create_pkt_fwd_rule_exit;
4180         }
4181
4182         fm_entry->fltr_info = f_entry->fltr_info;
4183
4184         /* Initialize all the fields for the management entry */
4185         fm_entry->vsi_count = 1;
4186         fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
4187         fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
4188         fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
4189
4190         ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
4191                          ice_aqc_opc_add_sw_rules);
4192
4193         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4194                                  ice_aqc_opc_add_sw_rules, NULL);
4195         if (status) {
4196                 ice_free(hw, fm_entry);
4197                 goto ice_create_pkt_fwd_rule_exit;
4198         }
4199
4200         f_entry->fltr_info.fltr_rule_id =
4201                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4202         fm_entry->fltr_info.fltr_rule_id =
4203                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4204
4205         /* The book keeping entries will get removed when base driver
4206          * calls remove filter AQ command
4207          */
4208         LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
4209
4210 ice_create_pkt_fwd_rule_exit:
4211         ice_free(hw, s_rule);
4212         return status;
4213 }
4214
4215 /**
4216  * ice_update_pkt_fwd_rule
4217  * @hw: pointer to the hardware structure
4218  * @f_info: filter information for switch rule
4219  *
4220  * Call AQ command to update a previously created switch rule with a
4221  * VSI list ID
4222  */
4223 static enum ice_status
4224 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
4225 {
4226         struct ice_aqc_sw_rules_elem *s_rule;
4227         enum ice_status status;
4228
4229         s_rule = (struct ice_aqc_sw_rules_elem *)
4230                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4231         if (!s_rule)
4232                 return ICE_ERR_NO_MEMORY;
4233
4234         ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
4235
4236         s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
4237
4238         /* Update switch rule with new rule set to forward VSI list */
4239         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4240                                  ice_aqc_opc_update_sw_rules, NULL);
4241
4242         ice_free(hw, s_rule);
4243         return status;
4244 }
4245
4246 /**
4247  * ice_update_sw_rule_bridge_mode
4248  * @hw: pointer to the HW struct
4249  *
4250  * Updates unicast switch filter rules based on VEB/VEPA mode
4251  */
4252 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
4253 {
4254         struct ice_switch_info *sw = hw->switch_info;
4255         struct ice_fltr_mgmt_list_entry *fm_entry;
4256         enum ice_status status = ICE_SUCCESS;
4257         struct LIST_HEAD_TYPE *rule_head;
4258         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4259
4260         rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4261         rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
4262
4263         ice_acquire_lock(rule_lock);
4264         LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
4265                             list_entry) {
4266                 struct ice_fltr_info *fi = &fm_entry->fltr_info;
4267                 u8 *addr = fi->l_data.mac.mac_addr;
4268
4269                 /* Update unicast Tx rules to reflect the selected
4270                  * VEB/VEPA mode
4271                  */
4272                 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4273                     (fi->fltr_act == ICE_FWD_TO_VSI ||
4274                      fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4275                      fi->fltr_act == ICE_FWD_TO_Q ||
4276                      fi->fltr_act == ICE_FWD_TO_QGRP)) {
4277                         status = ice_update_pkt_fwd_rule(hw, fi);
4278                         if (status)
4279                                 break;
4280                 }
4281         }
4282
4283         ice_release_lock(rule_lock);
4284
4285         return status;
4286 }
4287
4288 /**
4289  * ice_add_update_vsi_list
4290  * @hw: pointer to the hardware structure
4291  * @m_entry: pointer to current filter management list entry
4292  * @cur_fltr: filter information from the book keeping entry
4293  * @new_fltr: filter information with the new VSI to be added
4294  *
4295  * Call AQ command to add or update previously created VSI list with new VSI.
4296  *
4297  * Helper function to do book keeping associated with adding filter information
4298  * The algorithm to do the book keeping is described below :
4299  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
4300  *      if only one VSI has been added till now
4301  *              Allocate a new VSI list and add two VSIs
4302  *              to this list using switch rule command
4303  *              Update the previously created switch rule with the
4304  *              newly created VSI list ID
4305  *      if a VSI list was previously created
4306  *              Add the new VSI to the previously created VSI list set
4307  *              using the update switch rule command
4308  */
4309 static enum ice_status
4310 ice_add_update_vsi_list(struct ice_hw *hw,
4311                         struct ice_fltr_mgmt_list_entry *m_entry,
4312                         struct ice_fltr_info *cur_fltr,
4313                         struct ice_fltr_info *new_fltr)
4314 {
4315         enum ice_status status = ICE_SUCCESS;
4316         u16 vsi_list_id = 0;
4317
4318         if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
4319              cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
4320                 return ICE_ERR_NOT_IMPL;
4321
4322         if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
4323              new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
4324             (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
4325              cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
4326                 return ICE_ERR_NOT_IMPL;
4327
4328         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
4329                 /* Only one entry existed in the mapping and it was not already
4330                  * a part of a VSI list. So, create a VSI list with the old and
4331                  * new VSIs.
4332                  */
4333                 struct ice_fltr_info tmp_fltr;
4334                 u16 vsi_handle_arr[2];
4335
4336                 /* A rule already exists with the new VSI being added */
4337                 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
4338                         return ICE_ERR_ALREADY_EXISTS;
4339
4340                 vsi_handle_arr[0] = cur_fltr->vsi_handle;
4341                 vsi_handle_arr[1] = new_fltr->vsi_handle;
4342                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4343                                                   &vsi_list_id,
4344                                                   new_fltr->lkup_type);
4345                 if (status)
4346                         return status;
4347
4348                 tmp_fltr = *new_fltr;
4349                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
4350                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4351                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4352                 /* Update the previous switch rule of "MAC forward to VSI" to
4353                  * "MAC fwd to VSI list"
4354                  */
4355                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4356                 if (status)
4357                         return status;
4358
4359                 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
4360                 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4361                 m_entry->vsi_list_info =
4362                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4363                                                 vsi_list_id);
4364
4365                 if (!m_entry->vsi_list_info)
4366                         return ICE_ERR_NO_MEMORY;
4367
4368                 /* If this entry was large action then the large action needs
4369                  * to be updated to point to FWD to VSI list
4370                  */
4371                 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
4372                         status =
4373                             ice_add_marker_act(hw, m_entry,
4374                                                m_entry->sw_marker_id,
4375                                                m_entry->lg_act_idx);
4376         } else {
4377                 u16 vsi_handle = new_fltr->vsi_handle;
4378                 enum ice_adminq_opc opcode;
4379
4380                 if (!m_entry->vsi_list_info)
4381                         return ICE_ERR_CFG;
4382
4383                 /* A rule already exists with the new VSI being added */
4384                 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
4385                         return ICE_SUCCESS;
4386
4387                 /* Update the previously created VSI list set with
4388                  * the new VSI ID passed in
4389                  */
4390                 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
4391                 opcode = ice_aqc_opc_update_sw_rules;
4392
4393                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
4394                                                   vsi_list_id, false, opcode,
4395                                                   new_fltr->lkup_type);
4396                 /* update VSI list mapping info with new VSI ID */
4397                 if (!status)
4398                         ice_set_bit(vsi_handle,
4399                                     m_entry->vsi_list_info->vsi_map);
4400         }
4401         if (!status)
4402                 m_entry->vsi_count++;
4403         return status;
4404 }
4405
4406 /**
4407  * ice_find_rule_entry - Search a rule entry
4408  * @list_head: head of rule list
4409  * @f_info: rule information
4410  *
4411  * Helper function to search for a given rule entry
4412  * Returns pointer to entry storing the rule if found
4413  */
4414 static struct ice_fltr_mgmt_list_entry *
4415 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4416                     struct ice_fltr_info *f_info)
4417 {
4418         struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
4419
4420         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4421                             list_entry) {
4422                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4423                             sizeof(f_info->l_data)) &&
4424                     f_info->flag == list_itr->fltr_info.flag) {
4425                         ret = list_itr;
4426                         break;
4427                 }
4428         }
4429         return ret;
4430 }
4431
4432 /**
4433  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4434  * @recp_list: VSI lists needs to be searched
4435  * @vsi_handle: VSI handle to be found in VSI list
4436  * @vsi_list_id: VSI list ID found containing vsi_handle
4437  *
4438  * Helper function to search a VSI list with single entry containing given VSI
4439  * handle element. This can be extended further to search VSI list with more
4440  * than 1 vsi_count. Returns pointer to VSI list entry if found.
4441  */
4442 static struct ice_vsi_list_map_info *
4443 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
4444                         u16 *vsi_list_id)
4445 {
4446         struct ice_vsi_list_map_info *map_info = NULL;
4447         struct LIST_HEAD_TYPE *list_head;
4448
4449         list_head = &recp_list->filt_rules;
4450         if (recp_list->adv_rule) {
4451                 struct ice_adv_fltr_mgmt_list_entry *list_itr;
4452
4453                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4454                                     ice_adv_fltr_mgmt_list_entry,
4455                                     list_entry) {
4456                         if (list_itr->vsi_list_info) {
4457                                 map_info = list_itr->vsi_list_info;
4458                                 if (ice_is_bit_set(map_info->vsi_map,
4459                                                    vsi_handle)) {
4460                                         *vsi_list_id = map_info->vsi_list_id;
4461                                         return map_info;
4462                                 }
4463                         }
4464                 }
4465         } else {
4466                 struct ice_fltr_mgmt_list_entry *list_itr;
4467
4468                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4469                                     ice_fltr_mgmt_list_entry,
4470                                     list_entry) {
4471                         if (list_itr->vsi_count == 1 &&
4472                             list_itr->vsi_list_info) {
4473                                 map_info = list_itr->vsi_list_info;
4474                                 if (ice_is_bit_set(map_info->vsi_map,
4475                                                    vsi_handle)) {
4476                                         *vsi_list_id = map_info->vsi_list_id;
4477                                         return map_info;
4478                                 }
4479                         }
4480                 }
4481         }
4482         return NULL;
4483 }
4484
4485 /**
4486  * ice_add_rule_internal - add rule for a given lookup type
4487  * @hw: pointer to the hardware structure
4488  * @recp_list: recipe list for which rule has to be added
4489  * @lport: logic port number on which function add rule
4490  * @f_entry: structure containing MAC forwarding information
4491  *
4492  * Adds or updates the rule lists for a given recipe
4493  */
4494 static enum ice_status
4495 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4496                       u8 lport, struct ice_fltr_list_entry *f_entry)
4497 {
4498         struct ice_fltr_info *new_fltr, *cur_fltr;
4499         struct ice_fltr_mgmt_list_entry *m_entry;
4500         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4501         enum ice_status status = ICE_SUCCESS;
4502
4503         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4504                 return ICE_ERR_PARAM;
4505
4506         /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4507         if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4508                 f_entry->fltr_info.fwd_id.hw_vsi_id =
4509                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4510
4511         rule_lock = &recp_list->filt_rule_lock;
4512
4513         ice_acquire_lock(rule_lock);
4514         new_fltr = &f_entry->fltr_info;
4515         if (new_fltr->flag & ICE_FLTR_RX)
4516                 new_fltr->src = lport;
4517         else if (new_fltr->flag & ICE_FLTR_TX)
4518                 new_fltr->src =
4519                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4520
4521         m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4522         if (!m_entry) {
4523                 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4524                 goto exit_add_rule_internal;
4525         }
4526
4527         cur_fltr = &m_entry->fltr_info;
4528         status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4529
4530 exit_add_rule_internal:
4531         ice_release_lock(rule_lock);
4532         return status;
4533 }
4534
4535 /**
4536  * ice_remove_vsi_list_rule
4537  * @hw: pointer to the hardware structure
4538  * @vsi_list_id: VSI list ID generated as part of allocate resource
4539  * @lkup_type: switch rule filter lookup type
4540  *
4541  * The VSI list should be emptied before this function is called to remove the
4542  * VSI list.
4543  */
4544 static enum ice_status
4545 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4546                          enum ice_sw_lkup_type lkup_type)
4547 {
4548         /* Free the vsi_list resource that we allocated. It is assumed that the
4549          * list is empty at this point.
4550          */
4551         return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4552                                             ice_aqc_opc_free_res);
4553 }
4554
4555 /**
4556  * ice_rem_update_vsi_list
4557  * @hw: pointer to the hardware structure
4558  * @vsi_handle: VSI handle of the VSI to remove
4559  * @fm_list: filter management entry for which the VSI list management needs to
4560  *           be done
4561  */
4562 static enum ice_status
4563 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
4564                         struct ice_fltr_mgmt_list_entry *fm_list)
4565 {
4566         enum ice_sw_lkup_type lkup_type;
4567         enum ice_status status = ICE_SUCCESS;
4568         u16 vsi_list_id;
4569
4570         if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
4571             fm_list->vsi_count == 0)
4572                 return ICE_ERR_PARAM;
4573
4574         /* A rule with the VSI being removed does not exist */
4575         if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
4576                 return ICE_ERR_DOES_NOT_EXIST;
4577
4578         lkup_type = fm_list->fltr_info.lkup_type;
4579         vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
4580         status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
4581                                           ice_aqc_opc_update_sw_rules,
4582                                           lkup_type);
4583         if (status)
4584                 return status;
4585
4586         fm_list->vsi_count--;
4587         ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
4588
4589         if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
4590                 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
4591                 struct ice_vsi_list_map_info *vsi_list_info =
4592                         fm_list->vsi_list_info;
4593                 u16 rem_vsi_handle;
4594
4595                 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
4596                                                     ICE_MAX_VSI);
4597                 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
4598                         return ICE_ERR_OUT_OF_RANGE;
4599
4600                 /* Make sure VSI list is empty before removing it below */
4601                 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
4602                                                   vsi_list_id, true,
4603                                                   ice_aqc_opc_update_sw_rules,
4604                                                   lkup_type);
4605                 if (status)
4606                         return status;
4607
4608                 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
4609                 tmp_fltr_info.fwd_id.hw_vsi_id =
4610                         ice_get_hw_vsi_num(hw, rem_vsi_handle);
4611                 tmp_fltr_info.vsi_handle = rem_vsi_handle;
4612                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
4613                 if (status) {
4614                         ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
4615                                   tmp_fltr_info.fwd_id.hw_vsi_id, status);
4616                         return status;
4617                 }
4618
4619                 fm_list->fltr_info = tmp_fltr_info;
4620         }
4621
4622         if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
4623             (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
4624                 struct ice_vsi_list_map_info *vsi_list_info =
4625                         fm_list->vsi_list_info;
4626
4627                 /* Remove the VSI list since it is no longer used */
4628                 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
4629                 if (status) {
4630                         ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
4631                                   vsi_list_id, status);
4632                         return status;
4633                 }
4634
4635                 LIST_DEL(&vsi_list_info->list_entry);
4636                 ice_free(hw, vsi_list_info);
4637                 fm_list->vsi_list_info = NULL;
4638         }
4639
4640         return status;
4641 }
4642
4643 /**
4644  * ice_remove_rule_internal - Remove a filter rule of a given type
4645  *
4646  * @hw: pointer to the hardware structure
4647  * @recp_list: recipe list for which the rule needs to removed
4648  * @f_entry: rule entry containing filter information
4649  */
4650 static enum ice_status
4651 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4652                          struct ice_fltr_list_entry *f_entry)
4653 {
4654         struct ice_fltr_mgmt_list_entry *list_elem;
4655         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4656         enum ice_status status = ICE_SUCCESS;
4657         bool remove_rule = false;
4658         u16 vsi_handle;
4659
4660         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4661                 return ICE_ERR_PARAM;
4662         f_entry->fltr_info.fwd_id.hw_vsi_id =
4663                 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4664
4665         rule_lock = &recp_list->filt_rule_lock;
4666         ice_acquire_lock(rule_lock);
4667         list_elem = ice_find_rule_entry(&recp_list->filt_rules,
4668                                         &f_entry->fltr_info);
4669         if (!list_elem) {
4670                 status = ICE_ERR_DOES_NOT_EXIST;
4671                 goto exit;
4672         }
4673
4674         if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
4675                 remove_rule = true;
4676         } else if (!list_elem->vsi_list_info) {
4677                 status = ICE_ERR_DOES_NOT_EXIST;
4678                 goto exit;
4679         } else if (list_elem->vsi_list_info->ref_cnt > 1) {
4680                 /* a ref_cnt > 1 indicates that the vsi_list is being
4681                  * shared by multiple rules. Decrement the ref_cnt and
4682                  * remove this rule, but do not modify the list, as it
4683                  * is in-use by other rules.
4684                  */
4685                 list_elem->vsi_list_info->ref_cnt--;
4686                 remove_rule = true;
4687         } else {
4688                 /* a ref_cnt of 1 indicates the vsi_list is only used
4689                  * by one rule. However, the original removal request is only
4690                  * for a single VSI. Update the vsi_list first, and only
4691                  * remove the rule if there are no further VSIs in this list.
4692                  */
4693                 vsi_handle = f_entry->fltr_info.vsi_handle;
4694                 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4695                 if (status)
4696                         goto exit;
4697                 /* if VSI count goes to zero after updating the VSI list */
4698                 if (list_elem->vsi_count == 0)
4699                         remove_rule = true;
4700         }
4701
4702         if (remove_rule) {
4703                 /* Remove the lookup rule */
4704                 struct ice_aqc_sw_rules_elem *s_rule;
4705
4706                 s_rule = (struct ice_aqc_sw_rules_elem *)
4707                         ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4708                 if (!s_rule) {
4709                         status = ICE_ERR_NO_MEMORY;
4710                         goto exit;
4711                 }
4712
4713                 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4714                                  ice_aqc_opc_remove_sw_rules);
4715
4716                 status = ice_aq_sw_rules(hw, s_rule,
4717                                          ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4718                                          ice_aqc_opc_remove_sw_rules, NULL);
4719
4720                 /* Remove a book keeping from the list */
4721                 ice_free(hw, s_rule);
4722
4723                 if (status)
4724                         goto exit;
4725
4726                 LIST_DEL(&list_elem->list_entry);
4727                 ice_free(hw, list_elem);
4728         }
4729 exit:
4730         ice_release_lock(rule_lock);
4731         return status;
4732 }
4733
4734 /**
4735  * ice_aq_get_res_alloc - get allocated resources
4736  * @hw: pointer to the HW struct
4737  * @num_entries: pointer to u16 to store the number of resource entries returned
4738  * @buf: pointer to buffer
4739  * @buf_size: size of buf
4740  * @cd: pointer to command details structure or NULL
4741  *
4742  * The caller-supplied buffer must be large enough to store the resource
4743  * information for all resource types. Each resource type is an
4744  * ice_aqc_get_res_resp_elem structure.
4745  */
4746 enum ice_status
4747 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4748                      struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4749                      struct ice_sq_cd *cd)
4750 {
4751         struct ice_aqc_get_res_alloc *resp;
4752         enum ice_status status;
4753         struct ice_aq_desc desc;
4754
4755         if (!buf)
4756                 return ICE_ERR_BAD_PTR;
4757
4758         if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4759                 return ICE_ERR_INVAL_SIZE;
4760
4761         resp = &desc.params.get_res;
4762
4763         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4764         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4765
4766         if (!status && num_entries)
4767                 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4768
4769         return status;
4770 }
4771
4772 /**
4773  * ice_aq_get_res_descs - get allocated resource descriptors
4774  * @hw: pointer to the hardware structure
4775  * @num_entries: number of resource entries in buffer
4776  * @buf: structure to hold response data buffer
4777  * @buf_size: size of buffer
4778  * @res_type: resource type
4779  * @res_shared: is resource shared
4780  * @desc_id: input - first desc ID to start; output - next desc ID
4781  * @cd: pointer to command details structure or NULL
4782  */
4783 enum ice_status
4784 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4785                      struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4786                      bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4787 {
4788         struct ice_aqc_get_allocd_res_desc *cmd;
4789         struct ice_aq_desc desc;
4790         enum ice_status status;
4791
4792         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4793
4794         cmd = &desc.params.get_res_desc;
4795
4796         if (!buf)
4797                 return ICE_ERR_PARAM;
4798
4799         if (buf_size != (num_entries * sizeof(*buf)))
4800                 return ICE_ERR_PARAM;
4801
4802         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4803
4804         cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4805                                          ICE_AQC_RES_TYPE_M) | (res_shared ?
4806                                         ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4807         cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4808
4809         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4810         if (!status)
4811                 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4812
4813         return status;
4814 }
4815
/**
 * ice_add_mac_rule - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 * @sw: pointer to switch info struct for which function add rule
 * @lport: logic port number on which function add rule
 *
 * Unicast MACs (when umac_shared is false) are batched into a single bulk
 * add-switch-rules AQ sequence; multicast MACs (and shared unicast MACs)
 * are added one-by-one through ice_add_rule_internal().
 *
 * IMPORTANT: When the umac_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
 */
static enum ice_status
ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
		 struct ice_switch_info *sw, u8 lport)
{
	struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct LIST_HEAD_TYPE *rule_head;
	u16 total_elem_left, s_rule_size;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	u16 num_unicast = 0;
	u8 elem_sent;

	s_rule = NULL;
	rule_lock = &recp_list->filt_rule_lock;
	rule_head = &recp_list->filt_rules;

	/* Pass 1: validate every entry, count unique unicast addresses, and
	 * immediately add multicast/shared-unicast entries individually.
	 */
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    IS_ZERO_ETHER_ADDR(add))
			return ICE_ERR_PARAM;
		if (IS_UNICAST_ETHER_ADDR(add) && !hw->umac_shared) {
			/* Don't overwrite the unicast address */
			ice_acquire_lock(rule_lock);
			if (ice_find_rule_entry(rule_head,
						&m_list_itr->fltr_info)) {
				ice_release_lock(rule_lock);
				continue;
			}
			ice_release_lock(rule_lock);
			num_unicast++;
		} else if (IS_MULTICAST_ETHER_ADDR(add) ||
			   (IS_UNICAST_ETHER_ADDR(add) && hw->umac_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, recp_list, lport,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	/* The lock is held for the remainder of the bulk path so that the
	 * rule list cannot change between filling rules and book-keeping.
	 */
	ice_acquire_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = ICE_SUCCESS;
		goto ice_add_mac_exit;
	}

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = (struct ice_aqc_sw_rules_elem *)
		ice_calloc(hw, num_unicast, s_rule_size);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	/* Pass 2: serialize one switch rule per unicast entry into the
	 * contiguous buffer.
	 */
	r_iter = s_rule;
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		/* Each AQ call is capped by the max AQ buffer length */
		elem_sent = MIN_T(u8, total_elem_left,
				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule ID based on the value returned from FW */
	r_iter = s_rule;
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
			f_info->fltr_rule_id =
				LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = (struct ice_fltr_mgmt_list_entry *)
				ice_malloc(hw, sizeof(*fm_entry));
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */

			LIST_ADD(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	ice_release_lock(rule_lock);
	/* Frees the bulk buffer on both success and error paths; NULL when
	 * the early no-unicast exit was taken (free(NULL) would also be a
	 * no-op, the explicit check matches the file's existing style).
	 */
	if (s_rule)
		ice_free(hw, s_rule);
	return status;
}
4971
4972 /**
4973  * ice_add_mac - Add a MAC address based filter rule
4974  * @hw: pointer to the hardware structure
4975  * @m_list: list of MAC addresses and forwarding information
4976  *
4977  * Function add MAC rule for logical port from HW struct
4978  */
4979 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4980 {
4981         if (!m_list || !hw)
4982                 return ICE_ERR_PARAM;
4983
4984         return ice_add_mac_rule(hw, m_list, hw->switch_info,
4985                                 hw->port_info->lport);
4986 }
4987
/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @recp_list: recipe list for which rule has to be added
 * @f_entry: filter entry containing one VLAN information
 *
 * VLAN rules always forward to a VSI list. Three cases are handled:
 * no rule exists yet (create rule, reusing an existing VSI list when one
 * already holds the target VSI), a rule exists whose VSI list has a single
 * reference (extend that list), or a rule exists whose VSI list is shared
 * (clone into a new two-VSI list and repoint the rule at it).
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
		      struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &recp_list->filt_rule_lock;
	/* Lock is held across lookup and update so the rule list and the
	 * VSI list ref counts stay consistent.
	 */
	ice_acquire_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
	if (!v_list_itr) {
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(recp_list,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
		if (!status) {
			/* Re-find the entry that ice_create_pkt_fwd_rule just
			 * inserted so its VSI list mapping can be attached.
			 */
			v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
							 new_fltr);
			if (!v_list_itr) {
				status = ICE_ERR_DOES_NOT_EXIST;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				/* NOTE(review): ice_create_vsi_list_map may
				 * return NULL on allocation failure; result is
				 * not checked here — confirm upstream intent.
				 */
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it used
		 * by one VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* If VLAN rule exists and VSI list being used by this rule is
		 * referenced by more than 1 VLAN rule. Then create a new VSI
		 * list appending previous VSI with new VSI and update existing
		 * VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;
			goto exit;
		}

		/* The shared list holds exactly one VSI; fetch its handle */
		cur_handle =
			ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
					   ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* before overriding VSI list map info. decrement ref_cnt of
		 * previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		/* NOTE(review): return of ice_create_vsi_list_map is not
		 * NULL-checked here either — confirm upstream intent.
		 */
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	ice_release_lock(rule_lock);
	return status;
}
5141
5142 /**
5143  * ice_add_vlan_rule - Add VLAN based filter rule
5144  * @hw: pointer to the hardware structure
5145  * @v_list: list of VLAN entries and forwarding information
5146  * @sw: pointer to switch info struct for which function add rule
5147  */
5148 static enum ice_status
5149 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5150                   struct ice_switch_info *sw)
5151 {
5152         struct ice_fltr_list_entry *v_list_itr;
5153         struct ice_sw_recipe *recp_list;
5154
5155         recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
5156         LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
5157                             list_entry) {
5158                 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
5159                         return ICE_ERR_PARAM;
5160                 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
5161                 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
5162                                                            v_list_itr);
5163                 if (v_list_itr->status)
5164                         return v_list_itr->status;
5165         }
5166         return ICE_SUCCESS;
5167 }
5168
5169 /**
5170  * ice_add_vlan - Add a VLAN based filter rule
5171  * @hw: pointer to the hardware structure
5172  * @v_list: list of VLAN and forwarding information
5173  *
5174  * Function add VLAN rule for logical port from HW struct
5175  */
5176 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5177 {
5178         if (!v_list || !hw)
5179                 return ICE_ERR_PARAM;
5180
5181         return ice_add_vlan_rule(hw, v_list, hw->switch_info);
5182 }
5183
5184 /**
5185  * ice_add_mac_vlan_rule - Add MAC and VLAN pair based filter rule
5186  * @hw: pointer to the hardware structure
5187  * @mv_list: list of MAC and VLAN filters
5188  * @sw: pointer to switch info struct for which function add rule
5189  * @lport: logic port number on which function add rule
5190  *
5191  * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
5192  * pruning bits enabled, then it is the responsibility of the caller to make
5193  * sure to add a VLAN only filter on the same VSI. Packets belonging to that
5194  * VLAN won't be received on that VSI otherwise.
5195  */
5196 static enum ice_status
5197 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
5198                       struct ice_switch_info *sw, u8 lport)
5199 {
5200         struct ice_fltr_list_entry *mv_list_itr;
5201         struct ice_sw_recipe *recp_list;
5202
5203         if (!mv_list || !hw)
5204                 return ICE_ERR_PARAM;
5205
5206         recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
5207         LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
5208                             list_entry) {
5209                 enum ice_sw_lkup_type l_type =
5210                         mv_list_itr->fltr_info.lkup_type;
5211
5212                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5213                         return ICE_ERR_PARAM;
5214                 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
5215                 mv_list_itr->status =
5216                         ice_add_rule_internal(hw, recp_list, lport,
5217                                               mv_list_itr);
5218                 if (mv_list_itr->status)
5219                         return mv_list_itr->status;
5220         }
5221         return ICE_SUCCESS;
5222 }
5223
5224 /**
5225  * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
5226  * @hw: pointer to the hardware structure
5227  * @mv_list: list of MAC VLAN addresses and forwarding information
5228  *
5229  * Function add MAC VLAN rule for logical port from HW struct
5230  */
5231 enum ice_status
5232 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5233 {
5234         if (!mv_list || !hw)
5235                 return ICE_ERR_PARAM;
5236
5237         return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
5238                                      hw->port_info->lport);
5239 }
5240
5241 /**
5242  * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
5243  * @hw: pointer to the hardware structure
5244  * @em_list: list of ether type MAC filter, MAC is optional
5245  * @sw: pointer to switch info struct for which function add rule
5246  * @lport: logic port number on which function add rule
5247  *
5248  * This function requires the caller to populate the entries in
5249  * the filter list with the necessary fields (including flags to
5250  * indicate Tx or Rx rules).
5251  */
5252 static enum ice_status
5253 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5254                      struct ice_switch_info *sw, u8 lport)
5255 {
5256         struct ice_fltr_list_entry *em_list_itr;
5257
5258         LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
5259                             list_entry) {
5260                 struct ice_sw_recipe *recp_list;
5261                 enum ice_sw_lkup_type l_type;
5262
5263                 l_type = em_list_itr->fltr_info.lkup_type;
5264                 recp_list = &sw->recp_list[l_type];
5265
5266                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5267                     l_type != ICE_SW_LKUP_ETHERTYPE)
5268                         return ICE_ERR_PARAM;
5269
5270                 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
5271                                                             lport,
5272                                                             em_list_itr);
5273                 if (em_list_itr->status)
5274                         return em_list_itr->status;
5275         }
5276         return ICE_SUCCESS;
5277 }
5278
5279 /**
5280  * ice_add_eth_mac - Add a ethertype based filter rule
5281  * @hw: pointer to the hardware structure
5282  * @em_list: list of ethertype and forwarding information
5283  *
5284  * Function add ethertype rule for logical port from HW struct
5285  */
5286 enum ice_status
5287 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5288 {
5289         if (!em_list || !hw)
5290                 return ICE_ERR_PARAM;
5291
5292         return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5293                                     hw->port_info->lport);
5294 }
5295
5296 /**
5297  * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
5298  * @hw: pointer to the hardware structure
5299  * @em_list: list of ethertype or ethertype MAC entries
5300  * @sw: pointer to switch info struct for which function add rule
5301  */
5302 static enum ice_status
5303 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5304                         struct ice_switch_info *sw)
5305 {
5306         struct ice_fltr_list_entry *em_list_itr, *tmp;
5307
5308         LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
5309                                  list_entry) {
5310                 struct ice_sw_recipe *recp_list;
5311                 enum ice_sw_lkup_type l_type;
5312
5313                 l_type = em_list_itr->fltr_info.lkup_type;
5314
5315                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5316                     l_type != ICE_SW_LKUP_ETHERTYPE)
5317                         return ICE_ERR_PARAM;
5318
5319                 recp_list = &sw->recp_list[l_type];
5320                 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5321                                                                em_list_itr);
5322                 if (em_list_itr->status)
5323                         return em_list_itr->status;
5324         }
5325         return ICE_SUCCESS;
5326 }
5327
5328 /**
5329  * ice_remove_eth_mac - remove a ethertype based filter rule
5330  * @hw: pointer to the hardware structure
5331  * @em_list: list of ethertype and forwarding information
5332  *
5333  */
5334 enum ice_status
5335 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5336 {
5337         if (!em_list || !hw)
5338                 return ICE_ERR_PARAM;
5339
5340         return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
5341 }
5342
5343 /**
5344  * ice_rem_sw_rule_info
5345  * @hw: pointer to the hardware structure
5346  * @rule_head: pointer to the switch list structure that we want to delete
5347  */
5348 static void
5349 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5350 {
5351         if (!LIST_EMPTY(rule_head)) {
5352                 struct ice_fltr_mgmt_list_entry *entry;
5353                 struct ice_fltr_mgmt_list_entry *tmp;
5354
5355                 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
5356                                          ice_fltr_mgmt_list_entry, list_entry) {
5357                         LIST_DEL(&entry->list_entry);
5358                         ice_free(hw, entry);
5359                 }
5360         }
5361 }
5362
5363 /**
5364  * ice_rem_adv_rule_info
5365  * @hw: pointer to the hardware structure
5366  * @rule_head: pointer to the switch list structure that we want to delete
5367  */
5368 static void
5369 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5370 {
5371         struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
5372         struct ice_adv_fltr_mgmt_list_entry *lst_itr;
5373
5374         if (LIST_EMPTY(rule_head))
5375                 return;
5376
5377         LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
5378                                  ice_adv_fltr_mgmt_list_entry, list_entry) {
5379                 LIST_DEL(&lst_itr->list_entry);
5380                 ice_free(hw, lst_itr->lkups);
5381                 ice_free(hw, lst_itr);
5382         }
5383 }
5384
5385 /**
5386  * ice_rem_all_sw_rules_info
5387  * @hw: pointer to the hardware structure
5388  */
5389 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
5390 {
5391         struct ice_switch_info *sw = hw->switch_info;
5392         u8 i;
5393
5394         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5395                 struct LIST_HEAD_TYPE *rule_head;
5396
5397                 rule_head = &sw->recp_list[i].filt_rules;
5398                 if (!sw->recp_list[i].adv_rule)
5399                         ice_rem_sw_rule_info(hw, rule_head);
5400                 else
5401                         ice_rem_adv_rule_info(hw, rule_head);
5402                 if (sw->recp_list[i].adv_rule &&
5403                     LIST_EMPTY(&sw->recp_list[i].filt_rules))
5404                         sw->recp_list[i].adv_rule = false;
5405         }
5406 }
5407
/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @pi: pointer to the port_info structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add filter rule to set/unset given VSI as default VSI for the switch
 * (represented by swid)
 */
enum ice_status
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
		 u8 direction)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	struct ice_hw *hw = pi->hw;
	enum ice_adminq_opc opcode;
	enum ice_status status;
	u16 s_rule_size;
	u16 hw_vsi_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	/* An add rule carries the dummy ethernet header; a remove rule
	 * only needs the header-less element.
	 */
	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
		ICE_SW_RULE_RX_TX_NO_HDR_SIZE;

	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	/* Source differs per direction: the logical port for Rx, the VSI
	 * itself for Tx. On removal, reuse the rule ID saved at set time.
	 */
	if (f_info.flag & ICE_FLTR_RX) {
		f_info.src = pi->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
		if (!set)
			f_info.fltr_rule_id =
				pi->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
		if (!set)
			f_info.fltr_rule_id =
				pi->dflt_tx_vsi_rule_id;
	}

	if (set)
		opcode = ice_aqc_opc_add_sw_rules;
	else
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	/* Skip the port-info book keeping on AQ failure or when the caller
	 * passed a direction that is neither Rx nor Tx.
	 */
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		goto out;
	if (set) {
		/* Record the FW-assigned rule index so it can be removed */
		u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			pi->dflt_tx_vsi_num = hw_vsi_id;
			pi->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			pi->dflt_rx_vsi_num = hw_vsi_id;
			pi->dflt_rx_vsi_rule_id = index;
		}
	} else {
		/* Invalidate the cached default VSI / rule ID on removal */
		if (f_info.flag & ICE_FLTR_TX) {
			pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
		}
	}

out:
	ice_free(hw, s_rule);
	return status;
}
5496
5497 /**
5498  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
5499  * @list_head: head of rule list
5500  * @f_info: rule information
5501  *
5502  * Helper function to search for a unicast rule entry - this is to be used
5503  * to remove unicast MAC filter that is not shared with other VSIs on the
5504  * PF switch.
5505  *
5506  * Returns pointer to entry storing the rule if found
5507  */
5508 static struct ice_fltr_mgmt_list_entry *
5509 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
5510                           struct ice_fltr_info *f_info)
5511 {
5512         struct ice_fltr_mgmt_list_entry *list_itr;
5513
5514         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
5515                             list_entry) {
5516                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
5517                             sizeof(f_info->l_data)) &&
5518                     f_info->fwd_id.hw_vsi_id ==
5519                     list_itr->fltr_info.fwd_id.hw_vsi_id &&
5520                     f_info->flag == list_itr->fltr_info.flag)
5521                         return list_itr;
5522         }
5523         return NULL;
5524 }
5525
/**
 * ice_remove_mac_rule - remove a MAC based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 * @recp_list: list from which function remove MAC address
 *
 * This function removes either a MAC filter rule or a specific VSI from a
 * VSI list for a multicast MAC address.
 *
 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
 * ice_add_mac. Caller should be aware that this call will only work if all
 * the entries passed into m_list were added previously. It will not attempt to
 * do a partial remove of entries that were found.
 */
static enum ice_status
ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
		    struct ice_sw_recipe *recp_list)
{
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */

	if (!m_list)
		return ICE_ERR_PARAM;

	rule_lock = &recp_list->filt_rule_lock;
	/* Safe iteration: ice_remove_rule_internal() may unlink entries */
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
				 list_entry) {
		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;

		/* Only plain MAC lookups are handled by this helper */
		if (l_type != ICE_SW_LKUP_MAC)
			return ICE_ERR_PARAM;

		vsi_handle = list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;

		/* Resolve the software VSI handle to the HW VSI number the
		 * rule was programmed with before searching/removing.
		 */
		list_itr->fltr_info.fwd_id.hw_vsi_id =
					ice_get_hw_vsi_num(hw, vsi_handle);
		if (IS_UNICAST_ETHER_ADDR(add) && !hw->umac_shared) {
			/* Don't remove the unicast address that belongs to
			 * another VSI on the switch, since it is not being
			 * shared...
			 */
			ice_acquire_lock(rule_lock);
			if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
						       &list_itr->fltr_info)) {
				ice_release_lock(rule_lock);
				return ICE_ERR_DOES_NOT_EXIST;
			}
			ice_release_lock(rule_lock);
		}
		/* Per-entry status is recorded; first failure aborts the
		 * walk without rolling back entries already removed.
		 */
		list_itr->status = ice_remove_rule_internal(hw, recp_list,
							    list_itr);
		if (list_itr->status)
			return list_itr->status;
	}
	return ICE_SUCCESS;
}
5586
5587 /**
5588  * ice_remove_mac - remove a MAC address based filter rule
5589  * @hw: pointer to the hardware structure
5590  * @m_list: list of MAC addresses and forwarding information
5591  *
5592  */
5593 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5594 {
5595         struct ice_sw_recipe *recp_list;
5596
5597         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5598         return ice_remove_mac_rule(hw, m_list, recp_list);
5599 }
5600
5601 /**
5602  * ice_remove_vlan_rule - Remove VLAN based filter rule
5603  * @hw: pointer to the hardware structure
5604  * @v_list: list of VLAN entries and forwarding information
5605  * @recp_list: list from which function remove VLAN
5606  */
5607 static enum ice_status
5608 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5609                      struct ice_sw_recipe *recp_list)
5610 {
5611         struct ice_fltr_list_entry *v_list_itr, *tmp;
5612
5613         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5614                                  list_entry) {
5615                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5616
5617                 if (l_type != ICE_SW_LKUP_VLAN)
5618                         return ICE_ERR_PARAM;
5619                 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5620                                                               v_list_itr);
5621                 if (v_list_itr->status)
5622                         return v_list_itr->status;
5623         }
5624         return ICE_SUCCESS;
5625 }
5626
5627 /**
5628  * ice_remove_vlan - remove a VLAN address based filter rule
5629  * @hw: pointer to the hardware structure
5630  * @v_list: list of VLAN and forwarding information
5631  *
5632  */
5633 enum ice_status
5634 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5635 {
5636         struct ice_sw_recipe *recp_list;
5637
5638         if (!v_list || !hw)
5639                 return ICE_ERR_PARAM;
5640
5641         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5642         return ice_remove_vlan_rule(hw, v_list, recp_list);
5643 }
5644
5645 /**
5646  * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5647  * @hw: pointer to the hardware structure
5648  * @v_list: list of MAC VLAN entries and forwarding information
5649  * @recp_list: list from which function remove MAC VLAN
5650  */
5651 static enum ice_status
5652 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5653                          struct ice_sw_recipe *recp_list)
5654 {
5655         struct ice_fltr_list_entry *v_list_itr, *tmp;
5656
5657         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5658         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5659                                  list_entry) {
5660                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5661
5662                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5663                         return ICE_ERR_PARAM;
5664                 v_list_itr->status =
5665                         ice_remove_rule_internal(hw, recp_list,
5666                                                  v_list_itr);
5667                 if (v_list_itr->status)
5668                         return v_list_itr->status;
5669         }
5670         return ICE_SUCCESS;
5671 }
5672
5673 /**
5674  * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5675  * @hw: pointer to the hardware structure
5676  * @mv_list: list of MAC VLAN and forwarding information
5677  */
5678 enum ice_status
5679 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5680 {
5681         struct ice_sw_recipe *recp_list;
5682
5683         if (!mv_list || !hw)
5684                 return ICE_ERR_PARAM;
5685
5686         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5687         return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5688 }
5689
5690 /**
5691  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5692  * @fm_entry: filter entry to inspect
5693  * @vsi_handle: VSI handle to compare with filter info
5694  */
5695 static bool
5696 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5697 {
5698         return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5699                  fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5700                 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5701                  fm_entry->vsi_list_info &&
5702                  (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5703                                  vsi_handle))));
5704 }
5705
5706 /**
5707  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5708  * @hw: pointer to the hardware structure
5709  * @vsi_handle: VSI handle to remove filters from
5710  * @vsi_list_head: pointer to the list to add entry to
5711  * @fi: pointer to fltr_info of filter entry to copy & add
5712  *
5713  * Helper function, used when creating a list of filters to remove from
5714  * a specific VSI. The entry added to vsi_list_head is a COPY of the
5715  * original filter entry, with the exception of fltr_info.fltr_act and
5716  * fltr_info.fwd_id fields. These are set such that later logic can
5717  * extract which VSI to remove the fltr from, and pass on that information.
5718  */
5719 static enum ice_status
5720 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5721                                struct LIST_HEAD_TYPE *vsi_list_head,
5722                                struct ice_fltr_info *fi)
5723 {
5724         struct ice_fltr_list_entry *tmp;
5725
5726         /* this memory is freed up in the caller function
5727          * once filters for this VSI are removed
5728          */
5729         tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5730         if (!tmp)
5731                 return ICE_ERR_NO_MEMORY;
5732
5733         tmp->fltr_info = *fi;
5734
5735         /* Overwrite these fields to indicate which VSI to remove filter from,
5736          * so find and remove logic can extract the information from the
5737          * list entries. Note that original entries will still have proper
5738          * values.
5739          */
5740         tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5741         tmp->fltr_info.vsi_handle = vsi_handle;
5742         tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5743
5744         LIST_ADD(&tmp->list_entry, vsi_list_head);
5745
5746         return ICE_SUCCESS;
5747 }
5748
5749 /**
5750  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5751  * @hw: pointer to the hardware structure
5752  * @vsi_handle: VSI handle to remove filters from
5753  * @lkup_list_head: pointer to the list that has certain lookup type filters
5754  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5755  *
5756  * Locates all filters in lkup_list_head that are used by the given VSI,
5757  * and adds COPIES of those entries to vsi_list_head (intended to be used
5758  * to remove the listed filters).
5759  * Note that this means all entries in vsi_list_head must be explicitly
5760  * deallocated by the caller when done with list.
5761  */
5762 static enum ice_status
5763 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5764                          struct LIST_HEAD_TYPE *lkup_list_head,
5765                          struct LIST_HEAD_TYPE *vsi_list_head)
5766 {
5767         struct ice_fltr_mgmt_list_entry *fm_entry;
5768         enum ice_status status = ICE_SUCCESS;
5769
5770         /* check to make sure VSI ID is valid and within boundary */
5771         if (!ice_is_vsi_valid(hw, vsi_handle))
5772                 return ICE_ERR_PARAM;
5773
5774         LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5775                             ice_fltr_mgmt_list_entry, list_entry) {
5776                 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
5777                         continue;
5778
5779                 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5780                                                         vsi_list_head,
5781                                                         &fm_entry->fltr_info);
5782                 if (status)
5783                         return status;
5784         }
5785         return status;
5786 }
5787
5788 /**
5789  * ice_determine_promisc_mask
5790  * @fi: filter info to parse
5791  *
5792  * Helper function to determine which ICE_PROMISC_ mask corresponds
5793  * to given filter into.
5794  */
5795 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5796 {
5797         u16 vid = fi->l_data.mac_vlan.vlan_id;
5798         u8 *macaddr = fi->l_data.mac.mac_addr;
5799         bool is_tx_fltr = false;
5800         u8 promisc_mask = 0;
5801
5802         if (fi->flag == ICE_FLTR_TX)
5803                 is_tx_fltr = true;
5804
5805         if (IS_BROADCAST_ETHER_ADDR(macaddr))
5806                 promisc_mask |= is_tx_fltr ?
5807                         ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5808         else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5809                 promisc_mask |= is_tx_fltr ?
5810                         ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5811         else if (IS_UNICAST_ETHER_ADDR(macaddr))
5812                 promisc_mask |= is_tx_fltr ?
5813                         ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
5814         if (vid)
5815                 promisc_mask |= is_tx_fltr ?
5816                         ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5817
5818         return promisc_mask;
5819 }
5820
5821 /**
5822  * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5823  * @hw: pointer to the hardware structure
5824  * @vsi_handle: VSI handle to retrieve info from
5825  * @promisc_mask: pointer to mask to be filled in
5826  * @vid: VLAN ID of promisc VLAN VSI
5827  * @sw: pointer to switch info struct for which function add rule
5828  */
5829 static enum ice_status
5830 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5831                      u16 *vid, struct ice_switch_info *sw)
5832 {
5833         struct ice_fltr_mgmt_list_entry *itr;
5834         struct LIST_HEAD_TYPE *rule_head;
5835         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5836
5837         if (!ice_is_vsi_valid(hw, vsi_handle))
5838                 return ICE_ERR_PARAM;
5839
5840         *vid = 0;
5841         *promisc_mask = 0;
5842         rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5843         rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5844
5845         ice_acquire_lock(rule_lock);
5846         LIST_FOR_EACH_ENTRY(itr, rule_head,
5847                             ice_fltr_mgmt_list_entry, list_entry) {
5848                 /* Continue if this filter doesn't apply to this VSI or the
5849                  * VSI ID is not in the VSI map for this filter
5850                  */
5851                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5852                         continue;
5853
5854                 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5855         }
5856         ice_release_lock(rule_lock);
5857
5858         return ICE_SUCCESS;
5859 }
5860
5861 /**
5862  * ice_get_vsi_promisc - get promiscuous mode of given VSI
5863  * @hw: pointer to the hardware structure
5864  * @vsi_handle: VSI handle to retrieve info from
5865  * @promisc_mask: pointer to mask to be filled in
5866  * @vid: VLAN ID of promisc VLAN VSI
5867  */
5868 enum ice_status
5869 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5870                     u16 *vid)
5871 {
5872         return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5873                                     vid, hw->switch_info);
5874 }
5875
5876 /**
5877  * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5878  * @hw: pointer to the hardware structure
5879  * @vsi_handle: VSI handle to retrieve info from
5880  * @promisc_mask: pointer to mask to be filled in
5881  * @vid: VLAN ID of promisc VLAN VSI
5882  * @sw: pointer to switch info struct for which function add rule
5883  */
5884 static enum ice_status
5885 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5886                           u16 *vid, struct ice_switch_info *sw)
5887 {
5888         struct ice_fltr_mgmt_list_entry *itr;
5889         struct LIST_HEAD_TYPE *rule_head;
5890         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5891
5892         if (!ice_is_vsi_valid(hw, vsi_handle))
5893                 return ICE_ERR_PARAM;
5894
5895         *vid = 0;
5896         *promisc_mask = 0;
5897         rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5898         rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5899
5900         ice_acquire_lock(rule_lock);
5901         LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5902                             list_entry) {
5903                 /* Continue if this filter doesn't apply to this VSI or the
5904                  * VSI ID is not in the VSI map for this filter
5905                  */
5906                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5907                         continue;
5908
5909                 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5910         }
5911         ice_release_lock(rule_lock);
5912
5913         return ICE_SUCCESS;
5914 }
5915
5916 /**
5917  * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5918  * @hw: pointer to the hardware structure
5919  * @vsi_handle: VSI handle to retrieve info from
5920  * @promisc_mask: pointer to mask to be filled in
5921  * @vid: VLAN ID of promisc VLAN VSI
5922  */
5923 enum ice_status
5924 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5925                          u16 *vid)
5926 {
5927         return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5928                                          vid, hw->switch_info);
5929 }
5930
5931 /**
5932  * ice_remove_promisc - Remove promisc based filter rules
5933  * @hw: pointer to the hardware structure
5934  * @recp_id: recipe ID for which the rule needs to removed
5935  * @v_list: list of promisc entries
5936  */
5937 static enum ice_status
5938 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5939                    struct LIST_HEAD_TYPE *v_list)
5940 {
5941         struct ice_fltr_list_entry *v_list_itr, *tmp;
5942         struct ice_sw_recipe *recp_list;
5943
5944         recp_list = &hw->switch_info->recp_list[recp_id];
5945         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5946                                  list_entry) {
5947                 v_list_itr->status =
5948                         ice_remove_rule_internal(hw, recp_list, v_list_itr);
5949                 if (v_list_itr->status)
5950                         return v_list_itr->status;
5951         }
5952         return ICE_SUCCESS;
5953 }
5954
/**
 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 * @sw: pointer to switch info struct for which function add rule
 *
 * Builds a list of copies of every promisc rule on this VSI that is fully
 * covered by promisc_mask, then removes those rules and frees the copies.
 */
static enum ice_status
_ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		       u16 vid, struct ice_switch_info *sw)
{
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct LIST_HEAD_TYPE remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* VLAN promisc bits live under a separate recipe */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	/* Collect matching rules into remove_list_head under the lock;
	 * the actual removal happens after the lock is dropped.
	 */
	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		/* For VLAN promisc, only rules for the requested VLAN */
		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			/* Release the lock before the cleanup path */
			ice_release_lock(rule_lock);
			goto free_fltr_list;
		}
	}
	ice_release_lock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	/* Free the copies made by ice_add_entry_to_vsi_fltr_list() */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
	}

	return status;
}
6029
6030 /**
6031  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
6032  * @hw: pointer to the hardware structure
6033  * @vsi_handle: VSI handle to clear mode
6034  * @promisc_mask: mask of promiscuous config bits to clear
6035  * @vid: VLAN ID to clear VLAN promiscuous
6036  */
6037 enum ice_status
6038 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
6039                       u8 promisc_mask, u16 vid)
6040 {
6041         return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
6042                                       vid, hw->switch_info);
6043 }
6044
/**
 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 * @lport: logical port number to configure promisc mode
 * @sw: pointer to switch info struct for which function add rule
 *
 * Adds one switch rule per requested direction/packet-type combination,
 * draining the requested bits from promisc_mask as they are handled.
 */
static enum ice_status
_ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		     u16 vid, u8 lport, struct ice_switch_info *sw)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	enum ice_status status = ICE_SUCCESS;
	bool is_tx_fltr;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);

	/* VLAN promisc requests use the PROMISC_VLAN recipe and carry the
	 * VLAN ID in the lookup data; plain promisc uses PROMISC.
	 */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		struct ice_sw_recipe *recp_list;
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		/* Handle exactly one ucast/mcast/bcast bit per iteration */
		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
				   ICE_NONDMA_TO_NONDMA);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1;	/* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			/* TX rules are sourced from the VSI itself */
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			/* RX rules are sourced from the logical port */
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;
		recp_list = &sw->recp_list[recipe_id];

		status = ice_add_rule_internal(hw, recp_list, lport,
					       &f_list_entry);
		if (status != ICE_SUCCESS)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}
6165
6166 /**
6167  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
6168  * @hw: pointer to the hardware structure
6169  * @vsi_handle: VSI handle to configure
6170  * @promisc_mask: mask of promiscuous config bits
6171  * @vid: VLAN ID to set VLAN promiscuous
6172  */
6173 enum ice_status
6174 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6175                     u16 vid)
6176 {
6177         return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
6178                                     hw->port_info->lport,
6179                                     hw->switch_info);
6180 }
6181
/**
 * _ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 * @lport: logical port number to configure promisc mode
 * @sw: pointer to switch info struct for which function add rule
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 */
static enum ice_status
_ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			  bool rm_vlan_promisc, u8 lport,
			  struct ice_switch_info *sw)
{
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct LIST_HEAD_TYPE vsi_list_head;
	struct LIST_HEAD_TYPE *vlan_head;
	struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
	enum ice_status status;
	u16 vlan_id;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	/* Snapshot the VSI's VLAN filters (as allocated copies) under the
	 * lock, then operate on the copies after dropping it.
	 */
	ice_acquire_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	ice_release_lock(vlan_lock);
	if (status)
		goto free_fltr_list;

	/* Apply (or clear) the promisc mode per associated VLAN ID */
	LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
			    list_entry) {
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status =  _ice_clear_vsi_promisc(hw, vsi_handle,
							 promisc_mask,
							 vlan_id, sw);
		else
			status =  _ice_set_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id,
						       lport, sw);
		if (status)
			break;
	}

free_fltr_list:
	/* Free the copies made by ice_add_to_vsi_fltr_list() */
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&list_itr->list_entry);
		ice_free(hw, list_itr);
	}
	return status;
}
6238
6239 /**
6240  * ice_set_vlan_vsi_promisc
6241  * @hw: pointer to the hardware structure
6242  * @vsi_handle: VSI handle to configure
6243  * @promisc_mask: mask of promiscuous config bits
6244  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6245  *
6246  * Configure VSI with all associated VLANs to given promiscuous mode(s)
6247  */
6248 enum ice_status
6249 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6250                          bool rm_vlan_promisc)
6251 {
6252         return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
6253                                          rm_vlan_promisc, hw->port_info->lport,
6254                                          hw->switch_info);
6255 }
6256
/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @recp_list: recipe list from which function remove fltr
 * @lkup: switch rule filter lookup type
 *
 * Collects all filters of type @lkup that reference @vsi_handle into a
 * temporary list, then dispatches to the removal routine matching the
 * lookup type. Errors from the removal routines are not propagated
 * (function returns void).
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 struct ice_sw_recipe *recp_list,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_fltr_list_entry *fm_entry;
	struct LIST_HEAD_TYPE remove_list_head;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &recp_list[lkup].filt_rule_lock;
	rule_head = &recp_list[lkup].filt_rules;
	/* Snapshot matching rules under the lock; removal below is done on
	 * the private copy so the lock is not held across removal calls.
	 */
	ice_acquire_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	ice_release_lock(rule_lock);
	if (status)
		goto free_fltr_list;

	/* Dispatch to the per-lookup-type removal routine */
	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		ice_remove_mac_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		ice_remove_eth_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_DFLT:
		ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
		break;
	case ICE_SW_LKUP_LAST:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
		break;
	}

free_fltr_list:
	/* Free the snapshot list entries in all cases */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
	}
}
6319
6320 /**
6321  * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6322  * @hw: pointer to the hardware structure
6323  * @vsi_handle: VSI handle to remove filters from
6324  * @sw: pointer to switch info struct
6325  */
6326 static void
6327 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6328                          struct ice_switch_info *sw)
6329 {
6330         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6331
6332         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6333                                  sw->recp_list, ICE_SW_LKUP_MAC);
6334         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6335                                  sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6336         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6337                                  sw->recp_list, ICE_SW_LKUP_PROMISC);
6338         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6339                                  sw->recp_list, ICE_SW_LKUP_VLAN);
6340         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6341                                  sw->recp_list, ICE_SW_LKUP_DFLT);
6342         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6343                                  sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6344         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6345                                  sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6346         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6347                                  sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6348 }
6349
6350 /**
6351  * ice_remove_vsi_fltr - Remove all filters for a VSI
6352  * @hw: pointer to the hardware structure
6353  * @vsi_handle: VSI handle to remove filters from
6354  */
6355 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
6356 {
6357         ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
6358 }
6359
6360 /**
6361  * ice_alloc_res_cntr - allocating resource counter
6362  * @hw: pointer to the hardware structure
6363  * @type: type of resource
6364  * @alloc_shared: if set it is shared else dedicated
6365  * @num_items: number of entries requested for FD resource type
6366  * @counter_id: counter index returned by AQ call
6367  */
6368 enum ice_status
6369 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6370                    u16 *counter_id)
6371 {
6372         struct ice_aqc_alloc_free_res_elem *buf;
6373         enum ice_status status;
6374         u16 buf_len;
6375
6376         /* Allocate resource */
6377         buf_len = ice_struct_size(buf, elem, 1);
6378         buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6379         if (!buf)
6380                 return ICE_ERR_NO_MEMORY;
6381
6382         buf->num_elems = CPU_TO_LE16(num_items);
6383         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6384                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
6385
6386         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6387                                        ice_aqc_opc_alloc_res, NULL);
6388         if (status)
6389                 goto exit;
6390
6391         *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6392
6393 exit:
6394         ice_free(hw, buf);
6395         return status;
6396 }
6397
6398 /**
6399  * ice_free_res_cntr - free resource counter
6400  * @hw: pointer to the hardware structure
6401  * @type: type of resource
6402  * @alloc_shared: if set it is shared else dedicated
6403  * @num_items: number of entries to be freed for FD resource type
6404  * @counter_id: counter ID resource which needs to be freed
6405  */
6406 enum ice_status
6407 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6408                   u16 counter_id)
6409 {
6410         struct ice_aqc_alloc_free_res_elem *buf;
6411         enum ice_status status;
6412         u16 buf_len;
6413
6414         /* Free resource */
6415         buf_len = ice_struct_size(buf, elem, 1);
6416         buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6417         if (!buf)
6418                 return ICE_ERR_NO_MEMORY;
6419
6420         buf->num_elems = CPU_TO_LE16(num_items);
6421         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6422                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
6423         buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6424
6425         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6426                                        ice_aqc_opc_free_res, NULL);
6427         if (status)
6428                 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6429
6430         ice_free(hw, buf);
6431         return status;
6432 }
6433
/**
 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
 * @hw: pointer to the hardware structure
 * @counter_id: returns counter index
 *
 * Convenience wrapper: allocates exactly one dedicated VLAN counter
 * through ice_alloc_res_cntr().
 */
enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
{
	return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
				  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
				  counter_id);
}
6445
/**
 * ice_free_vlan_res_counter - Free counter resource for VLAN type
 * @hw: pointer to the hardware structure
 * @counter_id: counter index to be freed
 *
 * Convenience wrapper: frees one dedicated VLAN counter through
 * ice_free_res_cntr(). Counterpart of ice_alloc_vlan_res_counter().
 */
enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
{
	return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
				 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
				 counter_id);
}
6457
/**
 * ice_alloc_res_lg_act - add large action resource
 * @hw: pointer to the hardware structure
 * @l_id: large action ID to fill it in
 * @num_acts: number of actions to hold with a large action entry
 *
 * Allocates a wide-table entry sized for @num_acts actions and returns its
 * index through @l_id.
 */
static enum ice_status
ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	enum ice_status status;
	u16 buf_len;

	if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
		return ICE_ERR_PARAM;

	/* Allocate resource for large action */
	buf_len = ice_struct_size(sw_buf, elem, 1);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
	if (!sw_buf)
		return ICE_ERR_NO_MEMORY;

	sw_buf->num_elems = CPU_TO_LE16(1);

	/* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
	 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
	 * If num_acts is greater than 2, then use
	 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
	 * The num_acts cannot exceed ICE_MAX_LG_ACT. This was ensured at the
	 * beginning of the function.
	 */
	if (num_acts == 1)
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
	else if (num_acts == 2)
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
	else
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (!status)
		*l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);

	ice_free(hw, sw_buf);
	return status;
}
6504
/**
 * ice_add_mac_with_sw_marker - add filter with sw marker
 * @hw: pointer to the hardware structure
 * @f_info: filter info structure containing the MAC filter information
 * @sw_marker: sw marker to tag the Rx descriptor with
 *
 * Adds (or finds) the MAC forwarding rule, then attaches a three-action
 * large action carrying @sw_marker to it. If anything fails and the rule
 * did not already exist, the rule added here is removed again.
 */
enum ice_status
ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
			   u16 sw_marker)
{
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct ice_sw_recipe *recp_list;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status ret;
	bool entry_exists;
	u16 lg_act_id;

	/* Only forward-to-VSI MAC rules with a valid VSI and marker are
	 * supported
	 */
	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;

	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	if (sw_marker == ICE_INVAL_SW_MARKER_ID)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);

	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	 */

	INIT_LIST_HEAD(&l_head);
	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);

	entry_exists = false;
	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
			       hw->port_info->lport);
	if (ret == ICE_ERR_ALREADY_EXISTS)
		entry_exists = true;
	else if (ret)
		return ret;

	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
	rule_lock = &recp_list->filt_rule_lock;
	ice_acquire_lock(rule_lock);
	/* Get the book keeping entry for the filter */
	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
	if (!m_entry)
		/* NOTE(review): ret is not set to an error here; the value
		 * eventually returned comes from the cleanup path below —
		 * confirm this cannot report success on a lookup miss.
		 */
		goto exit_error;

	/* If counter action was enabled for this rule then don't enable
	 * sw marker large action
	 */
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_PARAM;
		goto exit_error;
	}

	/* if same marker was added before */
	if (m_entry->sw_marker_id == sw_marker) {
		ret = ICE_ERR_ALREADY_EXISTS;
		goto exit_error;
	}

	/* Allocate a hardware table entry to hold large act. Three actions
	 * for marker based large action
	 */
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
	if (ret)
		goto exit_error;

	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
		/* NOTE(review): ret is ICE_SUCCESS when this branch is
		 * taken — verify an error status is intended here.
		 */
		goto exit_error;

	/* Update the switch rule to add the marker action */
	ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
	if (!ret) {
		ice_release_lock(rule_lock);
		return ret;
	}

exit_error:
	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
	if (!entry_exists)
		ret = ice_remove_mac(hw, &l_head);

	return ret;
}
6600
/**
 * ice_add_mac_with_counter - add filter with counter enabled
 * @hw: pointer to the hardware structure
 * @f_info: pointer to filter info structure containing the MAC filter
 *          information
 *
 * Adds (or finds) the MAC forwarding rule, allocates a VLAN counter and a
 * two-action large action, and attaches the counter action to the rule.
 * If anything fails and the rule did not already exist, the rule added
 * here is removed again.
 */
enum ice_status
ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct ice_sw_recipe *recp_list;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status ret;
	bool entry_exist;
	u16 counter_id;
	u16 lg_act_id;

	/* Only forward-to-VSI MAC rules with a valid VSI are supported */
	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;

	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];

	entry_exist = false;

	rule_lock = &recp_list->filt_rule_lock;

	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	 */
	INIT_LIST_HEAD(&l_head);

	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);

	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
			       hw->port_info->lport);
	if (ret == ICE_ERR_ALREADY_EXISTS)
		entry_exist = true;
	else if (ret)
		return ret;

	ice_acquire_lock(rule_lock);
	/* Get the book keeping entry for the filter */
	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
	if (!m_entry) {
		ret = ICE_ERR_BAD_PTR;
		goto exit_error;
	}

	/* Don't enable counter for a filter for which sw marker was enabled */
	if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
		ret = ICE_ERR_PARAM;
		goto exit_error;
	}

	/* If a counter was already enabled then don't need to add again */
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_ALREADY_EXISTS;
		goto exit_error;
	}

	/* Allocate a hardware table entry to VLAN counter */
	ret = ice_alloc_vlan_res_counter(hw, &counter_id);
	if (ret)
		goto exit_error;

	/* Allocate a hardware table entry to hold large act. Two actions for
	 * counter based large action
	 */
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
	if (ret)
		goto exit_error;

	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
		/* NOTE(review): ret is ICE_SUCCESS when this branch is
		 * taken, and the counter allocated above is not released —
		 * verify both are intended.
		 */
		goto exit_error;

	/* Update the switch rule to add the counter action */
	ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
	if (!ret) {
		ice_release_lock(rule_lock);
		return ret;
	}

exit_error:
	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
	if (!entry_exist)
		ret = ice_remove_mac(hw, &l_head);

	return ret;
}
6699
/* Mapping table that, for each protocol header in the "ice_prot_hdr" union,
 * lists the byte offset of every 16-bit word of that header as per the
 * specification of that protocol.
 * For example, per the ICE_MAC_OFOS entry below, the destination MAC address
 * occupies three words at byte offsets 0, 2 and 4, and the source address
 * the three words at 6, 8 and 10.
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 2, 0 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_SCTP_IL,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_VXLAN_GPE,	{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
	{ ICE_PFCP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_L2TPV3,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_ESP,		{ 0, 2, 4, 6 } },
	{ ICE_AH,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_NAT_T,		{ 8, 10, 12, 14 } },
	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
	{ ICE_VLAN_EX,		{ 2, 0 } },
	{ ICE_VLAN_IN,		{ 2, 0 } },
};
6739
/* Mapping of software protocol type (enum ice_protocol_type) to the
 * hardware protocol ID used when programming recipes. Several tunnel
 * types intentionally share the same hardware ID (e.g. the UDP-based
 * tunnels all map to ICE_UDP_OF_HW).
 * Not const: ice_change_proto_id_to_dvm() rewrites the ICE_VLAN_OFOS
 * entry when double VLAN mode is enabled.
 */

static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_PFCP,		ICE_UDP_ILOS_HW },
	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
	{ ICE_ESP,		ICE_ESP_HW },
	{ ICE_AH,		ICE_AH_HW },
	{ ICE_NAT_T,		ICE_UDP_ILOS_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
	{ ICE_VLAN_EX,		ICE_VLAN_OF_HW },
	{ ICE_VLAN_IN,		ICE_VLAN_OL_HW },
	{ ICE_FLG_DIR,		ICE_META_DATA_ID_HW},
};
6775
/**
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 * @tun_type: tunnel type the recipe must have been created for
 * @priority: recipe priority that must match as well
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 * A match requires the same set of (protocol ID, offset, mask) words,
 * the same tunnel type and the same priority.
 */
static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
			 enum ice_sw_tunnel_type tun_type, u32 priority)
{
	bool refresh_required = true;
	struct ice_sw_recipe *recp;
	u8 i;

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,
						&refresh_required))
				continue;

		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)
			continue;

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
			struct ice_fv_word *be = lkup_exts->fv_words;
			u16 *cr = recp[i].lkup_exts.field_mask;
			u16 *de = lkup_exts->field_mask;
			bool found = true;
			u8 pe, qr;

			/* ar, cr, and qr are related to the recipe words, while
			 * be, de, and pe are related to the lookup words
			 */
			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
				     qr++) {
					if (ar[qr].off == be[pe].off &&
					    ar[qr].prot_id == be[pe].prot_id &&
					    cr[qr] == de[pe])
						/* Found the "pe"th word in the
						 * given recipe
						 */
						break;
				}
				/* After walking through all the words in the
				 * "i"th recipe if "p"th word was not found then
				 * this recipe is not what we are looking for.
				 * So break out from this loop and try the next
				 * recipe
				 */
				if (qr >= recp[i].lkup_exts.n_val_words) {
					found = false;
					break;
				}
			}
			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 */
			if (tun_type == recp[i].tun_type && found &&
			    priority == recp[i].priority)
				return i; /* Return the recipe ID */
		}
	}
	return ICE_MAX_NUM_RECIPES;
}
6853
6854 /**
6855  * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
6856  *
6857  * As protocol id for outer vlan is different in dvm and svm, if dvm is
6858  * supported protocol array record for outer vlan has to be modified to
6859  * reflect the value proper for DVM.
6860  */
6861 void ice_change_proto_id_to_dvm(void)
6862 {
6863         u8 i;
6864
6865         for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6866                 if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
6867                     ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
6868                         ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
6869 }
6870
6871 /**
6872  * ice_prot_type_to_id - get protocol ID from protocol type
6873  * @type: protocol type
6874  * @id: pointer to variable that will receive the ID
6875  *
6876  * Returns true if found, false otherwise
6877  */
6878 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6879 {
6880         u8 i;
6881
6882         for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6883                 if (ice_prot_id_tbl[i].type == type) {
6884                         *id = ice_prot_id_tbl[i].protocol_id;
6885                         return true;
6886                 }
6887         return false;
6888 }
6889
/**
 * ice_fill_valid_words - count valid words
 * @rule: advanced rule with lookup information
 * @lkup_exts: byte offset extractions of the words that are valid
 *
 * calculate valid words in a lookup rule using mask value
 *
 * Appends one (protocol ID, byte offset, mask) entry to @lkup_exts for
 * every non-zero 16-bit word of the rule's mask, and returns the number of
 * words added. Returns 0 if the protocol type is unknown or the extraction
 * would overflow ICE_MAX_CHAIN_WORDS.
 */
static u8
ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
		     struct ice_prot_lkup_ext *lkup_exts)
{
	u8 j, word, prot_id, ret_val;

	/* The lookup serves as a validity check on rule->type; the ID is
	 * re-read from ice_prot_id_tbl in the loop below rather than from
	 * prot_id.
	 */
	if (!ice_prot_type_to_id(rule->type, &prot_id))
		return 0;

	/* Continue appending after any words already recorded */
	word = lkup_exts->n_val_words;

	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
		if (((u16 *)&rule->m_u)[j] &&
		    (size_t)rule->type < ARRAY_SIZE(ice_prot_ext)) {
			/* No more space to accommodate */
			if (word >= ICE_MAX_CHAIN_WORDS)
				return 0;
			lkup_exts->fv_words[word].off =
				ice_prot_ext[rule->type].offs[j];
			lkup_exts->fv_words[word].prot_id =
				ice_prot_id_tbl[rule->type].protocol_id;
			/* Masks are stored CPU-order; rule mask is big-endian
			 * on the wire
			 */
			lkup_exts->field_mask[word] =
				BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
			word++;
		}

	ret_val = word - lkup_exts->n_val_words;
	lkup_exts->n_val_words = word;

	return ret_val;
}
6928
6929 /**
6930  * ice_create_first_fit_recp_def - Create a recipe grouping
6931  * @hw: pointer to the hardware structure
6932  * @lkup_exts: an array of protocol header extractions
6933  * @rg_list: pointer to a list that stores new recipe groups
6934  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6935  *
6936  * Using first fit algorithm, take all the words that are still not done
6937  * and start grouping them in 4-word groups. Each group makes up one
6938  * recipe.
6939  */
6940 static enum ice_status
6941 ice_create_first_fit_recp_def(struct ice_hw *hw,
6942                               struct ice_prot_lkup_ext *lkup_exts,
6943                               struct LIST_HEAD_TYPE *rg_list,
6944                               u8 *recp_cnt)
6945 {
6946         struct ice_pref_recipe_group *grp = NULL;
6947         u8 j;
6948
6949         *recp_cnt = 0;
6950
6951         if (!lkup_exts->n_val_words) {
6952                 struct ice_recp_grp_entry *entry;
6953
6954                 entry = (struct ice_recp_grp_entry *)
6955                         ice_malloc(hw, sizeof(*entry));
6956                 if (!entry)
6957                         return ICE_ERR_NO_MEMORY;
6958                 LIST_ADD(&entry->l_entry, rg_list);
6959                 grp = &entry->r_group;
6960                 (*recp_cnt)++;
6961                 grp->n_val_pairs = 0;
6962         }
6963
6964         /* Walk through every word in the rule to check if it is not done. If so
6965          * then this word needs to be part of a new recipe.
6966          */
6967         for (j = 0; j < lkup_exts->n_val_words; j++)
6968                 if (!ice_is_bit_set(lkup_exts->done, j)) {
6969                         if (!grp ||
6970                             grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6971                                 struct ice_recp_grp_entry *entry;
6972
6973                                 entry = (struct ice_recp_grp_entry *)
6974                                         ice_malloc(hw, sizeof(*entry));
6975                                 if (!entry)
6976                                         return ICE_ERR_NO_MEMORY;
6977                                 LIST_ADD(&entry->l_entry, rg_list);
6978                                 grp = &entry->r_group;
6979                                 (*recp_cnt)++;
6980                         }
6981
6982                         grp->pairs[grp->n_val_pairs].prot_id =
6983                                 lkup_exts->fv_words[j].prot_id;
6984                         grp->pairs[grp->n_val_pairs].off =
6985                                 lkup_exts->fv_words[j].off;
6986                         grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6987                         grp->n_val_pairs++;
6988                 }
6989
6990         return ICE_SUCCESS;
6991 }
6992
6993 /**
6994  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6995  * @hw: pointer to the hardware structure
6996  * @fv_list: field vector with the extraction sequence information
6997  * @rg_list: recipe groupings with protocol-offset pairs
6998  *
6999  * Helper function to fill in the field vector indices for protocol-offset
7000  * pairs. These indexes are then ultimately programmed into a recipe.
7001  */
7002 static enum ice_status
7003 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
7004                        struct LIST_HEAD_TYPE *rg_list)
7005 {
7006         struct ice_sw_fv_list_entry *fv;
7007         struct ice_recp_grp_entry *rg;
7008         struct ice_fv_word *fv_ext;
7009
7010         if (LIST_EMPTY(fv_list))
7011                 return ICE_SUCCESS;
7012
7013         fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
7014         fv_ext = fv->fv_ptr->ew;
7015
7016         LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
7017                 u8 i;
7018
7019                 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
7020                         struct ice_fv_word *pr;
7021                         bool found = false;
7022                         u16 mask;
7023                         u8 j;
7024
7025                         pr = &rg->r_group.pairs[i];
7026                         mask = rg->r_group.mask[i];
7027
7028                         for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
7029                                 if (fv_ext[j].prot_id == pr->prot_id &&
7030                                     fv_ext[j].off == pr->off) {
7031                                         found = true;
7032
7033                                         /* Store index of field vector */
7034                                         rg->fv_idx[i] = j;
7035                                         rg->fv_mask[i] = mask;
7036                                         break;
7037                                 }
7038
7039                         /* Protocol/offset could not be found, caller gave an
7040                          * invalid pair
7041                          */
7042                         if (!found)
7043                                 return ICE_ERR_PARAM;
7044                 }
7045         }
7046
7047         return ICE_SUCCESS;
7048 }
7049
7050 /**
7051  * ice_find_free_recp_res_idx - find free result indexes for recipe
7052  * @hw: pointer to hardware structure
7053  * @profiles: bitmap of profiles that will be associated with the new recipe
7054  * @free_idx: pointer to variable to receive the free index bitmap
7055  *
7056  * The algorithm used here is:
7057  *      1. When creating a new recipe, create a set P which contains all
7058  *         Profiles that will be associated with our new recipe
7059  *
7060  *      2. For each Profile p in set P:
7061  *          a. Add all recipes associated with Profile p into set R
7062  *          b. Optional : PossibleIndexes &= profile[p].possibleIndexes
7063  *              [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
7064  *              i. Or just assume they all have the same possible indexes:
7065  *                      44, 45, 46, 47
7066  *                      i.e., PossibleIndexes = 0x0000F00000000000
7067  *
7068  *      3. For each Recipe r in set R:
7069  *          a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
7070  *          b. FreeIndexes = UsedIndexes ^ PossibleIndexes
7071  *
7072  *      FreeIndexes will contain the bits indicating the indexes free for use,
7073  *      then the code needs to update the recipe[r].used_result_idx_bits to
7074  *      indicate which indexes were selected for use by this recipe.
7075  */
7076 static u16
7077 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
7078                            ice_bitmap_t *free_idx)
7079 {
7080         ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
7081         ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
7082         ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
7083         u16 bit;
7084
7085         ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
7086         ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
7087         ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
7088         ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
7089
7090         ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
7091
7092         /* For each profile we are going to associate the recipe with, add the
7093          * recipes that are associated with that profile. This will give us
7094          * the set of recipes that our recipe may collide with. Also, determine
7095          * what possible result indexes are usable given this set of profiles.
7096          */
7097         ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
7098                 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
7099                               ICE_MAX_NUM_RECIPES);
7100                 ice_and_bitmap(possible_idx, possible_idx,
7101                                hw->switch_info->prof_res_bm[bit],
7102                                ICE_MAX_FV_WORDS);
7103         }
7104
7105         /* For each recipe that our new recipe may collide with, determine
7106          * which indexes have been used.
7107          */
7108         ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
7109                 ice_or_bitmap(used_idx, used_idx,
7110                               hw->switch_info->recp_list[bit].res_idxs,
7111                               ICE_MAX_FV_WORDS);
7112
7113         ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
7114
7115         /* return number of free indexes */
7116         return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
7117 }
7118
/**
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @profiles: bitmap of profiles that will be associated.
 *
 * Allocates recipe resources, fills in lookup index/mask content for every
 * group in rm->rg_list (adding one extra chaining recipe when more than one
 * group is needed), programs the recipes through the add-recipe AQ command,
 * and mirrors the programmed state into hw->switch_info->recp_list.
 * On success, ownership of the AQ buffer transfers to rm->root_buf.
 */
static enum ice_status
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  ice_bitmap_t *profiles)
{
	ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	enum ice_status status;
	u16 free_res_idx;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	 /* check number of free result indices */
	ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		/* Need one free result index per chained recipe */
		if (rm->n_grp_count > free_res_idx)
			return ICE_ERR_MAX_LIMIT;

		/* account for the extra chaining recipe */
		rm->n_grp_count++;
	}

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
		return ICE_ERR_MAX_LIMIT;

	/* tmp receives existing recipe data from firmware; tmp[0] is later
	 * used as a template for each new recipe element.
	 */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
							    ICE_MAX_NUM_RECIPES,
							    sizeof(*tmp));
	if (!tmp)
		return ICE_ERR_NO_MEMORY;

	buf = (struct ice_aqc_recipe_data_elem *)
		ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
	if (!buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_mem;
	}

	ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
				   NULL);
	if (status || recipe_count == 0)
		goto err_unroll;

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		u8 i;

		status = ice_alloc_recipe(hw, &entry->rid);
		if (status)
			goto err_unroll;

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		buf[recps].content.rid = 0;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
			   ICE_NONDMA_MEM);

		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 * to be 0
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;
		}

		/* Program the group's resolved FV indexes/masks into lookup
		 * slots 1..n (slot 0 stays reserved for the switch ID).
		 */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] =
				CPU_TO_LE16(entry->fv_mask[i]);
		}

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			 * that can be used.
			 */
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
				status = ICE_ERR_MAX_LIMIT;
				goto err_unroll;
			}

			/* Consume one result index for this recipe and move
			 * on to the next free one for the next recipe.
			 */
			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			ice_clear_bit(chain_idx, result_idx_bm);
			chain_idx = ice_find_first_bit(result_idx_bm,
						       ICE_MAX_FV_WORDS);
		}

		/* fill recipe dependencies */
		ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
				ICE_MAX_NUM_RECIPES);
		ice_set_bit(buf[recps].recipe_indx,
			    (ice_bitmap_t *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
		recps++;
	}

	if (rm->n_grp_count == 1) {
		/* Single recipe: it is its own root */
		rm->root_rid = buf[0].recipe_indx;
		ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[0].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			status = ICE_ERR_BAD_PTR;
			goto err_unroll;
		}
		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referreing newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
	} else {
		struct ice_recp_grp_entry *last_chain_entry;
		u16 rid, i;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);
		if (status)
			goto err_unroll;

		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
			sizeof(*last_chain_entry));
		if (!last_chain_entry) {
			status = ICE_ERR_NO_MEMORY;
			goto err_unroll;
		}
		last_chain_entry->rid = rid;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
			   ICE_NONDMA_MEM);
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;
		}

		/* start at lookup slot 1; slot 0 holds the switch ID match */
		i = 1;
		/* update r_bitmap with the recp that is used for chaining */
		ice_set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		/* Point each chaining-recipe lookup at the result index of
		 * one of the chained (non-root) recipes.
		 */
		LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
				    l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
			ice_set_bit(entry->rid, rm->r_bitmap);
		}
		LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[recps].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			status = ICE_ERR_BAD_PTR;
			goto err_unroll;
		}
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

		recps++;
		rm->root_rid = (u8)rid;
	}
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		goto err_unroll;

	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);
	if (status)
		goto err_unroll;

	/* Every recipe that just got created add it to the recipe
	 * book keeping list
	 */
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {
				buf_idx = idx;
				idx_found = true;
			}

		if (!idx_found) {
			status = ICE_ERR_OUT_OF_RANGE;
			goto err_unroll;
		}

		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;

		recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);

		ice_memcpy(&recp->ext_words, entry->r_group.pairs,
			   entry->r_group.n_val_pairs *
			   sizeof(struct ice_fv_word),
			   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
			   sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
		if (!is_root)
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->recp_created = true;
	}
	/* buf is retained via rm->root_buf; only the template scratch buffer
	 * is freed on the success path.
	 */
	rm->root_buf = buf;
	ice_free(hw, tmp);
	return status;

err_unroll:
err_mem:
	ice_free(hw, tmp);
	ice_free(hw, buf);
	return status;
}
7418
7419 /**
7420  * ice_create_recipe_group - creates recipe group
7421  * @hw: pointer to hardware structure
7422  * @rm: recipe management list entry
7423  * @lkup_exts: lookup elements
7424  */
7425 static enum ice_status
7426 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7427                         struct ice_prot_lkup_ext *lkup_exts)
7428 {
7429         enum ice_status status;
7430         u8 recp_count = 0;
7431
7432         rm->n_grp_count = 0;
7433
7434         /* Create recipes for words that are marked not done by packing them
7435          * as best fit.
7436          */
7437         status = ice_create_first_fit_recp_def(hw, lkup_exts,
7438                                                &rm->rg_list, &recp_count);
7439         if (!status) {
7440                 rm->n_grp_count += recp_count;
7441                 rm->n_ext_words = lkup_exts->n_val_words;
7442                 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7443                            sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7444                 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7445                            sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
7446         }
7447
7448         return status;
7449 }
7450
7451 /**
7452  * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7453  * @hw: pointer to hardware structure
7454  * @lkups: lookup elements or match criteria for the advanced recipe, one
7455  *         structure per protocol header
7456  * @lkups_cnt: number of protocols
7457  * @bm: bitmap of field vectors to consider
7458  * @fv_list: pointer to a list that holds the returned field vectors
7459  */
7460 static enum ice_status
7461 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7462            ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7463 {
7464         enum ice_status status;
7465         u8 *prot_ids;
7466         u16 i;
7467
7468         if (!lkups_cnt)
7469                 return ICE_SUCCESS;
7470
7471         prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7472         if (!prot_ids)
7473                 return ICE_ERR_NO_MEMORY;
7474
7475         for (i = 0; i < lkups_cnt; i++)
7476                 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7477                         status = ICE_ERR_CFG;
7478                         goto free_mem;
7479                 }
7480
7481         /* Find field vectors that include all specified protocol types */
7482         status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
7483
7484 free_mem:
7485         ice_free(hw, prot_ids);
7486         return status;
7487 }
7488
7489 /**
7490  * ice_tun_type_match_word - determine if tun type needs a match mask
7491  * @tun_type: tunnel type
7492  * @off: offset of packet flag
7493  * @mask: mask to be used for the tunnel
7494  */
7495 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *off, u16 *mask)
7496 {
7497         switch (tun_type) {
7498         case ICE_SW_TUN_VXLAN_GPE:
7499         case ICE_SW_TUN_GENEVE:
7500         case ICE_SW_TUN_VXLAN:
7501         case ICE_SW_TUN_NVGRE:
7502         case ICE_SW_TUN_UDP:
7503         case ICE_ALL_TUNNELS:
7504         case ICE_SW_TUN_AND_NON_TUN_QINQ:
7505         case ICE_NON_TUN_QINQ:
7506         case ICE_SW_TUN_PPPOE_QINQ:
7507         case ICE_SW_TUN_PPPOE_PAY_QINQ:
7508         case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7509         case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7510                 *mask = ICE_TUN_FLAG_MASK;
7511                 *off = ICE_TUN_FLAG_MDID_OFF(1);
7512                 return true;
7513
7514         case ICE_SW_TUN_AND_NON_TUN:
7515                 *mask = ICE_DIR_FLAG_MASK;
7516                 *off = ICE_TUN_FLAG_MDID_OFF(0);
7517                 return true;
7518
7519         case ICE_SW_TUN_GENEVE_VLAN:
7520         case ICE_SW_TUN_VXLAN_VLAN:
7521                 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
7522                 *off = ICE_TUN_FLAG_MDID_OFF(1);
7523                 return true;
7524
7525         default:
7526                 *mask = 0;
7527                 *off = 0;
7528                 return false;
7529         }
7530 }
7531
7532 /**
7533  * ice_add_special_words - Add words that are not protocols, such as metadata
7534  * @rinfo: other information regarding the rule e.g. priority and action info
7535  * @lkup_exts: lookup word structure
7536  */
7537 static enum ice_status
7538 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7539                       struct ice_prot_lkup_ext *lkup_exts)
7540 {
7541         u16 mask;
7542         u16 off;
7543
7544         /* If this is a tunneled packet, then add recipe index to match the
7545          * tunnel bit in the packet metadata flags. If this is a tun_and_non_tun
7546          * packet, then add recipe index to match the direction bit in the flag.
7547          */
7548         if (ice_tun_type_match_word(rinfo->tun_type, &off, &mask)) {
7549                 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
7550                         u8 word = lkup_exts->n_val_words++;
7551
7552                         lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7553                         lkup_exts->fv_words[word].off = off;
7554                         lkup_exts->field_mask[word] = mask;
7555                 } else {
7556                         return ICE_ERR_MAX_LIMIT;
7557                 }
7558         }
7559
7560         return ICE_SUCCESS;
7561 }
7562
7563 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
7564  * @hw: pointer to hardware structure
7565  * @rinfo: other information regarding the rule e.g. priority and action info
7566  * @bm: pointer to memory for returning the bitmap of field vectors
7567  */
7568 static void
7569 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
7570                          ice_bitmap_t *bm)
7571 {
7572         enum ice_prof_type prof_type;
7573
7574         ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
7575
7576         switch (rinfo->tun_type) {
7577         case ICE_NON_TUN:
7578         case ICE_NON_TUN_QINQ:
7579                 prof_type = ICE_PROF_NON_TUN;
7580                 break;
7581         case ICE_ALL_TUNNELS:
7582                 prof_type = ICE_PROF_TUN_ALL;
7583                 break;
7584         case ICE_SW_TUN_VXLAN_GPE:
7585         case ICE_SW_TUN_GENEVE:
7586         case ICE_SW_TUN_GENEVE_VLAN:
7587         case ICE_SW_TUN_VXLAN:
7588         case ICE_SW_TUN_VXLAN_VLAN:
7589         case ICE_SW_TUN_UDP:
7590         case ICE_SW_TUN_GTP:
7591                 prof_type = ICE_PROF_TUN_UDP;
7592                 break;
7593         case ICE_SW_TUN_NVGRE:
7594                 prof_type = ICE_PROF_TUN_GRE;
7595                 break;
7596         case ICE_SW_TUN_PPPOE:
7597         case ICE_SW_TUN_PPPOE_QINQ:
7598                 prof_type = ICE_PROF_TUN_PPPOE;
7599                 break;
7600         case ICE_SW_TUN_PPPOE_PAY:
7601         case ICE_SW_TUN_PPPOE_PAY_QINQ:
7602                 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
7603                 return;
7604         case ICE_SW_TUN_PPPOE_IPV4:
7605         case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7606                 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
7607                 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7608                 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7609                 return;
7610         case ICE_SW_TUN_PPPOE_IPV4_TCP:
7611                 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7612                 return;
7613         case ICE_SW_TUN_PPPOE_IPV4_UDP:
7614                 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7615                 return;
7616         case ICE_SW_TUN_PPPOE_IPV6:
7617         case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7618                 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
7619                 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7620                 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7621                 return;
7622         case ICE_SW_TUN_PPPOE_IPV6_TCP:
7623                 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7624                 return;
7625         case ICE_SW_TUN_PPPOE_IPV6_UDP:
7626                 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7627                 return;
7628         case ICE_SW_TUN_PROFID_IPV6_ESP:
7629         case ICE_SW_TUN_IPV6_ESP:
7630                 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
7631                 return;
7632         case ICE_SW_TUN_PROFID_IPV6_AH:
7633         case ICE_SW_TUN_IPV6_AH:
7634                 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
7635                 return;
7636         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7637         case ICE_SW_TUN_IPV6_L2TPV3:
7638                 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
7639                 return;
7640         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7641         case ICE_SW_TUN_IPV6_NAT_T:
7642                 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
7643                 return;
7644         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7645                 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
7646                 return;
7647         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7648                 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
7649                 return;
7650         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7651                 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
7652                 return;
7653         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7654                 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
7655                 return;
7656         case ICE_SW_TUN_IPV4_NAT_T:
7657                 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
7658                 return;
7659         case ICE_SW_TUN_IPV4_L2TPV3:
7660                 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
7661                 return;
7662         case ICE_SW_TUN_IPV4_ESP:
7663                 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
7664                 return;
7665         case ICE_SW_TUN_IPV4_AH:
7666                 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
7667                 return;
7668         case ICE_SW_IPV4_TCP:
7669                 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
7670                 return;
7671         case ICE_SW_IPV4_UDP:
7672                 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
7673                 return;
7674         case ICE_SW_IPV6_TCP:
7675                 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
7676                 return;
7677         case ICE_SW_IPV6_UDP:
7678                 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
7679                 return;
7680         case ICE_SW_TUN_IPV4_GTPU_NO_PAY:
7681                 ice_set_bit(ICE_PROFID_IPV4_GTPU_TEID, bm);
7682                 return;
7683         case ICE_SW_TUN_IPV6_GTPU_NO_PAY:
7684                 ice_set_bit(ICE_PROFID_IPV6_GTPU_TEID, bm);
7685                 return;
7686         case ICE_SW_TUN_IPV4_GTPU_IPV4:
7687                 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
7688                 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7689                 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7690                 return;
7691         case ICE_SW_TUN_IPV4_GTPU_IPV4_UDP:
7692                 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7693                 return;
7694         case ICE_SW_TUN_IPV4_GTPU_IPV4_TCP:
7695                 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7696                 return;
7697         case ICE_SW_TUN_IPV4_GTPU_EH_IPV4:
7698                 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
7699                 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7700                 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7701                 return;
7702         case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP:
7703                 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7704                 return;
7705         case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP:
7706                 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7707                 return;
7708         case ICE_SW_TUN_IPV6_GTPU_IPV4:
7709                 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
7710                 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7711                 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7712                 return;
7713         case ICE_SW_TUN_IPV6_GTPU_IPV4_UDP:
7714                 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7715                 return;
7716         case ICE_SW_TUN_IPV6_GTPU_IPV4_TCP:
7717                 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7718                 return;
7719         case ICE_SW_TUN_IPV6_GTPU_EH_IPV4:
7720                 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
7721                 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7722                 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7723                 return;
7724         case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP:
7725                 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7726                 return;
7727         case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP:
7728                 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7729                 return;
7730         case ICE_SW_TUN_IPV4_GTPU_IPV6:
7731                 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
7732                 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7733                 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7734                 return;
7735         case ICE_SW_TUN_IPV4_GTPU_IPV6_UDP:
7736                 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7737                 return;
7738         case ICE_SW_TUN_IPV4_GTPU_IPV6_TCP:
7739                 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7740                 return;
7741         case ICE_SW_TUN_IPV4_GTPU_EH_IPV6:
7742                 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
7743                 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7744                 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7745                 return;
7746         case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP:
7747                 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7748                 return;
7749         case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP:
7750                 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7751                 return;
7752         case ICE_SW_TUN_IPV6_GTPU_IPV6:
7753                 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
7754                 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7755                 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7756                 return;
7757         case ICE_SW_TUN_IPV6_GTPU_IPV6_UDP:
7758                 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7759                 return;
7760         case ICE_SW_TUN_IPV6_GTPU_IPV6_TCP:
7761                 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7762                 return;
7763         case ICE_SW_TUN_IPV6_GTPU_EH_IPV6:
7764                 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
7765                 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7766                 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7767                 return;
7768         case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP:
7769                 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7770                 return;
7771         case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP:
7772                 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7773                 return;
7774         case ICE_SW_TUN_AND_NON_TUN:
7775         case ICE_SW_TUN_AND_NON_TUN_QINQ:
7776         default:
7777                 prof_type = ICE_PROF_ALL;
7778                 break;
7779         }
7780
7781         ice_get_sw_fv_bitmap(hw, prof_type, bm);
7782 }
7783
7784 /**
7785  * ice_is_prof_rule - determine if rule type is a profile rule
7786  * @type: the rule type
7787  *
7788  * if the rule type is a profile rule, that means that there no field value
7789  * match required, in this case just a profile hit is required.
7790  */
7791 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
7792 {
7793         switch (type) {
7794         case ICE_SW_TUN_AND_NON_TUN:
7795         case ICE_SW_TUN_PROFID_IPV6_ESP:
7796         case ICE_SW_TUN_PROFID_IPV6_AH:
7797         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7798         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7799         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7800         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7801         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7802         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7803                 return true;
7804         default:
7805                 break;
7806         }
7807
7808         return false;
7809 }
7810
7811 /**
7812  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7813  * @hw: pointer to hardware structure
7814  * @lkups: lookup elements or match criteria for the advanced recipe, one
7815  *  structure per protocol header
7816  * @lkups_cnt: number of protocols
7817  * @rinfo: other information regarding the rule e.g. priority and action info
7818  * @rid: return the recipe ID of the recipe created
7819  */
7820 static enum ice_status
7821 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7822                    u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7823 {
7824         ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7825         ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7826         struct ice_prot_lkup_ext *lkup_exts;
7827         struct ice_recp_grp_entry *r_entry;
7828         struct ice_sw_fv_list_entry *fvit;
7829         struct ice_recp_grp_entry *r_tmp;
7830         struct ice_sw_fv_list_entry *tmp;
7831         enum ice_status status = ICE_SUCCESS;
7832         struct ice_sw_recipe *rm;
7833         u8 i;
7834
7835         if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7836                 return ICE_ERR_PARAM;
7837
7838         lkup_exts = (struct ice_prot_lkup_ext *)
7839                 ice_malloc(hw, sizeof(*lkup_exts));
7840         if (!lkup_exts)
7841                 return ICE_ERR_NO_MEMORY;
7842
7843         /* Determine the number of words to be matched and if it exceeds a
7844          * recipe's restrictions
7845          */
7846         for (i = 0; i < lkups_cnt; i++) {
7847                 u16 count;
7848
7849                 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7850                         status = ICE_ERR_CFG;
7851                         goto err_free_lkup_exts;
7852                 }
7853
7854                 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7855                 if (!count) {
7856                         status = ICE_ERR_CFG;
7857                         goto err_free_lkup_exts;
7858                 }
7859         }
7860
7861         rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7862         if (!rm) {
7863                 status = ICE_ERR_NO_MEMORY;
7864                 goto err_free_lkup_exts;
7865         }
7866
7867         /* Get field vectors that contain fields extracted from all the protocol
7868          * headers being programmed.
7869          */
7870         INIT_LIST_HEAD(&rm->fv_list);
7871         INIT_LIST_HEAD(&rm->rg_list);
7872
7873         /* Get bitmap of field vectors (profiles) that are compatible with the
7874          * rule request; only these will be searched in the subsequent call to
7875          * ice_get_fv.
7876          */
7877         ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7878
7879         /* If it is a packet to match any, add a lookup element to match direction
7880          * flag of source interface.
7881          */
7882         if (rinfo->tun_type == ICE_SW_TUN_AND_NON_TUN &&
7883             lkups_cnt < ICE_MAX_CHAIN_WORDS) {
7884                 lkups[lkups_cnt].type = ICE_FLG_DIR;
7885                 lkups_cnt++;
7886         }
7887
7888         status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7889         if (status)
7890                 goto err_unroll;
7891
7892         /* Create any special protocol/offset pairs, such as looking at tunnel
7893          * bits by extracting metadata
7894          */
7895         status = ice_add_special_words(rinfo, lkup_exts);
7896         if (status)
7897                 goto err_free_lkup_exts;
7898
7899         /* Group match words into recipes using preferred recipe grouping
7900          * criteria.
7901          */
7902         status = ice_create_recipe_group(hw, rm, lkup_exts);
7903         if (status)
7904                 goto err_unroll;
7905
7906         /* set the recipe priority if specified */
7907         rm->priority = (u8)rinfo->priority;
7908
7909         /* Find offsets from the field vector. Pick the first one for all the
7910          * recipes.
7911          */
7912         status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7913         if (status)
7914                 goto err_unroll;
7915
7916         /* An empty FV list means to use all the profiles returned in the
7917          * profile bitmap
7918          */
7919         if (LIST_EMPTY(&rm->fv_list)) {
7920                 u16 j;
7921
7922                 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7923                         struct ice_sw_fv_list_entry *fvl;
7924
7925                         fvl = (struct ice_sw_fv_list_entry *)
7926                                 ice_malloc(hw, sizeof(*fvl));
7927                         if (!fvl)
7928                                 goto err_unroll;
7929                         fvl->fv_ptr = NULL;
7930                         fvl->profile_id = j;
7931                         LIST_ADD(&fvl->list_entry, &rm->fv_list);
7932                 }
7933         }
7934
7935         /* get bitmap of all profiles the recipe will be associated with */
7936         ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7937         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7938                             list_entry) {
7939                 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7940                 ice_set_bit((u16)fvit->profile_id, profiles);
7941         }
7942
7943         /* Look for a recipe which matches our requested fv / mask list */
7944         *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type, rinfo->priority);
7945         if (*rid < ICE_MAX_NUM_RECIPES)
7946                 /* Success if found a recipe that match the existing criteria */
7947                 goto err_unroll;
7948
7949         rm->tun_type = rinfo->tun_type;
7950         /* Recipe we need does not exist, add a recipe */
7951         status = ice_add_sw_recipe(hw, rm, profiles);
7952         if (status)
7953                 goto err_unroll;
7954
7955         /* Associate all the recipes created with all the profiles in the
7956          * common field vector.
7957          */
7958         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7959                             list_entry) {
7960                 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
7961                 u16 j;
7962
7963                 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7964                                                       (u8 *)r_bitmap, NULL);
7965                 if (status)
7966                         goto err_unroll;
7967
7968                 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7969                               ICE_MAX_NUM_RECIPES);
7970                 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7971                 if (status)
7972                         goto err_unroll;
7973
7974                 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7975                                                       (u8 *)r_bitmap,
7976                                                       NULL);
7977                 ice_release_change_lock(hw);
7978
7979                 if (status)
7980                         goto err_unroll;
7981
7982                 /* Update profile to recipe bitmap array */
7983                 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7984                               ICE_MAX_NUM_RECIPES);
7985
7986                 /* Update recipe to profile bitmap array */
7987                 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7988                         ice_set_bit((u16)fvit->profile_id,
7989                                     recipe_to_profile[j]);
7990         }
7991
7992         *rid = rm->root_rid;
7993         ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7994                    lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
7995 err_unroll:
7996         LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7997                                  ice_recp_grp_entry, l_entry) {
7998                 LIST_DEL(&r_entry->l_entry);
7999                 ice_free(hw, r_entry);
8000         }
8001
8002         LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
8003                                  list_entry) {
8004                 LIST_DEL(&fvit->list_entry);
8005                 ice_free(hw, fvit);
8006         }
8007
8008         if (rm->root_buf)
8009                 ice_free(hw, rm->root_buf);
8010
8011         ice_free(hw, rm);
8012
8013 err_free_lkup_exts:
8014         ice_free(hw, lkup_exts);
8015
8016         return status;
8017 }
8018
8019 /**
8020  * ice_find_dummy_packet - find dummy packet by tunnel type
8021  *
8022  * @lkups: lookup elements or match criteria for the advanced recipe, one
8023  *         structure per protocol header
8024  * @lkups_cnt: number of protocols
8025  * @tun_type: tunnel type from the match criteria
8026  * @pkt: dummy packet to fill according to filter match criteria
8027  * @pkt_len: packet length of dummy packet
8028  * @offsets: pointer to receive the pointer to the offsets for the packet
8029  */
8030 static void
8031 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8032                       enum ice_sw_tunnel_type tun_type, const u8 **pkt,
8033                       u16 *pkt_len,
8034                       const struct ice_dummy_pkt_offsets **offsets)
8035 {
8036         bool tcp = false, udp = false, ipv6 = false, vlan = false;
8037         bool gre = false, mpls = false;
8038         u16 i;
8039
8040         for (i = 0; i < lkups_cnt; i++) {
8041                 if (lkups[i].type == ICE_UDP_ILOS)
8042                         udp = true;
8043                 else if (lkups[i].type == ICE_TCP_IL)
8044                         tcp = true;
8045                 else if (lkups[i].type == ICE_IPV6_OFOS)
8046                         ipv6 = true;
8047                 else if (lkups[i].type == ICE_VLAN_OFOS)
8048                         vlan = true;
8049                 else if (lkups[i].type == ICE_ETYPE_OL &&
8050                          lkups[i].h_u.ethertype.ethtype_id ==
8051                                 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
8052                          lkups[i].m_u.ethertype.ethtype_id ==
8053                                 CPU_TO_BE16(0xFFFF))
8054                         ipv6 = true;
8055                 else if (lkups[i].type == ICE_IPV4_OFOS &&
8056                          lkups[i].h_u.ipv4_hdr.protocol ==
8057                                 ICE_IPV4_NVGRE_PROTO_ID &&
8058                          lkups[i].m_u.ipv4_hdr.protocol ==
8059                                 0xFF)
8060                         gre = true;
8061                 else if (lkups[i].type == ICE_PPPOE &&
8062                          lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
8063                                 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
8064                          lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
8065                                 0xFFFF)
8066                         ipv6 = true;
8067                 else if (lkups[i].type == ICE_IPV4_IL &&
8068                          lkups[i].h_u.ipv4_hdr.protocol ==
8069                                 ICE_TCP_PROTO_ID &&
8070                          lkups[i].m_u.ipv4_hdr.protocol ==
8071                                 0xFF)
8072                         tcp = true;
8073                 else if (lkups[i].type == ICE_ETYPE_OL &&
8074                          lkups[i].h_u.ethertype.ethtype_id ==
8075                                 CPU_TO_BE16(ICE_MPLS_ETHER_ID) &&
8076                          lkups[i].m_u.ethertype.ethtype_id == 0xFFFF)
8077                         mpls = true;
8078         }
8079
8080         if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
8081              tun_type == ICE_NON_TUN_QINQ) && ipv6) {
8082                 if (tcp) {
8083                         *pkt = dummy_qinq_ipv6_tcp_pkt;
8084                         *pkt_len = sizeof(dummy_qinq_ipv6_tcp_pkt);
8085                         *offsets = dummy_qinq_ipv6_tcp_packet_offsets;
8086                         return;
8087                 }
8088
8089                 if (udp) {
8090                         *pkt = dummy_qinq_ipv6_udp_pkt;
8091                         *pkt_len = sizeof(dummy_qinq_ipv6_udp_pkt);
8092                         *offsets = dummy_qinq_ipv6_udp_packet_offsets;
8093                         return;
8094                 }
8095
8096                 *pkt = dummy_qinq_ipv6_pkt;
8097                 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
8098                 *offsets = dummy_qinq_ipv6_packet_offsets;
8099                 return;
8100         } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
8101                            tun_type == ICE_NON_TUN_QINQ) {
8102                 if (tcp) {
8103                         *pkt = dummy_qinq_ipv4_tcp_pkt;
8104                         *pkt_len = sizeof(dummy_qinq_ipv4_tcp_pkt);
8105                         *offsets = dummy_qinq_ipv4_tcp_packet_offsets;
8106                         return;
8107                 }
8108
8109                 if (udp) {
8110                         *pkt = dummy_qinq_ipv4_udp_pkt;
8111                         *pkt_len = sizeof(dummy_qinq_ipv4_udp_pkt);
8112                         *offsets = dummy_qinq_ipv4_udp_packet_offsets;
8113                         return;
8114                 }
8115
8116                 *pkt = dummy_qinq_ipv4_pkt;
8117                 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
8118                 *offsets = dummy_qinq_ipv4_packet_offsets;
8119                 return;
8120         }
8121
8122         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
8123                 *pkt = dummy_qinq_pppoe_ipv6_packet;
8124                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
8125                 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
8126                 return;
8127         } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
8128                 *pkt = dummy_qinq_pppoe_ipv4_pkt;
8129                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
8130                 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
8131                 return;
8132         } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ && ipv6) {
8133                 *pkt = dummy_qinq_pppoe_ipv6_packet;
8134                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
8135                 *offsets = dummy_qinq_pppoe_packet_offsets;
8136                 return;
8137         } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
8138                         tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
8139                 *pkt = dummy_qinq_pppoe_ipv4_pkt;
8140                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
8141                 *offsets = dummy_qinq_pppoe_packet_offsets;
8142                 return;
8143         }
8144
8145         if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
8146                 *pkt = dummy_ipv4_gtpu_ipv4_packet;
8147                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
8148                 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
8149                 return;
8150         } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
8151                 *pkt = dummy_ipv6_gtp_packet;
8152                 *pkt_len = sizeof(dummy_ipv6_gtp_packet);
8153                 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
8154                 return;
8155         }
8156
8157         if (tun_type == ICE_SW_TUN_IPV4_ESP) {
8158                 *pkt = dummy_ipv4_esp_pkt;
8159                 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
8160                 *offsets = dummy_ipv4_esp_packet_offsets;
8161                 return;
8162         }
8163
8164         if (tun_type == ICE_SW_TUN_IPV6_ESP) {
8165                 *pkt = dummy_ipv6_esp_pkt;
8166                 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
8167                 *offsets = dummy_ipv6_esp_packet_offsets;
8168                 return;
8169         }
8170
8171         if (tun_type == ICE_SW_TUN_IPV4_AH) {
8172                 *pkt = dummy_ipv4_ah_pkt;
8173                 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
8174                 *offsets = dummy_ipv4_ah_packet_offsets;
8175                 return;
8176         }
8177
8178         if (tun_type == ICE_SW_TUN_IPV6_AH) {
8179                 *pkt = dummy_ipv6_ah_pkt;
8180                 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
8181                 *offsets = dummy_ipv6_ah_packet_offsets;
8182                 return;
8183         }
8184
8185         if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
8186                 *pkt = dummy_ipv4_nat_pkt;
8187                 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
8188                 *offsets = dummy_ipv4_nat_packet_offsets;
8189                 return;
8190         }
8191
8192         if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
8193                 *pkt = dummy_ipv6_nat_pkt;
8194                 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
8195                 *offsets = dummy_ipv6_nat_packet_offsets;
8196                 return;
8197         }
8198
8199         if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
8200                 *pkt = dummy_ipv4_l2tpv3_pkt;
8201                 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
8202                 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
8203                 return;
8204         }
8205
8206         if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
8207                 *pkt = dummy_ipv6_l2tpv3_pkt;
8208                 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
8209                 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
8210                 return;
8211         }
8212
8213         if (tun_type == ICE_SW_TUN_GTP) {
8214                 *pkt = dummy_udp_gtp_packet;
8215                 *pkt_len = sizeof(dummy_udp_gtp_packet);
8216                 *offsets = dummy_udp_gtp_packet_offsets;
8217                 return;
8218         }
8219
8220         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4 ||
8221             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4) {
8222                 *pkt = dummy_ipv4_gtpu_ipv4_packet;
8223                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
8224                 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
8225                 return;
8226         }
8227
8228         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_UDP ||
8229             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP) {
8230                 *pkt = dummy_ipv4_gtpu_ipv4_udp_packet;
8231                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet);
8232                 *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets;
8233                 return;
8234         }
8235
8236         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_TCP ||
8237             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP) {
8238                 *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet;
8239                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet);
8240                 *offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets;
8241                 return;
8242         }
8243
8244         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6 ||
8245             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6) {
8246                 *pkt = dummy_ipv4_gtpu_ipv6_packet;
8247                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
8248                 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
8249                 return;
8250         }
8251
8252         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_UDP ||
8253             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP) {
8254                 *pkt = dummy_ipv4_gtpu_ipv6_udp_packet;
8255                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet);
8256                 *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets;
8257                 return;
8258         }
8259
8260         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_TCP ||
8261             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP) {
8262                 *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet;
8263                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet);
8264                 *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets;
8265                 return;
8266         }
8267
8268         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4 ||
8269             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4) {
8270                 *pkt = dummy_ipv6_gtpu_ipv4_packet;
8271                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
8272                 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
8273                 return;
8274         }
8275
8276         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_UDP ||
8277             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP) {
8278                 *pkt = dummy_ipv6_gtpu_ipv4_udp_packet;
8279                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet);
8280                 *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets;
8281                 return;
8282         }
8283
8284         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_TCP ||
8285             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP) {
8286                 *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet;
8287                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet);
8288                 *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets;
8289                 return;
8290         }
8291
8292         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6 ||
8293             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6) {
8294                 *pkt = dummy_ipv6_gtpu_ipv6_packet;
8295                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
8296                 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
8297                 return;
8298         }
8299
8300         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_UDP ||
8301             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP) {
8302                 *pkt = dummy_ipv6_gtpu_ipv6_udp_packet;
8303                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet);
8304                 *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets;
8305                 return;
8306         }
8307
8308         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_TCP ||
8309             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP) {
8310                 *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet;
8311                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet);
8312                 *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets;
8313                 return;
8314         }
8315
8316         if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
8317                 *pkt = dummy_pppoe_ipv6_packet;
8318                 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8319                 *offsets = dummy_pppoe_packet_offsets;
8320                 return;
8321         } else if (tun_type == ICE_SW_TUN_PPPOE ||
8322                 tun_type == ICE_SW_TUN_PPPOE_PAY) {
8323                 *pkt = dummy_pppoe_ipv4_packet;
8324                 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8325                 *offsets = dummy_pppoe_packet_offsets;
8326                 return;
8327         }
8328
8329         if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
8330                 *pkt = dummy_pppoe_ipv4_packet;
8331                 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8332                 *offsets = dummy_pppoe_packet_ipv4_offsets;
8333                 return;
8334         }
8335
8336         if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
8337                 *pkt = dummy_pppoe_ipv4_tcp_packet;
8338                 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
8339                 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
8340                 return;
8341         }
8342
8343         if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
8344                 *pkt = dummy_pppoe_ipv4_udp_packet;
8345                 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
8346                 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
8347                 return;
8348         }
8349
8350         if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
8351                 *pkt = dummy_pppoe_ipv6_packet;
8352                 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8353                 *offsets = dummy_pppoe_packet_ipv6_offsets;
8354                 return;
8355         }
8356
8357         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
8358                 *pkt = dummy_pppoe_ipv6_tcp_packet;
8359                 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
8360                 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
8361                 return;
8362         }
8363
8364         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
8365                 *pkt = dummy_pppoe_ipv6_udp_packet;
8366                 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
8367                 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
8368                 return;
8369         }
8370
8371         if (tun_type == ICE_SW_IPV4_TCP) {
8372                 *pkt = dummy_tcp_packet;
8373                 *pkt_len = sizeof(dummy_tcp_packet);
8374                 *offsets = dummy_tcp_packet_offsets;
8375                 return;
8376         }
8377
8378         if (tun_type == ICE_SW_IPV4_UDP) {
8379                 *pkt = dummy_udp_packet;
8380                 *pkt_len = sizeof(dummy_udp_packet);
8381                 *offsets = dummy_udp_packet_offsets;
8382                 return;
8383         }
8384
8385         if (tun_type == ICE_SW_IPV6_TCP) {
8386                 *pkt = dummy_tcp_ipv6_packet;
8387                 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8388                 *offsets = dummy_tcp_ipv6_packet_offsets;
8389                 return;
8390         }
8391
8392         if (tun_type == ICE_SW_IPV6_UDP) {
8393                 *pkt = dummy_udp_ipv6_packet;
8394                 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8395                 *offsets = dummy_udp_ipv6_packet_offsets;
8396                 return;
8397         }
8398
8399         if (tun_type == ICE_ALL_TUNNELS) {
8400                 *pkt = dummy_gre_udp_packet;
8401                 *pkt_len = sizeof(dummy_gre_udp_packet);
8402                 *offsets = dummy_gre_udp_packet_offsets;
8403                 return;
8404         }
8405
8406         if (tun_type == ICE_SW_TUN_NVGRE || gre) {
8407                 if (tcp) {
8408                         *pkt = dummy_gre_tcp_packet;
8409                         *pkt_len = sizeof(dummy_gre_tcp_packet);
8410                         *offsets = dummy_gre_tcp_packet_offsets;
8411                         return;
8412                 }
8413
8414                 *pkt = dummy_gre_udp_packet;
8415                 *pkt_len = sizeof(dummy_gre_udp_packet);
8416                 *offsets = dummy_gre_udp_packet_offsets;
8417                 return;
8418         }
8419
8420         if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
8421             tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
8422             tun_type == ICE_SW_TUN_GENEVE_VLAN ||
8423             tun_type == ICE_SW_TUN_VXLAN_VLAN) {
8424                 if (tcp) {
8425                         *pkt = dummy_udp_tun_tcp_packet;
8426                         *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
8427                         *offsets = dummy_udp_tun_tcp_packet_offsets;
8428                         return;
8429                 }
8430
8431                 *pkt = dummy_udp_tun_udp_packet;
8432                 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
8433                 *offsets = dummy_udp_tun_udp_packet_offsets;
8434                 return;
8435         }
8436
8437         if (udp && !ipv6) {
8438                 if (vlan) {
8439                         *pkt = dummy_vlan_udp_packet;
8440                         *pkt_len = sizeof(dummy_vlan_udp_packet);
8441                         *offsets = dummy_vlan_udp_packet_offsets;
8442                         return;
8443                 }
8444                 *pkt = dummy_udp_packet;
8445                 *pkt_len = sizeof(dummy_udp_packet);
8446                 *offsets = dummy_udp_packet_offsets;
8447                 return;
8448         } else if (udp && ipv6) {
8449                 if (vlan) {
8450                         *pkt = dummy_vlan_udp_ipv6_packet;
8451                         *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
8452                         *offsets = dummy_vlan_udp_ipv6_packet_offsets;
8453                         return;
8454                 }
8455                 *pkt = dummy_udp_ipv6_packet;
8456                 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8457                 *offsets = dummy_udp_ipv6_packet_offsets;
8458                 return;
8459         } else if ((tcp && ipv6) || ipv6) {
8460                 if (vlan) {
8461                         *pkt = dummy_vlan_tcp_ipv6_packet;
8462                         *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
8463                         *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
8464                         return;
8465                 }
8466                 *pkt = dummy_tcp_ipv6_packet;
8467                 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8468                 *offsets = dummy_tcp_ipv6_packet_offsets;
8469                 return;
8470         }
8471
8472         if (vlan) {
8473                 *pkt = dummy_vlan_tcp_packet;
8474                 *pkt_len = sizeof(dummy_vlan_tcp_packet);
8475                 *offsets = dummy_vlan_tcp_packet_offsets;
8476         }  else if (mpls) {
8477                 *pkt = dummy_mpls_packet;
8478                 *pkt_len = sizeof(dummy_mpls_packet);
8479                 *offsets = dummy_mpls_packet_offsets;
8480         } else {
8481                 *pkt = dummy_tcp_packet;
8482                 *pkt_len = sizeof(dummy_tcp_packet);
8483                 *offsets = dummy_tcp_packet_offsets;
8484         }
8485 }
8486
8487 /**
8488  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
8489  *
8490  * @lkups: lookup elements or match criteria for the advanced recipe, one
8491  *         structure per protocol header
8492  * @lkups_cnt: number of protocols
8493  * @s_rule: stores rule information from the match criteria
8494  * @dummy_pkt: dummy packet to fill according to filter match criteria
8495  * @pkt_len: packet length of dummy packet
8496  * @offsets: offset info for the dummy packet
8497  */
8498 static enum ice_status
8499 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8500                           struct ice_aqc_sw_rules_elem *s_rule,
8501                           const u8 *dummy_pkt, u16 pkt_len,
8502                           const struct ice_dummy_pkt_offsets *offsets)
8503 {
8504         u8 *pkt;
8505         u16 i;
8506
8507         /* Start with a packet with a pre-defined/dummy content. Then, fill
8508          * in the header values to be looked up or matched.
8509          */
8510         pkt = s_rule->pdata.lkup_tx_rx.hdr;
8511
8512         ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
8513
8514         for (i = 0; i < lkups_cnt; i++) {
8515                 enum ice_protocol_type type;
8516                 u16 offset = 0, len = 0, j;
8517                 bool found = false;
8518
8519                 /* find the start of this layer; it should be found since this
8520                  * was already checked when search for the dummy packet
8521                  */
8522                 type = lkups[i].type;
8523                 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
8524                         if (type == offsets[j].type) {
8525                                 offset = offsets[j].offset;
8526                                 found = true;
8527                                 break;
8528                         }
8529                 }
8530                 /* this should never happen in a correct calling sequence */
8531                 if (!found)
8532                         return ICE_ERR_PARAM;
8533
8534                 switch (lkups[i].type) {
8535                 case ICE_MAC_OFOS:
8536                 case ICE_MAC_IL:
8537                         len = sizeof(struct ice_ether_hdr);
8538                         break;
8539                 case ICE_ETYPE_OL:
8540                         len = sizeof(struct ice_ethtype_hdr);
8541                         break;
8542                 case ICE_VLAN_OFOS:
8543                 case ICE_VLAN_EX:
8544                 case ICE_VLAN_IN:
8545                         len = sizeof(struct ice_vlan_hdr);
8546                         break;
8547                 case ICE_IPV4_OFOS:
8548                 case ICE_IPV4_IL:
8549                         len = sizeof(struct ice_ipv4_hdr);
8550                         break;
8551                 case ICE_IPV6_OFOS:
8552                 case ICE_IPV6_IL:
8553                         len = sizeof(struct ice_ipv6_hdr);
8554                         break;
8555                 case ICE_TCP_IL:
8556                 case ICE_UDP_OF:
8557                 case ICE_UDP_ILOS:
8558                         len = sizeof(struct ice_l4_hdr);
8559                         break;
8560                 case ICE_SCTP_IL:
8561                         len = sizeof(struct ice_sctp_hdr);
8562                         break;
8563                 case ICE_NVGRE:
8564                         len = sizeof(struct ice_nvgre);
8565                         break;
8566                 case ICE_VXLAN:
8567                 case ICE_GENEVE:
8568                 case ICE_VXLAN_GPE:
8569                         len = sizeof(struct ice_udp_tnl_hdr);
8570                         break;
8571
8572                 case ICE_GTP:
8573                 case ICE_GTP_NO_PAY:
8574                         len = sizeof(struct ice_udp_gtp_hdr);
8575                         break;
8576                 case ICE_PPPOE:
8577                         len = sizeof(struct ice_pppoe_hdr);
8578                         break;
8579                 case ICE_ESP:
8580                         len = sizeof(struct ice_esp_hdr);
8581                         break;
8582                 case ICE_NAT_T:
8583                         len = sizeof(struct ice_nat_t_hdr);
8584                         break;
8585                 case ICE_AH:
8586                         len = sizeof(struct ice_ah_hdr);
8587                         break;
8588                 case ICE_L2TPV3:
8589                         len = sizeof(struct ice_l2tpv3_sess_hdr);
8590                         break;
8591                 default:
8592                         return ICE_ERR_PARAM;
8593                 }
8594
8595                 /* the length should be a word multiple */
8596                 if (len % ICE_BYTES_PER_WORD)
8597                         return ICE_ERR_CFG;
8598
8599                 /* We have the offset to the header start, the length, the
8600                  * caller's header values and mask. Use this information to
8601                  * copy the data into the dummy packet appropriately based on
8602                  * the mask. Note that we need to only write the bits as
8603                  * indicated by the mask to make sure we don't improperly write
8604                  * over any significant packet data.
8605                  */
8606                 for (j = 0; j < len / sizeof(u16); j++)
8607                         if (((u16 *)&lkups[i].m_u)[j])
8608                                 ((u16 *)(pkt + offset))[j] =
8609                                         (((u16 *)(pkt + offset))[j] &
8610                                          ~((u16 *)&lkups[i].m_u)[j]) |
8611                                         (((u16 *)&lkups[i].h_u)[j] &
8612                                          ((u16 *)&lkups[i].m_u)[j]);
8613         }
8614
8615         s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
8616
8617         return ICE_SUCCESS;
8618 }
8619
8620 /**
8621  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8622  * @hw: pointer to the hardware structure
8623  * @tun_type: tunnel type
8624  * @pkt: dummy packet to fill in
8625  * @offsets: offset info for the dummy packet
8626  */
8627 static enum ice_status
8628 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8629                         u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
8630 {
8631         u16 open_port, i;
8632
8633         switch (tun_type) {
8634         case ICE_SW_TUN_AND_NON_TUN:
8635         case ICE_SW_TUN_VXLAN_GPE:
8636         case ICE_SW_TUN_VXLAN:
8637         case ICE_SW_TUN_VXLAN_VLAN:
8638         case ICE_SW_TUN_UDP:
8639                 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8640                         return ICE_ERR_CFG;
8641                 break;
8642
8643         case ICE_SW_TUN_GENEVE:
8644         case ICE_SW_TUN_GENEVE_VLAN:
8645                 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8646                         return ICE_ERR_CFG;
8647                 break;
8648
8649         default:
8650                 /* Nothing needs to be done for this tunnel type */
8651                 return ICE_SUCCESS;
8652         }
8653
8654         /* Find the outer UDP protocol header and insert the port number */
8655         for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8656                 if (offsets[i].type == ICE_UDP_OF) {
8657                         struct ice_l4_hdr *hdr;
8658                         u16 offset;
8659
8660                         offset = offsets[i].offset;
8661                         hdr = (struct ice_l4_hdr *)&pkt[offset];
8662                         hdr->dst_port = CPU_TO_BE16(open_port);
8663
8664                         return ICE_SUCCESS;
8665                 }
8666         }
8667
8668         return ICE_ERR_CFG;
8669 }
8670
8671 /**
8672  * ice_find_adv_rule_entry - Search a rule entry
8673  * @hw: pointer to the hardware structure
8674  * @lkups: lookup elements or match criteria for the advanced recipe, one
8675  *         structure per protocol header
8676  * @lkups_cnt: number of protocols
8677  * @recp_id: recipe ID for which we are finding the rule
8678  * @rinfo: other information regarding the rule e.g. priority and action info
8679  *
8680  * Helper function to search for a given advance rule entry
8681  * Returns pointer to entry storing the rule if found
8682  */
8683 static struct ice_adv_fltr_mgmt_list_entry *
8684 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8685                         u16 lkups_cnt, u16 recp_id,
8686                         struct ice_adv_rule_info *rinfo)
8687 {
8688         struct ice_adv_fltr_mgmt_list_entry *list_itr;
8689         struct ice_switch_info *sw = hw->switch_info;
8690         int i;
8691
8692         LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8693                             ice_adv_fltr_mgmt_list_entry, list_entry) {
8694                 bool lkups_matched = true;
8695
8696                 if (lkups_cnt != list_itr->lkups_cnt)
8697                         continue;
8698                 for (i = 0; i < list_itr->lkups_cnt; i++)
8699                         if (memcmp(&list_itr->lkups[i], &lkups[i],
8700                                    sizeof(*lkups))) {
8701                                 lkups_matched = false;
8702                                 break;
8703                         }
8704                 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8705                     rinfo->tun_type == list_itr->rule_info.tun_type &&
8706                     lkups_matched)
8707                         return list_itr;
8708         }
8709         return NULL;
8710 }
8711
8712 /**
8713  * ice_adv_add_update_vsi_list
8714  * @hw: pointer to the hardware structure
8715  * @m_entry: pointer to current adv filter management list entry
8716  * @cur_fltr: filter information from the book keeping entry
8717  * @new_fltr: filter information with the new VSI to be added
8718  *
8719  * Call AQ command to add or update previously created VSI list with new VSI.
8720  *
8721  * Helper function to do book keeping associated with adding filter information
8722  * The algorithm to do the booking keeping is described below :
8723  * When a VSI needs to subscribe to a given advanced filter
8724  *      if only one VSI has been added till now
8725  *              Allocate a new VSI list and add two VSIs
8726  *              to this list using switch rule command
8727  *              Update the previously created switch rule with the
8728  *              newly created VSI list ID
8729  *      if a VSI list was previously created
8730  *              Add the new VSI to the previously created VSI list set
8731  *              using the update switch rule command
8732  */
8733 static enum ice_status
8734 ice_adv_add_update_vsi_list(struct ice_hw *hw,
8735                             struct ice_adv_fltr_mgmt_list_entry *m_entry,
8736                             struct ice_adv_rule_info *cur_fltr,
8737                             struct ice_adv_rule_info *new_fltr)
8738 {
8739         enum ice_status status;
8740         u16 vsi_list_id = 0;
8741
8742         if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8743             cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8744             cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
8745                 return ICE_ERR_NOT_IMPL;
8746
8747         if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8748              new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
8749             (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8750              cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
8751                 return ICE_ERR_NOT_IMPL;
8752
8753         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
8754                  /* Only one entry existed in the mapping and it was not already
8755                   * a part of a VSI list. So, create a VSI list with the old and
8756                   * new VSIs.
8757                   */
8758                 struct ice_fltr_info tmp_fltr;
8759                 u16 vsi_handle_arr[2];
8760
8761                 /* A rule already exists with the new VSI being added */
8762                 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
8763                     new_fltr->sw_act.fwd_id.hw_vsi_id)
8764                         return ICE_ERR_ALREADY_EXISTS;
8765
8766                 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
8767                 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
8768                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
8769                                                   &vsi_list_id,
8770                                                   ICE_SW_LKUP_LAST);
8771                 if (status)
8772                         return status;
8773
8774                 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8775                 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
8776                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
8777                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
8778                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
8779                 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
8780
8781                 /* Update the previous switch rule of "forward to VSI" to
8782                  * "fwd to VSI list"
8783                  */
8784                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8785                 if (status)
8786                         return status;
8787
8788                 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
8789                 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
8790                 m_entry->vsi_list_info =
8791                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
8792                                                 vsi_list_id);
8793         } else {
8794                 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
8795
8796                 if (!m_entry->vsi_list_info)
8797                         return ICE_ERR_CFG;
8798
8799                 /* A rule already exists with the new VSI being added */
8800                 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
8801                         return ICE_SUCCESS;
8802
8803                 /* Update the previously created VSI list set with
8804                  * the new VSI ID passed in
8805                  */
8806                 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
8807
8808                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
8809                                                   vsi_list_id, false,
8810                                                   ice_aqc_opc_update_sw_rules,
8811                                                   ICE_SW_LKUP_LAST);
8812                 /* update VSI list mapping info with new VSI ID */
8813                 if (!status)
8814                         ice_set_bit(vsi_handle,
8815                                     m_entry->vsi_list_info->vsi_map);
8816         }
8817         if (!status)
8818                 m_entry->vsi_count++;
8819         return status;
8820 }
8821
8822 /**
8823  * ice_add_adv_rule - helper function to create an advanced switch rule
8824  * @hw: pointer to the hardware structure
8825  * @lkups: information on the words that needs to be looked up. All words
8826  * together makes one recipe
8827  * @lkups_cnt: num of entries in the lkups array
8828  * @rinfo: other information related to the rule that needs to be programmed
8829  * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
8830  *               ignored is case of error.
8831  *
8832  * This function can program only 1 rule at a time. The lkups is used to
8833  * describe the all the words that forms the "lookup" portion of the recipe.
8834  * These words can span multiple protocols. Callers to this function need to
8835  * pass in a list of protocol headers with lookup information along and mask
8836  * that determines which words are valid from the given protocol header.
8837  * rinfo describes other information related to this rule such as forwarding
8838  * IDs, priority of this rule, etc.
8839  */
8840 enum ice_status
8841 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8842                  u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
8843                  struct ice_rule_query_data *added_entry)
8844 {
8845         struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
8846         u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
8847         const struct ice_dummy_pkt_offsets *pkt_offsets;
8848         struct ice_aqc_sw_rules_elem *s_rule = NULL;
8849         struct LIST_HEAD_TYPE *rule_head;
8850         struct ice_switch_info *sw;
8851         enum ice_status status;
8852         const u8 *pkt = NULL;
8853         bool prof_rule;
8854         u16 word_cnt;
8855         u32 act = 0;
8856         u8 q_rgn;
8857
8858         /* Initialize profile to result index bitmap */
8859         if (!hw->switch_info->prof_res_bm_init) {
8860                 hw->switch_info->prof_res_bm_init = 1;
8861                 ice_init_prof_result_bm(hw);
8862         }
8863
8864         prof_rule = ice_is_prof_rule(rinfo->tun_type);
8865         if (!prof_rule && !lkups_cnt)
8866                 return ICE_ERR_PARAM;
8867
8868         /* get # of words we need to match */
8869         word_cnt = 0;
8870         for (i = 0; i < lkups_cnt; i++) {
8871                 u16 j, *ptr;
8872
8873                 ptr = (u16 *)&lkups[i].m_u;
8874                 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
8875                         if (ptr[j] != 0)
8876                                 word_cnt++;
8877         }
8878
8879         if (prof_rule) {
8880                 if (word_cnt > ICE_MAX_CHAIN_WORDS)
8881                         return ICE_ERR_PARAM;
8882         } else {
8883                 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
8884                         return ICE_ERR_PARAM;
8885         }
8886
8887         /* make sure that we can locate a dummy packet */
8888         ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
8889                               &pkt_offsets);
8890         if (!pkt) {
8891                 status = ICE_ERR_PARAM;
8892                 goto err_ice_add_adv_rule;
8893         }
8894
8895         if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8896               rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
8897               rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8898               rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
8899                 return ICE_ERR_CFG;
8900
8901         vsi_handle = rinfo->sw_act.vsi_handle;
8902         if (!ice_is_vsi_valid(hw, vsi_handle))
8903                 return ICE_ERR_PARAM;
8904
8905         if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8906                 rinfo->sw_act.fwd_id.hw_vsi_id =
8907                         ice_get_hw_vsi_num(hw, vsi_handle);
8908         if (rinfo->sw_act.flag & ICE_FLTR_TX)
8909                 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
8910
8911         status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8912         if (status)
8913                 return status;
8914         m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8915         if (m_entry) {
8916                 /* we have to add VSI to VSI_LIST and increment vsi_count.
8917                  * Also Update VSI list so that we can change forwarding rule
8918                  * if the rule already exists, we will check if it exists with
8919                  * same vsi_id, if not then add it to the VSI list if it already
8920                  * exists if not then create a VSI list and add the existing VSI
8921                  * ID and the new VSI ID to the list
8922                  * We will add that VSI to the list
8923                  */
8924                 status = ice_adv_add_update_vsi_list(hw, m_entry,
8925                                                      &m_entry->rule_info,
8926                                                      rinfo);
8927                 if (added_entry) {
8928                         added_entry->rid = rid;
8929                         added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8930                         added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8931                 }
8932                 return status;
8933         }
8934         rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8935         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8936         if (!s_rule)
8937                 return ICE_ERR_NO_MEMORY;
8938         if (!rinfo->flags_info.act_valid)
8939                 act |= ICE_SINGLE_ACT_LAN_ENABLE;
8940         else
8941                 act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
8942                                                 ICE_SINGLE_ACT_LB_ENABLE);
8943
8944         switch (rinfo->sw_act.fltr_act) {
8945         case ICE_FWD_TO_VSI:
8946                 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8947                         ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8948                 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8949                 break;
8950         case ICE_FWD_TO_Q:
8951                 act |= ICE_SINGLE_ACT_TO_Q;
8952                 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8953                        ICE_SINGLE_ACT_Q_INDEX_M;
8954                 break;
8955         case ICE_FWD_TO_QGRP:
8956                 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8957                         (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8958                 act |= ICE_SINGLE_ACT_TO_Q;
8959                 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8960                        ICE_SINGLE_ACT_Q_INDEX_M;
8961                 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8962                        ICE_SINGLE_ACT_Q_REGION_M;
8963                 break;
8964         case ICE_DROP_PACKET:
8965                 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8966                        ICE_SINGLE_ACT_VALID_BIT;
8967                 break;
8968         default:
8969                 status = ICE_ERR_CFG;
8970                 goto err_ice_add_adv_rule;
8971         }
8972
8973         /* set the rule LOOKUP type based on caller specified 'RX'
8974          * instead of hardcoding it to be either LOOKUP_TX/RX
8975          *
8976          * for 'RX' set the source to be the port number
8977          * for 'TX' set the source to be the source HW VSI number (determined
8978          * by caller)
8979          */
8980         if (rinfo->rx) {
8981                 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8982                 s_rule->pdata.lkup_tx_rx.src =
8983                         CPU_TO_LE16(hw->port_info->lport);
8984         } else {
8985                 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8986                 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8987         }
8988
8989         s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8990         s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
8991
8992         status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8993                                            pkt_len, pkt_offsets);
8994         if (status)
8995                 goto err_ice_add_adv_rule;
8996
8997         if (rinfo->tun_type != ICE_NON_TUN &&
8998             rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8999                 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
9000                                                  s_rule->pdata.lkup_tx_rx.hdr,
9001                                                  pkt_offsets);
9002                 if (status)
9003                         goto err_ice_add_adv_rule;
9004         }
9005
9006         status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
9007                                  rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
9008                                  NULL);
9009         if (status)
9010                 goto err_ice_add_adv_rule;
9011         adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
9012                 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
9013         if (!adv_fltr) {
9014                 status = ICE_ERR_NO_MEMORY;
9015                 goto err_ice_add_adv_rule;
9016         }
9017
9018         adv_fltr->lkups = (struct ice_adv_lkup_elem *)
9019                 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
9020                            ICE_NONDMA_TO_NONDMA);
9021         if (!adv_fltr->lkups && !prof_rule) {
9022                 status = ICE_ERR_NO_MEMORY;
9023                 goto err_ice_add_adv_rule;
9024         }
9025
9026         adv_fltr->lkups_cnt = lkups_cnt;
9027         adv_fltr->rule_info = *rinfo;
9028         adv_fltr->rule_info.fltr_rule_id =
9029                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
9030         sw = hw->switch_info;
9031         sw->recp_list[rid].adv_rule = true;
9032         rule_head = &sw->recp_list[rid].filt_rules;
9033
9034         if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
9035                 adv_fltr->vsi_count = 1;
9036
9037         /* Add rule entry to book keeping list */
9038         LIST_ADD(&adv_fltr->list_entry, rule_head);
9039         if (added_entry) {
9040                 added_entry->rid = rid;
9041                 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
9042                 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
9043         }
9044 err_ice_add_adv_rule:
9045         if (status && adv_fltr) {
9046                 ice_free(hw, adv_fltr->lkups);
9047                 ice_free(hw, adv_fltr);
9048         }
9049
9050         ice_free(hw, s_rule);
9051
9052         return status;
9053 }
9054
/**
 * ice_adv_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Detaches @vsi_handle from the VSI list used by an advanced rule. When only
 * one VSI remains afterwards, the rule is converted back into a plain
 * "forward to VSI" action and the now-unused VSI list is removed and freed.
 */
static enum ice_status
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status;
	u16 vsi_list_id;

	/* Only rules currently forwarding to a VSI list can be updated here */
	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;

	/* ICE_SW_LKUP_LAST marks this as an advanced (non-legacy) lookup */
	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		struct ice_fltr_info tmp_fltr;
		u16 rem_vsi_handle;

		/* Exactly one VSI left: locate it so the rule can forward to
		 * it directly instead of going through the VSI list.
		 */
		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
						    ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;

		/* Update the previous switch rule of "fwd to VSI list" to
		 * "fwd to VSI" since only one VSI is left on the list
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);
			return status;
		}
		fm_list->vsi_list_info->ref_cnt--;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
9144
/**
 * ice_rem_adv_rule - removes existing advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *         together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: Its the pointer to the rule information for the rule
 *
 * This function can be used to remove 1 rule at a time. The lkups is
 * used to describe all the words that forms the "lookup" portion of the
 * rule. These words can span multiple protocols. Callers to this function
 * need to pass in a list of protocol headers with lookup information along
 * and mask that determines which words are valid from the given protocol
 * header. rinfo describes other information related to this rule such as
 * forwarding IDs, priority of this rule, etc.
 */
enum ice_status
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	bool remove_rule = false;
	u16 i, rid, vsi_handle;

	/* Re-derive the protocol/offset word list so the recipe created for
	 * this rule can be located again.
	 */
	ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
			return ICE_ERR_CFG;

		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
		if (!count)
			return ICE_ERR_CFG;
	}

	/* Create any special protocol/offset pairs, such as looking at tunnel
	 * bits by extracting metadata
	 */
	status = ice_add_special_words(rinfo, &lkup_exts);
	if (status)
		return status;

	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type, rinfo->priority);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return ICE_ERR_PARAM;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	if (!list_elem)
		return ICE_SUCCESS;
	/* NOTE(review): the entry lookup above runs before rule_lock is
	 * taken; presumably callers serialize removals externally - confirm.
	 */
	ice_acquire_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
		remove_rule = true;
	} else if (list_elem->vsi_count > 1) {
		/* Rule forwards to several VSIs: only detach this VSI from
		 * the VSI list and keep the rule itself.
		 */
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	} else {
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status) {
			ice_release_lock(rule_lock);
			return status;
		}
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}
	ice_release_lock(rule_lock);
	if (remove_rule) {
		struct ice_aqc_sw_rules_elem *s_rule;
		u16 rule_buf_sz;

		/* Deleting needs no packet header; the rule index alone
		 * identifies the entry in hardware.
		 */
		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
		s_rule = (struct ice_aqc_sw_rules_elem *)
			ice_malloc(hw, rule_buf_sz);
		if (!s_rule)
			return ICE_ERR_NO_MEMORY;
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 rule_buf_sz, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		/* DOES_NOT_EXIST means hardware no longer has the rule (e.g.
		 * after a reset); still drop the bookkeeping entry then.
		 */
		if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
			struct ice_switch_info *sw = hw->switch_info;

			ice_acquire_lock(rule_lock);
			LIST_DEL(&list_elem->list_entry);
			ice_free(hw, list_elem->lkups);
			ice_free(hw, list_elem);
			ice_release_lock(rule_lock);
			if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
				sw->recp_list[rid].adv_rule = false;
		}
		ice_free(hw, s_rule);
	}
	return status;
}
9250
9251 /**
9252  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
9253  * @hw: pointer to the hardware structure
9254  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
9255  *
9256  * This function is used to remove 1 rule at a time. The removal is based on
9257  * the remove_entry parameter. This function will remove rule for a given
9258  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
9259  */
9260 enum ice_status
9261 ice_rem_adv_rule_by_id(struct ice_hw *hw,
9262                        struct ice_rule_query_data *remove_entry)
9263 {
9264         struct ice_adv_fltr_mgmt_list_entry *list_itr;
9265         struct LIST_HEAD_TYPE *list_head;
9266         struct ice_adv_rule_info rinfo;
9267         struct ice_switch_info *sw;
9268
9269         sw = hw->switch_info;
9270         if (!sw->recp_list[remove_entry->rid].recp_created)
9271                 return ICE_ERR_PARAM;
9272         list_head = &sw->recp_list[remove_entry->rid].filt_rules;
9273         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
9274                             list_entry) {
9275                 if (list_itr->rule_info.fltr_rule_id ==
9276                     remove_entry->rule_id) {
9277                         rinfo = list_itr->rule_info;
9278                         rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
9279                         return ice_rem_adv_rule(hw, list_itr->lkups,
9280                                                 list_itr->lkups_cnt, &rinfo);
9281                 }
9282         }
9283         /* either list is empty or unable to find rule */
9284         return ICE_ERR_DOES_NOT_EXIST;
9285 }
9286
9287 /**
9288  * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
9289  *                       given VSI handle
9290  * @hw: pointer to the hardware structure
9291  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
9292  *
9293  * This function is used to remove all the rules for a given VSI and as soon
9294  * as removing a rule fails, it will return immediately with the error code,
9295  * else it will return ICE_SUCCESS
9296  */
9297 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
9298 {
9299         struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
9300         struct ice_vsi_list_map_info *map_info;
9301         struct LIST_HEAD_TYPE *list_head;
9302         struct ice_adv_rule_info rinfo;
9303         struct ice_switch_info *sw;
9304         enum ice_status status;
9305         u8 rid;
9306
9307         sw = hw->switch_info;
9308         for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
9309                 if (!sw->recp_list[rid].recp_created)
9310                         continue;
9311                 if (!sw->recp_list[rid].adv_rule)
9312                         continue;
9313
9314                 list_head = &sw->recp_list[rid].filt_rules;
9315                 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
9316                                          ice_adv_fltr_mgmt_list_entry,
9317                                          list_entry) {
9318                         rinfo = list_itr->rule_info;
9319
9320                         if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
9321                                 map_info = list_itr->vsi_list_info;
9322                                 if (!map_info)
9323                                         continue;
9324
9325                                 if (!ice_is_bit_set(map_info->vsi_map,
9326                                                     vsi_handle))
9327                                         continue;
9328                         } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
9329                                 continue;
9330                         }
9331
9332                         rinfo.sw_act.vsi_handle = vsi_handle;
9333                         status = ice_rem_adv_rule(hw, list_itr->lkups,
9334                                                   list_itr->lkups_cnt, &rinfo);
9335
9336                         if (status)
9337                                 return status;
9338                 }
9339         }
9340         return ICE_SUCCESS;
9341 }
9342
/**
 * ice_replay_fltr - Replay all the filters stored by a specific list head
 * @hw: pointer to the hardware structure
 * @list_head: list for which filters needs to be replayed
 * @recp_id: Recipe ID for which rules need to be replayed
 *
 * Moves all entries of @list_head onto a temporary list and re-adds each
 * filter through the regular add paths, so the bookkeeping list is rebuilt
 * from scratch. On any failure the remaining temporary entries are freed
 * and the first error is returned.
 */
static enum ice_status
ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sw_recipe *recp_list;
	u8 lport = hw->port_info->lport;
	struct LIST_HEAD_TYPE l_head;

	if (LIST_EMPTY(list_head))
		return status;

	recp_list = &hw->switch_info->recp_list[recp_id];
	/* Move entries from the given list_head to a temporary l_head so that
	 * they can be replayed. Otherwise when trying to re-add the same
	 * filter, the function will return already exists
	 */
	LIST_REPLACE_INIT(list_head, &l_head);

	/* Mark the given list_head empty by reinitializing it so filters
	 * could be added again by *handler
	 */
	LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
			    list_entry) {
		struct ice_fltr_list_entry f_entry;
		u16 vsi_handle;

		f_entry.fltr_info = itr->fltr_info;
		/* Single-VSI, non-VLAN rules can be re-added unchanged */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
			status = ice_add_rule_internal(hw, recp_list, lport,
						       &f_entry);
			if (status != ICE_SUCCESS)
				goto end;
			continue;
		}

		/* Add a filter per VSI separately */
		/* NOTE(review): assumes vsi_list_info is non-NULL whenever
		 * vsi_count >= 2 or the rule is a VLAN rule - confirm.
		 */
		ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
				     ICE_MAX_VSI) {
			if (!ice_is_vsi_valid(hw, vsi_handle))
				break;

			/* Clear the bit so the add path rebuilds the VSI
			 * list membership itself
			 */
			ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
			f_entry.fltr_info.vsi_handle = vsi_handle;
			f_entry.fltr_info.fwd_id.hw_vsi_id =
				ice_get_hw_vsi_num(hw, vsi_handle);
			f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
			if (recp_id == ICE_SW_LKUP_VLAN)
				status = ice_add_vlan_internal(hw, recp_list,
							       &f_entry);
			else
				status = ice_add_rule_internal(hw, recp_list,
							       lport,
							       &f_entry);
			if (status != ICE_SUCCESS)
				goto end;
		}
	}
end:
	/* Clear the filter management list */
	ice_rem_sw_rule_info(hw, &l_head);
	return status;
}
9412
9413 /**
9414  * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
9415  * @hw: pointer to the hardware structure
9416  *
9417  * NOTE: This function does not clean up partially added filters on error.
9418  * It is up to caller of the function to issue a reset or fail early.
9419  */
9420 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
9421 {
9422         struct ice_switch_info *sw = hw->switch_info;
9423         enum ice_status status = ICE_SUCCESS;
9424         u8 i;
9425
9426         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9427                 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
9428
9429                 status = ice_replay_fltr(hw, i, head);
9430                 if (status != ICE_SUCCESS)
9431                         return status;
9432         }
9433         return status;
9434 }
9435
/**
 * ice_replay_vsi_fltr - Replay filters for requested VSI
 * @hw: pointer to the hardware structure
 * @pi: pointer to port information structure
 * @sw: pointer to switch info struct for which function replays filters
 * @vsi_handle: driver VSI handle
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 *
 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
 * It is required to pass valid VSI handle.
 */
static enum ice_status
ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
		    struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
		    struct LIST_HEAD_TYPE *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sw_recipe *recp_list;
	u16 hw_vsi_id;

	if (LIST_EMPTY(list_head))
		return status;
	recp_list = &sw->recp_list[recp_id];
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
			    list_entry) {
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		/* Single-VSI, non-VLAN rule targeting this VSI directly:
		 * re-add it as-is
		 */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_list,
						       pi->lport,
						       &f_entry);
			if (status != ICE_SUCCESS)
				goto end;
			continue;
		}
		/* Otherwise the rule uses a VSI list; skip it unless this
		 * VSI is a member of that list
		 */
		if (!itr->vsi_list_info ||
		    !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
			continue;
		/* Clearing it so that the logic can add it back */
		ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, recp_list, &f_entry);
		else
			status = ice_add_rule_internal(hw, recp_list,
						       pi->lport,
						       &f_entry);
		if (status != ICE_SUCCESS)
			goto end;
	}
end:
	return status;
}
9502
9503 /**
9504  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9505  * @hw: pointer to the hardware structure
9506  * @vsi_handle: driver VSI handle
9507  * @list_head: list for which filters need to be replayed
9508  *
9509  * Replay the advanced rule for the given VSI.
9510  */
9511 static enum ice_status
9512 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9513                         struct LIST_HEAD_TYPE *list_head)
9514 {
9515         struct ice_rule_query_data added_entry = { 0 };
9516         struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9517         enum ice_status status = ICE_SUCCESS;
9518
9519         if (LIST_EMPTY(list_head))
9520                 return status;
9521         LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9522                             list_entry) {
9523                 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9524                 u16 lk_cnt = adv_fltr->lkups_cnt;
9525
9526                 if (vsi_handle != rinfo->sw_act.vsi_handle)
9527                         continue;
9528                 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9529                                           &added_entry);
9530                 if (status)
9531                         break;
9532         }
9533         return status;
9534 }
9535
9536 /**
9537  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9538  * @hw: pointer to the hardware structure
9539  * @pi: pointer to port information structure
9540  * @vsi_handle: driver VSI handle
9541  *
9542  * Replays filters for requested VSI via vsi_handle.
9543  */
9544 enum ice_status
9545 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9546                         u16 vsi_handle)
9547 {
9548         struct ice_switch_info *sw = hw->switch_info;
9549         enum ice_status status;
9550         u8 i;
9551
9552         /* Update the recipes that were created */
9553         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9554                 struct LIST_HEAD_TYPE *head;
9555
9556                 head = &sw->recp_list[i].filt_replay_rules;
9557                 if (!sw->recp_list[i].adv_rule)
9558                         status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9559                                                      head);
9560                 else
9561                         status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
9562                 if (status != ICE_SUCCESS)
9563                         return status;
9564         }
9565
9566         return ICE_SUCCESS;
9567 }
9568
9569 /**
9570  * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
9571  * @hw: pointer to the HW struct
9572  * @sw: pointer to switch info struct for which function removes filters
9573  *
9574  * Deletes the filter replay rules for given switch
9575  */
9576 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9577 {
9578         u8 i;
9579
9580         if (!sw)
9581                 return;
9582
9583         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9584                 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9585                         struct LIST_HEAD_TYPE *l_head;
9586
9587                         l_head = &sw->recp_list[i].filt_replay_rules;
9588                         if (!sw->recp_list[i].adv_rule)
9589                                 ice_rem_sw_rule_info(hw, l_head);
9590                         else
9591                                 ice_rem_adv_rule_info(hw, l_head);
9592                 }
9593         }
9594 }
9595
9596 /**
9597  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
9598  * @hw: pointer to the HW struct
9599  *
9600  * Deletes the filter replay rules.
9601  */
9602 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
9603 {
9604         ice_rm_sw_replay_rule_info(hw, hw->switch_info);
9605 }