net/ice/base: allocate and free RSS global lookup table
[dpdk.git] / drivers / net / ice / base / ice_switch.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
7 #include "ice_flow.h"
8
/* Byte offsets into the dummy ethernet header and protocol constants used
 * when programming switch filter rules.
 */
#define ICE_ETH_DA_OFFSET               0       /* destination MAC address */
#define ICE_ETH_ETHTYPE_OFFSET          12      /* EtherType field */
#define ICE_ETH_VLAN_TCI_OFFSET         14      /* VLAN TCI (802.1Q tagged) */
#define ICE_MAX_VLAN_ID                 0xFFF   /* VLAN ID is a 12-bit field */
#define ICE_IPV4_NVGRE_PROTO_ID         0x002F  /* IPv4 protocol number: GRE */
#define ICE_PPP_IPV6_PROTO_ID           0x0057  /* PPP protocol ID for IPv6 */
#define ICE_IPV6_ETHER_ID               0x86DD  /* EtherType for IPv6 */
#define ICE_TCP_PROTO_ID                0x06    /* IP protocol number: TCP */
17
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *      In case of VLAN filter first two bytes defines ether type (0x8100)
 *      and remaining two bytes are placeholder for programming a given VLAN ID
 *      In case of Ether type filter it is treated as header without VLAN tag
 *      and byte 12 and 13 is used to program a given Ether type instead
 */
/* DUMMY_ETH_HDR_LEN is expected to be 16 to match the bytes below */
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
                                                        0x2, 0, 0, 0, 0, 0,
                                                        0x81, 0, 0, 0};
36
/* Pairs a protocol header type with its byte offset inside a dummy packet;
 * used to locate where match fields must be written into the training packet.
 */
struct ice_dummy_pkt_offsets {
        enum ice_protocol_type type;
        u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};
41
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_NVGRE,            34 },
        { ICE_MAC_IL,           42 },
        { ICE_IPV4_IL,          56 },
        { ICE_TCP_IL,           76 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv4 + TCP */
static const u8 dummy_gre_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 */

        0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x2F, 0x00, 0x00, /* protocol = GRE (0x2F) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x06, 0x00, 0x00, /* protocol = TCP (0x06) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x02, 0x20, 0x00,
        0x00, 0x00, 0x00, 0x00
};
86
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_NVGRE,            34 },
        { ICE_MAC_IL,           42 },
        { ICE_IPV4_IL,          56 },
        { ICE_UDP_ILOS,         76 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv4 + UDP */
static const u8 dummy_gre_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 */

        0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x2F, 0x00, 0x00, /* protocol = GRE (0x2F) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol = UDP (0x11) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
        0x00, 0x08, 0x00, 0x00,
};
128
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * MAC + IPv4 + TCP dummy packet; the three tunnel types share offset 42
 */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_VXLAN,            42 },
        { ICE_GENEVE,           42 },
        { ICE_VXLAN_GPE,        42 },
        { ICE_MAC_IL,           50 },
        { ICE_IPV4_IL,          64 },
        { ICE_TCP_IL,           84 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv4 + TCP */
static const u8 dummy_udp_tun_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 */

        0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
        0x00, 0x01, 0x00, 0x00,
        0x40, 0x11, 0x00, 0x00, /* protocol = UDP (0x11) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
        0x00, 0x46, 0x00, 0x00,

        0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
        0x00, 0x01, 0x00, 0x00,
        0x40, 0x06, 0x00, 0x00, /* protocol = TCP (0x06) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x02, 0x20, 0x00,
        0x00, 0x00, 0x00, 0x00
};
179
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * MAC + IPv4 + UDP dummy packet; the three tunnel types share offset 42
 */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_VXLAN,            42 },
        { ICE_GENEVE,           42 },
        { ICE_VXLAN_GPE,        42 },
        { ICE_MAC_IL,           50 },
        { ICE_IPV4_IL,          64 },
        { ICE_UDP_ILOS,         84 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv4 + UDP */
static const u8 dummy_udp_tun_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 */

        0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol = UDP (0x11) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
        0x00, 0x3a, 0x00, 0x00,

        0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol = UDP (0x11) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
        0x00, 0x08, 0x00, 0x00,
};
227
/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_ILOS,         34 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + UDP */
static const u8 dummy_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 */

        0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol = UDP (0x11) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
        0x00, 0x08, 0x00, 0x00,

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
256
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14 },
        { ICE_IPV4_OFOS,        18 },
        { ICE_UDP_ILOS,         38 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* C-tag (802.1Q), IPv4:UDP dummy packet */
static const u8 dummy_vlan_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00,             /* ICE_ETYPE_OL 12 (0x8100 = VLAN) */

        0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */

        0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol = UDP (0x11) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
        0x00, 0x08, 0x00, 0x00,

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
288
/* offset info for MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_TCP_IL,           34 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + TCP */
static const u8 dummy_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 */

        0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x06, 0x00, 0x00, /* protocol = TCP (0x06) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
320
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14 },
        { ICE_IPV4_OFOS,        18 },
        { ICE_TCP_IL,           38 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* C-tag (802.1Q), IPv4:TCP dummy packet */
static const u8 dummy_vlan_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00,             /* ICE_ETYPE_OL 12 (0x8100 = VLAN) */

        0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */

        0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x06, 0x00, 0x00, /* protocol = TCP (0x06) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
355
/* offset info for MAC + IPv6 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_TCP_IL,           54 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv6 + TCP */
static const u8 dummy_tcp_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x86, 0xDD,             /* ICE_ETYPE_OL 12 (IPv6) */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 (was mislabeled 40) */
        0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
390
/* C-tag (802.1Q): IPv6 + TCP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_tcp_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14 },
        { ICE_IPV6_OFOS,        18 },
        { ICE_TCP_IL,           58 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* C-tag (802.1Q), IPv6 + TCP dummy packet */
static const u8 dummy_vlan_tcp_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00,             /* ICE_ETYPE_OL 12 (0x8100 = VLAN) */

        0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
        0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
431
/* IPv6 + UDP */
static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_ILOS,         54 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* IPv6 + UDP dummy packet */
static const u8 dummy_udp_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x86, 0xDD,             /* ICE_ETYPE_OL 12 (IPv6) */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 (was mislabeled 40) */
        0x00, 0x10, 0x11, 0x00, /* Next header UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
        0x00, 0x10, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
468
/* C-tag (802.1Q): IPv6 + UDP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_udp_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14 },
        { ICE_IPV6_OFOS,        18 },
        { ICE_UDP_ILOS,         58 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* C-tag (802.1Q), IPv6 + UDP dummy packet */
static const u8 dummy_vlan_udp_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00,             /* ICE_ETYPE_OL 12 (0x8100 = VLAN) */

        0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
        0x00, 0x08, 0x11, 0x00, /* Next header UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
        0x00, 0x08, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
506
/* offset info for MAC + IPv4 + UDP + GTP-U dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_GTP,              42 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + UDP + GTP-U (with PDU session extension) */
static const u8 dummy_udp_gtp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol = UDP (0x11) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 (dst port 2152 = GTP-U) */
        0x00, 0x1c, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
        0x00, 0x00, 0x00, 0x00,
};
537
/* offset info for MAC + IPv4 + UDP + GTP-U + inner IPv4 dummy packet */
static const
struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_GTP,              42 },
        { ICE_IPV4_IL,          62 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + UDP + GTP-U + inner IPv4 */
static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x40, 0x00,
        0x40, 0x11, 0x00, 0x00, /* protocol = UDP (0x11) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 (port 2152 = GTP-U) */
        0x00, 0x00, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
        0x00, 0x00, 0x00, 0x00,

        0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
        0x00, 0x00, 0x40, 0x00,
        0x40, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,
};
577
/* offset info for MAC + IPv4 + UDP + GTP-U + inner IPv6 dummy packet */
static const
struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_GTP,              42 },
        { ICE_IPV6_IL,          62 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + UDP + GTP-U + inner IPv6 */
static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x40, 0x00,
        0x40, 0x11, 0x00, 0x00, /* protocol = UDP (0x11) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 (port 2152 = GTP-U) */
        0x00, 0x00, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
        0x00, 0x00, 0x00, 0x00,

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
        0x00, 0x00, 0x3b, 0x00, /* Next header = no next header (0x3b) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,
};
623
/* offset info for MAC + IPv6 + UDP + GTP-U + inner IPv4 dummy packet */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_OF,           54 },
        { ICE_GTP,              62 },
        { ICE_IPV4_IL,          82 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv6 + UDP + GTP-U + inner IPv4 */
static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xdd,

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 (port 2152 = GTP-U) */
        0x00, 0x00, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
        0x00, 0x00, 0x00, 0x00,

        0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
        0x00, 0x00, 0x40, 0x00,
        0x40, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,
};
669
/* offset info for MAC + IPv6 + UDP + GTP-U + inner IPv6 dummy packet */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_OF,           54 },
        { ICE_GTP,              62 },
        { ICE_IPV6_IL,          82 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv6 + UDP + GTP-U + inner IPv6 */
static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xdd,

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 (port 2152 = GTP-U) */
        0x00, 0x00, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
        0x00, 0x00, 0x00, 0x00,

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 82 (fixed "ICE_IPV6_OFIL" typo) */
        0x00, 0x00, 0x3b, 0x00, /* Next header = no next header (0x3b) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,
};
720
/* offset info for MAC + IPv4 + UDP + GTP (no payload) dummy packet */
static const
struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_GTP_NO_PAY,       42 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* offset info for MAC + IPv6 + UDP + GTP (no payload) dummy packet */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_OF,           54 },
        { ICE_GTP_NO_PAY,       62 },
        { ICE_PROTOCOL_LAST,    0 },
};
738
/* offset info for MAC + VLAN + PPPoE dummy packet */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14},
        { ICE_PPPOE,            18 },
        { ICE_PROTOCOL_LAST,    0 },
};
746
/* offset info for MAC + VLAN + PPPoE + IPv4 dummy packet */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14},
        { ICE_PPPOE,            18 },
        { ICE_IPV4_OFOS,        26 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + VLAN + PPPoE + IPv4 */
static const u8 dummy_pppoe_ipv4_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00,             /* ICE_ETYPE_OL 12 (0x8100 = VLAN) */

        0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 (0x8864 = PPPoE session) */

        0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
        0x00, 0x16,

        0x00, 0x21,             /* PPP Link Layer 24 (0x0021 = IPv4) */

        0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 26 (offsets table says OFOS,
                                 * not IL as the old comment claimed)
                                 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
778
/* offset info for MAC + VLAN + PPPoE + IPv4 + TCP dummy packet */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14},
        { ICE_PPPOE,            18 },
        { ICE_IPV4_OFOS,        26 },
        { ICE_TCP_IL,           46 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + VLAN + PPPoE + IPv4 + TCP */
static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00,             /* ICE_ETYPE_OL 12 (0x8100 = VLAN) */

        0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 (0x8864 = PPPoE session) */

        0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
        0x00, 0x16,

        0x00, 0x21,             /* PPP Link Layer 24 (0x0021 = IPv4) */

        0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x06, 0x00, 0x00, /* protocol = TCP (0x06) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
818
/* offset info for MAC + VLAN + PPPoE + IPv4 + UDP dummy packet */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14},
        { ICE_PPPOE,            18 },
        { ICE_IPV4_OFOS,        26 },
        { ICE_UDP_ILOS,         46 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + VLAN + PPPoE + IPv4 + UDP */
static const u8 dummy_pppoe_ipv4_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00,             /* ICE_ETYPE_OL 12 (0x8100 = VLAN) */

        0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 (0x8864 = PPPoE session) */

        0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
        0x00, 0x16,

        0x00, 0x21,             /* PPP Link Layer 24 (0x0021 = IPv4) */

        0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol = UDP (0x11) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
        0x00, 0x08, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
855
/* offset info for MAC + VLAN + PPPoE + IPv6 dummy packet */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14},
        { ICE_PPPOE,            18 },
        { ICE_IPV6_OFOS,        26 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + VLAN + PPPoE + IPv6 */
static const u8 dummy_pppoe_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00,             /* ICE_ETYPE_OL 12 (0x8100 = VLAN) */

        0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 (0x8864 = PPPoE session) */

        0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
        0x00, 0x2a,

        0x00, 0x57,             /* PPP Link Layer 24 (0x0057 = IPv6) */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
        0x00, 0x00, 0x3b, 0x00, /* Next header = no next header (0x3b) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
892
/* offset info for MAC + VLAN + PPPoE + IPv6 + TCP dummy packet */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14},
        { ICE_PPPOE,            18 },
        { ICE_IPV6_OFOS,        26 },
        { ICE_TCP_IL,           66 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + VLAN + PPPoE + IPv6 + TCP */
static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00,             /* ICE_ETYPE_OL 12 (0x8100 = VLAN) */

        0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 (0x8864 = PPPoE session) */

        0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
        0x00, 0x2a,

        0x00, 0x57,             /* PPP Link Layer 24 (0x0057 = IPv6) */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
        0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
937
/* Header offsets within dummy_pppoe_ipv6_udp_packet; the list is terminated
 * by the ICE_PROTOCOL_LAST sentinel.
 */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_OFOS,	14},
	{ ICE_PPPOE,		18 },
	{ ICE_IPV6_OFOS,	26 },
	{ ICE_UDP_ILOS,		66 },
	{ ICE_PROTOCOL_LAST,	0 },
};
948
/* Dummy packet template for a single-tagged PPPoE session carrying
 * IPv6 + UDP.
 */
static const u8 dummy_pppoe_ipv6_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 24 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
979
/* Header offsets within dummy_ipv4_esp_pkt */
static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_ESP,			34 },
	{ ICE_PROTOCOL_LAST,	0 },
};
986
/* Dummy packet template for IPv4 + ESP (IPv4 protocol 0x32 = ESP) */
static const u8 dummy_ipv4_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x32, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1003
/* Header offsets within dummy_ipv6_esp_pkt */
static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_ESP,			54 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1010
/* Dummy packet template for IPv6 + ESP (IPv6 next header 0x32 = ESP) */
static const u8 dummy_ipv6_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x08, 0x32, 0x00, /* Next header ESP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1032
/* Header offsets within dummy_ipv4_ah_pkt */
static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_AH,			34 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1039
/* Dummy packet template for IPv4 + AH (IPv4 protocol 0x33 = AH) */
static const u8 dummy_ipv4_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x33, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1057
/* Header offsets within dummy_ipv6_ah_pkt */
static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_AH,			54 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1064
/* Dummy packet template for IPv6 + AH (IPv6 next header 0x33 = AH) */
static const u8 dummy_ipv6_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x33, 0x00, /* Next header AH */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1087
/* Header offsets within dummy_ipv4_nat_pkt */
static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_NAT_T,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1095
/* Dummy packet template for IPv4 NAT-traversal: UDP destination port
 * 0x1194 (4500) encapsulating ESP.
 */
static const u8 dummy_ipv4_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x11, 0x94, /* ICE_UDP_ILOS 34, dst port 4500 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_NAT_T 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1115
/* Header offsets within dummy_ipv6_nat_pkt */
static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_NAT_T,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1123
/* Dummy packet template for IPv6 NAT-traversal: UDP destination port
 * 0x1194 (4500) encapsulating ESP.
 */
static const u8 dummy_ipv6_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP (NAT-T) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x11, 0x94, /* ICE_UDP_ILOS 54, dst port 4500 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_NAT_T 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */

};
1149
/* Header offsets within dummy_ipv4_l2tpv3_pkt */
static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_L2TPV3,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1156
/* Dummy packet template for IPv4 + L2TPv3 (IPv4 protocol 0x73 = L2TP) */
static const u8 dummy_ipv4_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1174
/* Header offsets within dummy_ipv6_l2tpv3_pkt */
static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_L2TPV3,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1181
/* Dummy packet template for IPv6 + L2TPv3 (IPv6 next header 0x73 = L2TP) */
static const u8 dummy_ipv6_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x73, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1204
/* Header offsets within dummy_qinq_ipv4_pkt (double-tagged VLAN) */
static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		14 },
	{ ICE_VLAN_OFOS,	18 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1212
/* Dummy packet template for QinQ (outer ethertype 0x9100) + IPv4 + UDP */
static const u8 dummy_qinq_ipv4_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x91, 0x00,

	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
	0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 18 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
1233
/* Header offsets within dummy_qinq_ipv6_pkt (double-tagged VLAN) */
static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		14 },
	{ ICE_VLAN_OFOS,	18 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1241
/* Dummy packet template for QinQ (outer ethertype 0x9100) + IPv6 + UDP */
static const u8 dummy_qinq_ipv6_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x91, 0x00,

	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
	0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 18 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
1270
/* Header offsets for a QinQ + PPPoE packet with no inner IP header */
static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		14 },
	{ ICE_VLAN_OFOS,	18 },
	{ ICE_PPPOE,		22 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1278
/* Header offsets within dummy_qinq_pppoe_ipv4_pkt */
static const
struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		14 },
	{ ICE_VLAN_OFOS,	18 },
	{ ICE_PPPOE,		22 },
	{ ICE_IPV4_OFOS,	30 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1288
/* Dummy packet template for QinQ + PPPoE session carrying IPv4 */
static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x91, 0x00,

	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 28 */

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 30 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
1311
/* Header offsets within dummy_qinq_pppoe_ipv6_packet */
static const
struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_EX,		14},
	{ ICE_VLAN_OFOS,	18 },
	{ ICE_PPPOE,		22 },
	{ ICE_IPV6_OFOS,	30 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1322
/* Dummy packet template for QinQ + PPPoE session carrying IPv6 with no
 * upper-layer payload (IPv6 next header 0x3b = No Next Header).
 */
static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00,		/* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 28*/

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
	0x00, 0x00, 0x3b, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1351
/* recipe to profile association bitmap: bit j of recipe_to_profile[rid] is
 * set when field-vector profile j is associated with recipe rid; cached from
 * FW and refreshed by ice_get_recp_to_prof_map()
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* profile to recipe association bitmap (inverse of the map above) */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);

/* forward declaration: populates the association bitmaps from FW */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1361
1362 /**
1363  * ice_collect_result_idx - copy result index values
1364  * @buf: buffer that contains the result index
1365  * @recp: the recipe struct to copy data into
1366  */
1367 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1368                                    struct ice_sw_recipe *recp)
1369 {
1370         if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1371                 ice_set_bit(buf->content.result_indx &
1372                             ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1373 }
1374
1375 /**
1376  * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1377  * @rid: recipe ID that we are populating
1378  */
1379 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
1380 {
1381         u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1382         u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1383         u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1384         u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1385         enum ice_sw_tunnel_type tun_type;
1386         u16 i, j, profile_num = 0;
1387         bool non_tun_valid = false;
1388         bool pppoe_valid = false;
1389         bool vxlan_valid = false;
1390         bool gre_valid = false;
1391         bool gtp_valid = false;
1392         bool flag_valid = false;
1393
1394         for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1395                 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1396                         continue;
1397                 else
1398                         profile_num++;
1399
1400                 for (i = 0; i < 12; i++) {
1401                         if (gre_profile[i] == j)
1402                                 gre_valid = true;
1403                 }
1404
1405                 for (i = 0; i < 12; i++) {
1406                         if (vxlan_profile[i] == j)
1407                                 vxlan_valid = true;
1408                 }
1409
1410                 for (i = 0; i < 7; i++) {
1411                         if (pppoe_profile[i] == j)
1412                                 pppoe_valid = true;
1413                 }
1414
1415                 for (i = 0; i < 6; i++) {
1416                         if (non_tun_profile[i] == j)
1417                                 non_tun_valid = true;
1418                 }
1419
1420                 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1421                     j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
1422                         gtp_valid = true;
1423
1424                 if ((j >= ICE_PROFID_IPV4_ESP &&
1425                      j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1426                     (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1427                      j <= ICE_PROFID_IPV6_GTPU_TEID))
1428                         flag_valid = true;
1429         }
1430
1431         if (!non_tun_valid && vxlan_valid)
1432                 tun_type = ICE_SW_TUN_VXLAN;
1433         else if (!non_tun_valid && gre_valid)
1434                 tun_type = ICE_SW_TUN_NVGRE;
1435         else if (!non_tun_valid && pppoe_valid)
1436                 tun_type = ICE_SW_TUN_PPPOE;
1437         else if (!non_tun_valid && gtp_valid)
1438                 tun_type = ICE_SW_TUN_GTP;
1439         else if (non_tun_valid &&
1440                  (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1441                 tun_type = ICE_SW_TUN_AND_NON_TUN;
1442         else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1443                  !pppoe_valid)
1444                 tun_type = ICE_NON_TUN;
1445         else
1446                 tun_type = ICE_NON_TUN;
1447
1448         if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1449                 i = ice_is_bit_set(recipe_to_profile[rid],
1450                                    ICE_PROFID_PPPOE_IPV4_OTHER);
1451                 j = ice_is_bit_set(recipe_to_profile[rid],
1452                                    ICE_PROFID_PPPOE_IPV6_OTHER);
1453                 if (i && !j)
1454                         tun_type = ICE_SW_TUN_PPPOE_IPV4;
1455                 else if (!i && j)
1456                         tun_type = ICE_SW_TUN_PPPOE_IPV6;
1457         }
1458
1459         if (tun_type == ICE_SW_TUN_GTP) {
1460                 if (ice_is_bit_set(recipe_to_profile[rid],
1461                                    ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
1462                         tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1463                 else if (ice_is_bit_set(recipe_to_profile[rid],
1464                                         ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
1465                         tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1466                 else if (ice_is_bit_set(recipe_to_profile[rid],
1467                                         ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
1468                         tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1469                 else if (ice_is_bit_set(recipe_to_profile[rid],
1470                                         ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
1471                         tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1472         }
1473
1474         if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1475                 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1476                         if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1477                                 switch (j) {
1478                                 case ICE_PROFID_IPV4_TCP:
1479                                         tun_type = ICE_SW_IPV4_TCP;
1480                                         break;
1481                                 case ICE_PROFID_IPV4_UDP:
1482                                         tun_type = ICE_SW_IPV4_UDP;
1483                                         break;
1484                                 case ICE_PROFID_IPV6_TCP:
1485                                         tun_type = ICE_SW_IPV6_TCP;
1486                                         break;
1487                                 case ICE_PROFID_IPV6_UDP:
1488                                         tun_type = ICE_SW_IPV6_UDP;
1489                                         break;
1490                                 case ICE_PROFID_PPPOE_PAY:
1491                                         tun_type = ICE_SW_TUN_PPPOE_PAY;
1492                                         break;
1493                                 case ICE_PROFID_PPPOE_IPV4_TCP:
1494                                         tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1495                                         break;
1496                                 case ICE_PROFID_PPPOE_IPV4_UDP:
1497                                         tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1498                                         break;
1499                                 case ICE_PROFID_PPPOE_IPV4_OTHER:
1500                                         tun_type = ICE_SW_TUN_PPPOE_IPV4;
1501                                         break;
1502                                 case ICE_PROFID_PPPOE_IPV6_TCP:
1503                                         tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1504                                         break;
1505                                 case ICE_PROFID_PPPOE_IPV6_UDP:
1506                                         tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1507                                         break;
1508                                 case ICE_PROFID_PPPOE_IPV6_OTHER:
1509                                         tun_type = ICE_SW_TUN_PPPOE_IPV6;
1510                                         break;
1511                                 case ICE_PROFID_IPV4_ESP:
1512                                         tun_type = ICE_SW_TUN_IPV4_ESP;
1513                                         break;
1514                                 case ICE_PROFID_IPV6_ESP:
1515                                         tun_type = ICE_SW_TUN_IPV6_ESP;
1516                                         break;
1517                                 case ICE_PROFID_IPV4_AH:
1518                                         tun_type = ICE_SW_TUN_IPV4_AH;
1519                                         break;
1520                                 case ICE_PROFID_IPV6_AH:
1521                                         tun_type = ICE_SW_TUN_IPV6_AH;
1522                                         break;
1523                                 case ICE_PROFID_IPV4_NAT_T:
1524                                         tun_type = ICE_SW_TUN_IPV4_NAT_T;
1525                                         break;
1526                                 case ICE_PROFID_IPV6_NAT_T:
1527                                         tun_type = ICE_SW_TUN_IPV6_NAT_T;
1528                                         break;
1529                                 case ICE_PROFID_IPV4_PFCP_NODE:
1530                                         tun_type =
1531                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1532                                         break;
1533                                 case ICE_PROFID_IPV6_PFCP_NODE:
1534                                         tun_type =
1535                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1536                                         break;
1537                                 case ICE_PROFID_IPV4_PFCP_SESSION:
1538                                         tun_type =
1539                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1540                                         break;
1541                                 case ICE_PROFID_IPV6_PFCP_SESSION:
1542                                         tun_type =
1543                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1544                                         break;
1545                                 case ICE_PROFID_MAC_IPV4_L2TPV3:
1546                                         tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1547                                         break;
1548                                 case ICE_PROFID_MAC_IPV6_L2TPV3:
1549                                         tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1550                                         break;
1551                                 case ICE_PROFID_IPV4_GTPU_TEID:
1552                                         tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1553                                         break;
1554                                 case ICE_PROFID_IPV6_GTPU_TEID:
1555                                         tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1556                                         break;
1557                                 default:
1558                                         break;
1559                                 }
1560
1561                                 return tun_type;
1562                         }
1563                 }
1564         }
1565
1566         if (vlan && tun_type == ICE_SW_TUN_PPPOE)
1567                 tun_type = ICE_SW_TUN_PPPOE_QINQ;
1568         else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
1569                 tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1570         else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
1571                 tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1572         else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
1573                 tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1574         else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
1575                 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1576         else if (vlan && tun_type == ICE_NON_TUN)
1577                 tun_type = ICE_NON_TUN_QINQ;
1578
1579         return tun_type;
1580 }
1581
1582 /**
1583  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1584  * @hw: pointer to hardware structure
1585  * @recps: struct that we need to populate
1586  * @rid: recipe ID that we are populating
1587  * @refresh_required: true if we should get recipe to profile mapping from FW
1588  *
1589  * This function is used to populate all the necessary entries into our
1590  * bookkeeping so that we have a current list of all the recipes that are
1591  * programmed in the firmware.
1592  */
1593 static enum ice_status
1594 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1595                     bool *refresh_required)
1596 {
1597         ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
1598         struct ice_aqc_recipe_data_elem *tmp;
1599         u16 num_recps = ICE_MAX_NUM_RECIPES;
1600         struct ice_prot_lkup_ext *lkup_exts;
1601         enum ice_status status;
1602         u8 fv_word_idx = 0;
1603         bool vlan = false;
1604         u16 sub_recps;
1605
1606         ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
1607
1608         /* we need a buffer big enough to accommodate all the recipes */
1609         tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
1610                 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
1611         if (!tmp)
1612                 return ICE_ERR_NO_MEMORY;
1613
1614         tmp[0].recipe_indx = rid;
1615         status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1616         /* non-zero status meaning recipe doesn't exist */
1617         if (status)
1618                 goto err_unroll;
1619
1620         /* Get recipe to profile map so that we can get the fv from lkups that
1621          * we read for a recipe from FW. Since we want to minimize the number of
1622          * times we make this FW call, just make one call and cache the copy
1623          * until a new recipe is added. This operation is only required the
1624          * first time to get the changes from FW. Then to search existing
1625          * entries we don't need to update the cache again until another recipe
1626          * gets added.
1627          */
1628         if (*refresh_required) {
1629                 ice_get_recp_to_prof_map(hw);
1630                 *refresh_required = false;
1631         }
1632
1633         /* Start populating all the entries for recps[rid] based on lkups from
1634          * firmware. Note that we are only creating the root recipe in our
1635          * database.
1636          */
1637         lkup_exts = &recps[rid].lkup_exts;
1638
1639         for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1640                 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1641                 struct ice_recp_grp_entry *rg_entry;
1642                 u8 i, prof, idx, prot = 0;
1643                 bool is_root;
1644                 u16 off = 0;
1645
1646                 rg_entry = (struct ice_recp_grp_entry *)
1647                         ice_malloc(hw, sizeof(*rg_entry));
1648                 if (!rg_entry) {
1649                         status = ICE_ERR_NO_MEMORY;
1650                         goto err_unroll;
1651                 }
1652
1653                 idx = root_bufs.recipe_indx;
1654                 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1655
1656                 /* Mark all result indices in this chain */
1657                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1658                         ice_set_bit(root_bufs.content.result_indx &
1659                                     ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
1660
1661                 /* get the first profile that is associated with rid */
1662                 prof = ice_find_first_bit(recipe_to_profile[idx],
1663                                           ICE_MAX_NUM_PROFILES);
1664                 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1665                         u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1666
1667                         rg_entry->fv_idx[i] = lkup_indx;
1668                         rg_entry->fv_mask[i] =
1669                                 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
1670
1671                         /* If the recipe is a chained recipe then all its
1672                          * child recipe's result will have a result index.
1673                          * To fill fv_words we should not use those result
1674                          * index, we only need the protocol ids and offsets.
1675                          * We will skip all the fv_idx which stores result
1676                          * index in them. We also need to skip any fv_idx which
1677                          * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1678                          * valid offset value.
1679                          */
1680                         if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
1681                                            rg_entry->fv_idx[i]) ||
1682                             rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1683                             rg_entry->fv_idx[i] == 0)
1684                                 continue;
1685
1686                         ice_find_prot_off(hw, ICE_BLK_SW, prof,
1687                                           rg_entry->fv_idx[i], &prot, &off);
1688                         lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1689                         lkup_exts->fv_words[fv_word_idx].off = off;
1690                         lkup_exts->field_mask[fv_word_idx] =
1691                                 rg_entry->fv_mask[i];
1692                         if (prot == ICE_META_DATA_ID_HW &&
1693                             off == ICE_TUN_FLAG_MDID_OFF)
1694                                 vlan = true;
1695                         fv_word_idx++;
1696                 }
1697                 /* populate rg_list with the data from the child entry of this
1698                  * recipe
1699                  */
1700                 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
1701
1702                 /* Propagate some data to the recipe database */
1703                 recps[idx].is_root = !!is_root;
1704                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1705                 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1706                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1707                         recps[idx].chain_idx = root_bufs.content.result_indx &
1708                                 ~ICE_AQ_RECIPE_RESULT_EN;
1709                         ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1710                 } else {
1711                         recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1712                 }
1713
1714                 if (!is_root)
1715                         continue;
1716
1717                 /* Only do the following for root recipes entries */
1718                 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1719                            sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
1720                 recps[idx].root_rid = root_bufs.content.rid &
1721                         ~ICE_AQ_RECIPE_ID_IS_ROOT;
1722                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1723         }
1724
1725         /* Complete initialization of the root recipe entry */
1726         lkup_exts->n_val_words = fv_word_idx;
1727         recps[rid].big_recp = (num_recps > 1);
1728         recps[rid].n_grp_count = (u8)num_recps;
1729         recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
1730         recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
1731                 ice_memdup(hw, tmp, recps[rid].n_grp_count *
1732                            sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
1733         if (!recps[rid].root_buf)
1734                 goto err_unroll;
1735
1736         /* Copy result indexes */
1737         ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1738         recps[rid].recp_created = true;
1739
1740 err_unroll:
1741         ice_free(hw, tmp);
1742         return status;
1743 }
1744
1745 /**
1746  * ice_get_recp_to_prof_map - updates recipe to profile mapping
1747  * @hw: pointer to hardware structure
1748  *
1749  * This function is used to populate recipe_to_profile matrix where index to
1750  * this array is the recipe ID and the element is the mapping of which profiles
1751  * is this recipe mapped to.
1752  */
1753 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1754 {
1755         ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1756         u16 i;
1757
1758         for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
1759                 u16 j;
1760
1761                 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1762                 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1763                 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1764                         continue;
1765                 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1766                               ICE_MAX_NUM_RECIPES);
1767                 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
1768                         ice_set_bit(i, recipe_to_profile[j]);
1769         }
1770 }
1771
1772 /**
1773  * ice_init_def_sw_recp - initialize the recipe book keeping tables
1774  * @hw: pointer to the HW struct
1775  * @recp_list: pointer to sw recipe list
1776  *
1777  * Allocate memory for the entire recipe table and initialize the structures/
1778  * entries corresponding to basic recipes.
1779  */
1780 enum ice_status
1781 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1782 {
1783         struct ice_sw_recipe *recps;
1784         u8 i;
1785
1786         recps = (struct ice_sw_recipe *)
1787                 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1788         if (!recps)
1789                 return ICE_ERR_NO_MEMORY;
1790
1791         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1792                 recps[i].root_rid = i;
1793                 INIT_LIST_HEAD(&recps[i].filt_rules);
1794                 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1795                 INIT_LIST_HEAD(&recps[i].rg_list);
1796                 ice_init_lock(&recps[i].filt_rule_lock);
1797         }
1798
1799         *recp_list = recps;
1800
1801         return ICE_SUCCESS;
1802 }
1803
1804 /**
1805  * ice_aq_get_sw_cfg - get switch configuration
1806  * @hw: pointer to the hardware structure
1807  * @buf: pointer to the result buffer
1808  * @buf_size: length of the buffer available for response
1809  * @req_desc: pointer to requested descriptor
1810  * @num_elems: pointer to number of elements
1811  * @cd: pointer to command details structure or NULL
1812  *
1813  * Get switch configuration (0x0200) to be placed in buf.
1814  * This admin command returns information such as initial VSI/port number
1815  * and switch ID it belongs to.
1816  *
1817  * NOTE: *req_desc is both an input/output parameter.
1818  * The caller of this function first calls this function with *request_desc set
1819  * to 0. If the response from f/w has *req_desc set to 0, all the switch
1820  * configuration information has been returned; if non-zero (meaning not all
1821  * the information was returned), the caller should call this function again
1822  * with *req_desc set to the previous value returned by f/w to get the
1823  * next block of switch configuration information.
1824  *
1825  * *num_elems is output only parameter. This reflects the number of elements
1826  * in response buffer. The caller of this function to use *num_elems while
1827  * parsing the response buffer.
1828  */
1829 static enum ice_status
1830 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1831                   u16 buf_size, u16 *req_desc, u16 *num_elems,
1832                   struct ice_sq_cd *cd)
1833 {
1834         struct ice_aqc_get_sw_cfg *cmd;
1835         struct ice_aq_desc desc;
1836         enum ice_status status;
1837
1838         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1839         cmd = &desc.params.get_sw_conf;
1840         cmd->element = CPU_TO_LE16(*req_desc);
1841
1842         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1843         if (!status) {
1844                 *req_desc = LE16_TO_CPU(cmd->element);
1845                 *num_elems = LE16_TO_CPU(cmd->num_elems);
1846         }
1847
1848         return status;
1849 }
1850
1851 /**
1852  * ice_alloc_rss_global_lut - allocate a RSS global LUT
1853  * @hw: pointer to the HW struct
1854  * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
1855  * @global_lut_id: output parameter for the RSS global LUT's ID
1856  */
1857 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
1858 {
1859         struct ice_aqc_alloc_free_res_elem *sw_buf;
1860         enum ice_status status;
1861         u16 buf_len;
1862
1863         buf_len = ice_struct_size(sw_buf, elem, 1);
1864         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1865         if (!sw_buf)
1866                 return ICE_ERR_NO_MEMORY;
1867
1868         sw_buf->num_elems = CPU_TO_LE16(1);
1869         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
1870                                        (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1871                                        ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1872
1873         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
1874         if (status) {
1875                 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
1876                           shared_res ? "shared" : "dedicated", status);
1877                 goto ice_alloc_global_lut_exit;
1878         }
1879
1880         *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1881
1882 ice_alloc_global_lut_exit:
1883         ice_free(hw, sw_buf);
1884         return status;
1885 }
1886
1887 /**
1888  * ice_free_global_lut - free a RSS global LUT
1889  * @hw: pointer to the HW struct
1890  * @global_lut_id: ID of the RSS global LUT to free
1891  */
1892 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
1893 {
1894         struct ice_aqc_alloc_free_res_elem *sw_buf;
1895         u16 buf_len, num_elems = 1;
1896         enum ice_status status;
1897
1898         buf_len = ice_struct_size(sw_buf, elem, num_elems);
1899         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1900         if (!sw_buf)
1901                 return ICE_ERR_NO_MEMORY;
1902
1903         sw_buf->num_elems = CPU_TO_LE16(num_elems);
1904         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
1905         sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
1906
1907         status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
1908         if (status)
1909                 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
1910                           global_lut_id, status);
1911
1912         ice_free(hw, sw_buf);
1913         return status;
1914 }
1915
1916 /**
1917  * ice_alloc_sw - allocate resources specific to switch
1918  * @hw: pointer to the HW struct
1919  * @ena_stats: true to turn on VEB stats
1920  * @shared_res: true for shared resource, false for dedicated resource
1921  * @sw_id: switch ID returned
1922  * @counter_id: VEB counter ID returned
1923  *
1924  * allocates switch resources (SWID and VEB counter) (0x0208)
1925  */
1926 enum ice_status
1927 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1928              u16 *counter_id)
1929 {
1930         struct ice_aqc_alloc_free_res_elem *sw_buf;
1931         struct ice_aqc_res_elem *sw_ele;
1932         enum ice_status status;
1933         u16 buf_len;
1934
1935         buf_len = ice_struct_size(sw_buf, elem, 1);
1936         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1937         if (!sw_buf)
1938                 return ICE_ERR_NO_MEMORY;
1939
1940         /* Prepare buffer for switch ID.
1941          * The number of resource entries in buffer is passed as 1 since only a
1942          * single switch/VEB instance is allocated, and hence a single sw_id
1943          * is requested.
1944          */
1945         sw_buf->num_elems = CPU_TO_LE16(1);
1946         sw_buf->res_type =
1947                 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1948                             (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1949                             ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1950
1951         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1952                                        ice_aqc_opc_alloc_res, NULL);
1953
1954         if (status)
1955                 goto ice_alloc_sw_exit;
1956
1957         sw_ele = &sw_buf->elem[0];
1958         *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1959
1960         if (ena_stats) {
1961                 /* Prepare buffer for VEB Counter */
1962                 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1963                 struct ice_aqc_alloc_free_res_elem *counter_buf;
1964                 struct ice_aqc_res_elem *counter_ele;
1965
1966                 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1967                                 ice_malloc(hw, buf_len);
1968                 if (!counter_buf) {
1969                         status = ICE_ERR_NO_MEMORY;
1970                         goto ice_alloc_sw_exit;
1971                 }
1972
1973                 /* The number of resource entries in buffer is passed as 1 since
1974                  * only a single switch/VEB instance is allocated, and hence a
1975                  * single VEB counter is requested.
1976                  */
1977                 counter_buf->num_elems = CPU_TO_LE16(1);
1978                 counter_buf->res_type =
1979                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1980                                     ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1981                 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1982                                                opc, NULL);
1983
1984                 if (status) {
1985                         ice_free(hw, counter_buf);
1986                         goto ice_alloc_sw_exit;
1987                 }
1988                 counter_ele = &counter_buf->elem[0];
1989                 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1990                 ice_free(hw, counter_buf);
1991         }
1992
1993 ice_alloc_sw_exit:
1994         ice_free(hw, sw_buf);
1995         return status;
1996 }
1997
1998 /**
1999  * ice_free_sw - free resources specific to switch
2000  * @hw: pointer to the HW struct
2001  * @sw_id: switch ID returned
2002  * @counter_id: VEB counter ID returned
2003  *
2004  * free switch resources (SWID and VEB counter) (0x0209)
2005  *
2006  * NOTE: This function frees multiple resources. It continues
2007  * releasing other resources even after it encounters error.
2008  * The error code returned is the last error it encountered.
2009  */
2010 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2011 {
2012         struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2013         enum ice_status status, ret_status;
2014         u16 buf_len;
2015
2016         buf_len = ice_struct_size(sw_buf, elem, 1);
2017         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2018         if (!sw_buf)
2019                 return ICE_ERR_NO_MEMORY;
2020
2021         /* Prepare buffer to free for switch ID res.
2022          * The number of resource entries in buffer is passed as 1 since only a
2023          * single switch/VEB instance is freed, and hence a single sw_id
2024          * is released.
2025          */
2026         sw_buf->num_elems = CPU_TO_LE16(1);
2027         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2028         sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
2029
2030         ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2031                                            ice_aqc_opc_free_res, NULL);
2032
2033         if (ret_status)
2034                 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2035
2036         /* Prepare buffer to free for VEB Counter resource */
2037         counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2038                         ice_malloc(hw, buf_len);
2039         if (!counter_buf) {
2040                 ice_free(hw, sw_buf);
2041                 return ICE_ERR_NO_MEMORY;
2042         }
2043
2044         /* The number of resource entries in buffer is passed as 1 since only a
2045          * single switch/VEB instance is freed, and hence a single VEB counter
2046          * is released
2047          */
2048         counter_buf->num_elems = CPU_TO_LE16(1);
2049         counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2050         counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2051
2052         status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2053                                        ice_aqc_opc_free_res, NULL);
2054         if (status) {
2055                 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2056                 ret_status = status;
2057         }
2058
2059         ice_free(hw, counter_buf);
2060         ice_free(hw, sw_buf);
2061         return ret_status;
2062 }
2063
2064 /**
2065  * ice_aq_add_vsi
2066  * @hw: pointer to the HW struct
2067  * @vsi_ctx: pointer to a VSI context struct
2068  * @cd: pointer to command details structure or NULL
2069  *
2070  * Add a VSI context to the hardware (0x0210)
2071  */
2072 enum ice_status
2073 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2074                struct ice_sq_cd *cd)
2075 {
2076         struct ice_aqc_add_update_free_vsi_resp *res;
2077         struct ice_aqc_add_get_update_free_vsi *cmd;
2078         struct ice_aq_desc desc;
2079         enum ice_status status;
2080
2081         cmd = &desc.params.vsi_cmd;
2082         res = &desc.params.add_update_free_vsi_res;
2083
2084         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2085
2086         if (!vsi_ctx->alloc_from_pool)
2087                 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2088                                            ICE_AQ_VSI_IS_VALID);
2089
2090         cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2091
2092         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2093
2094         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2095                                  sizeof(vsi_ctx->info), cd);
2096
2097         if (!status) {
2098                 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2099                 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2100                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2101         }
2102
2103         return status;
2104 }
2105
2106 /**
2107  * ice_aq_free_vsi
2108  * @hw: pointer to the HW struct
2109  * @vsi_ctx: pointer to a VSI context struct
2110  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2111  * @cd: pointer to command details structure or NULL
2112  *
2113  * Free VSI context info from hardware (0x0213)
2114  */
2115 enum ice_status
2116 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2117                 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2118 {
2119         struct ice_aqc_add_update_free_vsi_resp *resp;
2120         struct ice_aqc_add_get_update_free_vsi *cmd;
2121         struct ice_aq_desc desc;
2122         enum ice_status status;
2123
2124         cmd = &desc.params.vsi_cmd;
2125         resp = &desc.params.add_update_free_vsi_res;
2126
2127         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2128
2129         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2130         if (keep_vsi_alloc)
2131                 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2132
2133         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2134         if (!status) {
2135                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2136                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2137         }
2138
2139         return status;
2140 }
2141
2142 /**
2143  * ice_aq_update_vsi
2144  * @hw: pointer to the HW struct
2145  * @vsi_ctx: pointer to a VSI context struct
2146  * @cd: pointer to command details structure or NULL
2147  *
2148  * Update VSI context in the hardware (0x0211)
2149  */
2150 enum ice_status
2151 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2152                   struct ice_sq_cd *cd)
2153 {
2154         struct ice_aqc_add_update_free_vsi_resp *resp;
2155         struct ice_aqc_add_get_update_free_vsi *cmd;
2156         struct ice_aq_desc desc;
2157         enum ice_status status;
2158
2159         cmd = &desc.params.vsi_cmd;
2160         resp = &desc.params.add_update_free_vsi_res;
2161
2162         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2163
2164         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2165
2166         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2167
2168         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2169                                  sizeof(vsi_ctx->info), cd);
2170
2171         if (!status) {
2172                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2173                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2174         }
2175
2176         return status;
2177 }
2178
2179 /**
2180  * ice_is_vsi_valid - check whether the VSI is valid or not
2181  * @hw: pointer to the HW struct
2182  * @vsi_handle: VSI handle
2183  *
2184  * check whether the VSI is valid or not
2185  */
2186 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2187 {
2188         return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2189 }
2190
2191 /**
2192  * ice_get_hw_vsi_num - return the HW VSI number
2193  * @hw: pointer to the HW struct
2194  * @vsi_handle: VSI handle
2195  *
2196  * return the HW VSI number
2197  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2198  */
2199 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2200 {
2201         return hw->vsi_ctx[vsi_handle]->vsi_num;
2202 }
2203
2204 /**
2205  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2206  * @hw: pointer to the HW struct
2207  * @vsi_handle: VSI handle
2208  *
2209  * return the VSI context entry for a given VSI handle
2210  */
2211 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2212 {
2213         return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2214 }
2215
2216 /**
2217  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2218  * @hw: pointer to the HW struct
2219  * @vsi_handle: VSI handle
2220  * @vsi: VSI context pointer
2221  *
2222  * save the VSI context entry for a given VSI handle
2223  */
2224 static void
2225 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2226 {
2227         hw->vsi_ctx[vsi_handle] = vsi;
2228 }
2229
2230 /**
2231  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2232  * @hw: pointer to the HW struct
2233  * @vsi_handle: VSI handle
2234  */
2235 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2236 {
2237         struct ice_vsi_ctx *vsi;
2238         u8 i;
2239
2240         vsi = ice_get_vsi_ctx(hw, vsi_handle);
2241         if (!vsi)
2242                 return;
2243         ice_for_each_traffic_class(i) {
2244                 if (vsi->lan_q_ctx[i]) {
2245                         ice_free(hw, vsi->lan_q_ctx[i]);
2246                         vsi->lan_q_ctx[i] = NULL;
2247                 }
2248         }
2249 }
2250
2251 /**
2252  * ice_clear_vsi_ctx - clear the VSI context entry
2253  * @hw: pointer to the HW struct
2254  * @vsi_handle: VSI handle
2255  *
2256  * clear the VSI context entry
2257  */
2258 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2259 {
2260         struct ice_vsi_ctx *vsi;
2261
2262         vsi = ice_get_vsi_ctx(hw, vsi_handle);
2263         if (vsi) {
2264                 ice_clear_vsi_q_ctx(hw, vsi_handle);
2265                 ice_free(hw, vsi);
2266                 hw->vsi_ctx[vsi_handle] = NULL;
2267         }
2268 }
2269
2270 /**
2271  * ice_clear_all_vsi_ctx - clear all the VSI context entries
2272  * @hw: pointer to the HW struct
2273  */
2274 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2275 {
2276         u16 i;
2277
2278         for (i = 0; i < ICE_MAX_VSI; i++)
2279                 ice_clear_vsi_ctx(hw, i);
2280 }
2281
2282 /**
2283  * ice_add_vsi - add VSI context to the hardware and VSI handle list
2284  * @hw: pointer to the HW struct
2285  * @vsi_handle: unique VSI handle provided by drivers
2286  * @vsi_ctx: pointer to a VSI context struct
2287  * @cd: pointer to command details structure or NULL
2288  *
2289  * Add a VSI context to the hardware also add it into the VSI handle list.
2290  * If this function gets called after reset for existing VSIs then update
2291  * with the new HW VSI number in the corresponding VSI handle list entry.
2292  */
2293 enum ice_status
2294 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2295             struct ice_sq_cd *cd)
2296 {
2297         struct ice_vsi_ctx *tmp_vsi_ctx;
2298         enum ice_status status;
2299
2300         if (vsi_handle >= ICE_MAX_VSI)
2301                 return ICE_ERR_PARAM;
2302         status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2303         if (status)
2304                 return status;
2305         tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2306         if (!tmp_vsi_ctx) {
2307                 /* Create a new VSI context */
2308                 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2309                         ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2310                 if (!tmp_vsi_ctx) {
2311                         ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2312                         return ICE_ERR_NO_MEMORY;
2313                 }
2314                 *tmp_vsi_ctx = *vsi_ctx;
2315
2316                 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2317         } else {
2318                 /* update with new HW VSI num */
2319                 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2320         }
2321
2322         return ICE_SUCCESS;
2323 }
2324
2325 /**
2326  * ice_free_vsi- free VSI context from hardware and VSI handle list
2327  * @hw: pointer to the HW struct
2328  * @vsi_handle: unique VSI handle
2329  * @vsi_ctx: pointer to a VSI context struct
2330  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2331  * @cd: pointer to command details structure or NULL
2332  *
2333  * Free VSI context info from hardware as well as from VSI handle list
2334  */
2335 enum ice_status
2336 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2337              bool keep_vsi_alloc, struct ice_sq_cd *cd)
2338 {
2339         enum ice_status status;
2340
2341         if (!ice_is_vsi_valid(hw, vsi_handle))
2342                 return ICE_ERR_PARAM;
2343         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2344         status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2345         if (!status)
2346                 ice_clear_vsi_ctx(hw, vsi_handle);
2347         return status;
2348 }
2349
2350 /**
2351  * ice_update_vsi
2352  * @hw: pointer to the HW struct
2353  * @vsi_handle: unique VSI handle
2354  * @vsi_ctx: pointer to a VSI context struct
2355  * @cd: pointer to command details structure or NULL
2356  *
2357  * Update VSI context in the hardware
2358  */
2359 enum ice_status
2360 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2361                struct ice_sq_cd *cd)
2362 {
2363         if (!ice_is_vsi_valid(hw, vsi_handle))
2364                 return ICE_ERR_PARAM;
2365         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2366         return ice_aq_update_vsi(hw, vsi_ctx, cd);
2367 }
2368
2369 /**
2370  * ice_aq_get_vsi_params
2371  * @hw: pointer to the HW struct
2372  * @vsi_ctx: pointer to a VSI context struct
2373  * @cd: pointer to command details structure or NULL
2374  *
2375  * Get VSI context info from hardware (0x0212)
2376  */
2377 enum ice_status
2378 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2379                       struct ice_sq_cd *cd)
2380 {
2381         struct ice_aqc_add_get_update_free_vsi *cmd;
2382         struct ice_aqc_get_vsi_resp *resp;
2383         struct ice_aq_desc desc;
2384         enum ice_status status;
2385
2386         cmd = &desc.params.vsi_cmd;
2387         resp = &desc.params.get_vsi_resp;
2388
2389         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2390
2391         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2392
2393         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2394                                  sizeof(vsi_ctx->info), cd);
2395         if (!status) {
2396                 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2397                                         ICE_AQ_VSI_NUM_M;
2398                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2399                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2400         }
2401
2402         return status;
2403 }
2404
2405 /**
2406  * ice_aq_add_update_mir_rule - add/update a mirror rule
2407  * @hw: pointer to the HW struct
2408  * @rule_type: Rule Type
2409  * @dest_vsi: VSI number to which packets will be mirrored
2410  * @count: length of the list
2411  * @mr_buf: buffer for list of mirrored VSI numbers
2412  * @cd: pointer to command details structure or NULL
2413  * @rule_id: Rule ID
2414  *
2415  * Add/Update Mirror Rule (0x260).
2416  */
2417 enum ice_status
2418 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2419                            u16 count, struct ice_mir_rule_buf *mr_buf,
2420                            struct ice_sq_cd *cd, u16 *rule_id)
2421 {
2422         struct ice_aqc_add_update_mir_rule *cmd;
2423         struct ice_aq_desc desc;
2424         enum ice_status status;
2425         __le16 *mr_list = NULL;
2426         u16 buf_size = 0;
2427
2428         switch (rule_type) {
2429         case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2430         case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2431                 /* Make sure count and mr_buf are set for these rule_types */
2432                 if (!(count && mr_buf))
2433                         return ICE_ERR_PARAM;
2434
2435                 buf_size = count * sizeof(__le16);
2436                 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2437                 if (!mr_list)
2438                         return ICE_ERR_NO_MEMORY;
2439                 break;
2440         case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2441         case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2442                 /* Make sure count and mr_buf are not set for these
2443                  * rule_types
2444                  */
2445                 if (count || mr_buf)
2446                         return ICE_ERR_PARAM;
2447                 break;
2448         default:
2449                 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2450                 return ICE_ERR_OUT_OF_RANGE;
2451         }
2452
2453         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2454
2455         /* Pre-process 'mr_buf' items for add/update of virtual port
2456          * ingress/egress mirroring (but not physical port ingress/egress
2457          * mirroring)
2458          */
2459         if (mr_buf) {
2460                 int i;
2461
2462                 for (i = 0; i < count; i++) {
2463                         u16 id;
2464
2465                         id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2466
2467                         /* Validate specified VSI number, make sure it is less
2468                          * than ICE_MAX_VSI, if not return with error.
2469                          */
2470                         if (id >= ICE_MAX_VSI) {
2471                                 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
2472                                           id);
2473                                 ice_free(hw, mr_list);
2474                                 return ICE_ERR_OUT_OF_RANGE;
2475                         }
2476
2477                         /* add VSI to mirror rule */
2478                         if (mr_buf[i].add)
2479                                 mr_list[i] =
2480                                         CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2481                         else /* remove VSI from mirror rule */
2482                                 mr_list[i] = CPU_TO_LE16(id);
2483                 }
2484         }
2485
2486         cmd = &desc.params.add_update_rule;
2487         if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2488                 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2489                                            ICE_AQC_RULE_ID_VALID_M);
2490         cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2491         cmd->num_entries = CPU_TO_LE16(count);
2492         cmd->dest = CPU_TO_LE16(dest_vsi);
2493
2494         status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2495         if (!status)
2496                 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2497
2498         ice_free(hw, mr_list);
2499
2500         return status;
2501 }
2502
2503 /**
2504  * ice_aq_delete_mir_rule - delete a mirror rule
2505  * @hw: pointer to the HW struct
2506  * @rule_id: Mirror rule ID (to be deleted)
2507  * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2508  *               otherwise it is returned to the shared pool
2509  * @cd: pointer to command details structure or NULL
2510  *
2511  * Delete Mirror Rule (0x261).
2512  */
2513 enum ice_status
2514 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2515                        struct ice_sq_cd *cd)
2516 {
2517         struct ice_aqc_delete_mir_rule *cmd;
2518         struct ice_aq_desc desc;
2519
2520         /* rule_id should be in the range 0...63 */
2521         if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2522                 return ICE_ERR_OUT_OF_RANGE;
2523
2524         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2525
2526         cmd = &desc.params.del_rule;
2527         rule_id |= ICE_AQC_RULE_ID_VALID_M;
2528         cmd->rule_id = CPU_TO_LE16(rule_id);
2529
2530         if (keep_allocd)
2531                 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2532
2533         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2534 }
2535
2536 /**
2537  * ice_aq_alloc_free_vsi_list
2538  * @hw: pointer to the HW struct
2539  * @vsi_list_id: VSI list ID returned or used for lookup
2540  * @lkup_type: switch rule filter lookup type
2541  * @opc: switch rules population command type - pass in the command opcode
2542  *
2543  * allocates or free a VSI list resource
2544  */
2545 static enum ice_status
2546 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2547                            enum ice_sw_lkup_type lkup_type,
2548                            enum ice_adminq_opc opc)
2549 {
2550         struct ice_aqc_alloc_free_res_elem *sw_buf;
2551         struct ice_aqc_res_elem *vsi_ele;
2552         enum ice_status status;
2553         u16 buf_len;
2554
2555         buf_len = ice_struct_size(sw_buf, elem, 1);
2556         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2557         if (!sw_buf)
2558                 return ICE_ERR_NO_MEMORY;
2559         sw_buf->num_elems = CPU_TO_LE16(1);
2560
2561         if (lkup_type == ICE_SW_LKUP_MAC ||
2562             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2563             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2564             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2565             lkup_type == ICE_SW_LKUP_PROMISC ||
2566             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2567             lkup_type == ICE_SW_LKUP_LAST) {
2568                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2569         } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2570                 sw_buf->res_type =
2571                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2572         } else {
2573                 status = ICE_ERR_PARAM;
2574                 goto ice_aq_alloc_free_vsi_list_exit;
2575         }
2576
2577         if (opc == ice_aqc_opc_free_res)
2578                 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2579
2580         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2581         if (status)
2582                 goto ice_aq_alloc_free_vsi_list_exit;
2583
2584         if (opc == ice_aqc_opc_alloc_res) {
2585                 vsi_ele = &sw_buf->elem[0];
2586                 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2587         }
2588
2589 ice_aq_alloc_free_vsi_list_exit:
2590         ice_free(hw, sw_buf);
2591         return status;
2592 }
2593
2594 /**
2595  * ice_aq_set_storm_ctrl - Sets storm control configuration
2596  * @hw: pointer to the HW struct
2597  * @bcast_thresh: represents the upper threshold for broadcast storm control
2598  * @mcast_thresh: represents the upper threshold for multicast storm control
2599  * @ctl_bitmask: storm control knobs
2600  *
2601  * Sets the storm control configuration (0x0280)
2602  */
2603 enum ice_status
2604 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
2605                       u32 ctl_bitmask)
2606 {
2607         struct ice_aqc_storm_cfg *cmd;
2608         struct ice_aq_desc desc;
2609
2610         cmd = &desc.params.storm_conf;
2611
2612         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
2613
2614         cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
2615         cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
2616         cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
2617
2618         return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2619 }
2620
2621 /**
2622  * ice_aq_get_storm_ctrl - gets storm control configuration
2623  * @hw: pointer to the HW struct
2624  * @bcast_thresh: represents the upper threshold for broadcast storm control
2625  * @mcast_thresh: represents the upper threshold for multicast storm control
2626  * @ctl_bitmask: storm control knobs
2627  *
2628  * Gets the storm control configuration (0x0281)
2629  */
2630 enum ice_status
2631 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2632                       u32 *ctl_bitmask)
2633 {
2634         enum ice_status status;
2635         struct ice_aq_desc desc;
2636
2637         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
2638
2639         status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2640         if (!status) {
2641                 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
2642
2643                 if (bcast_thresh)
2644                         *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2645                                 ICE_AQ_THRESHOLD_M;
2646                 if (mcast_thresh)
2647                         *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2648                                 ICE_AQ_THRESHOLD_M;
2649                 if (ctl_bitmask)
2650                         *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2651         }
2652
2653         return status;
2654 }
2655
2656 /**
2657  * ice_aq_sw_rules - add/update/remove switch rules
2658  * @hw: pointer to the HW struct
2659  * @rule_list: pointer to switch rule population list
2660  * @rule_list_sz: total size of the rule list in bytes
2661  * @num_rules: number of switch rules in the rule_list
2662  * @opc: switch rules population command type - pass in the command opcode
2663  * @cd: pointer to command details structure or NULL
2664  *
2665  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
2666  */
2667 static enum ice_status
2668 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2669                 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2670 {
2671         struct ice_aq_desc desc;
2672         enum ice_status status;
2673
2674         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2675
2676         if (opc != ice_aqc_opc_add_sw_rules &&
2677             opc != ice_aqc_opc_update_sw_rules &&
2678             opc != ice_aqc_opc_remove_sw_rules)
2679                 return ICE_ERR_PARAM;
2680
2681         ice_fill_dflt_direct_cmd_desc(&desc, opc);
2682
2683         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2684         desc.params.sw_rules.num_rules_fltr_entry_index =
2685                 CPU_TO_LE16(num_rules);
2686         status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
2687         if (opc != ice_aqc_opc_add_sw_rules &&
2688             hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2689                 status = ICE_ERR_DOES_NOT_EXIST;
2690
2691         return status;
2692 }
2693
2694 /**
2695  * ice_aq_add_recipe - add switch recipe
2696  * @hw: pointer to the HW struct
2697  * @s_recipe_list: pointer to switch rule population list
2698  * @num_recipes: number of switch recipes in the list
2699  * @cd: pointer to command details structure or NULL
2700  *
2701  * Add(0x0290)
2702  */
2703 enum ice_status
2704 ice_aq_add_recipe(struct ice_hw *hw,
2705                   struct ice_aqc_recipe_data_elem *s_recipe_list,
2706                   u16 num_recipes, struct ice_sq_cd *cd)
2707 {
2708         struct ice_aqc_add_get_recipe *cmd;
2709         struct ice_aq_desc desc;
2710         u16 buf_size;
2711
2712         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2713         cmd = &desc.params.add_get_recipe;
2714         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2715
2716         cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
2717         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2718
2719         buf_size = num_recipes * sizeof(*s_recipe_list);
2720
2721         return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2722 }
2723
2724 /**
2725  * ice_aq_get_recipe - get switch recipe
2726  * @hw: pointer to the HW struct
2727  * @s_recipe_list: pointer to switch rule population list
2728  * @num_recipes: pointer to the number of recipes (input and output)
2729  * @recipe_root: root recipe number of recipe(s) to retrieve
2730  * @cd: pointer to command details structure or NULL
2731  *
2732  * Get(0x0292)
2733  *
2734  * On input, *num_recipes should equal the number of entries in s_recipe_list.
2735  * On output, *num_recipes will equal the number of entries returned in
2736  * s_recipe_list.
2737  *
2738  * The caller must supply enough space in s_recipe_list to hold all possible
2739  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2740  */
2741 enum ice_status
2742 ice_aq_get_recipe(struct ice_hw *hw,
2743                   struct ice_aqc_recipe_data_elem *s_recipe_list,
2744                   u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2745 {
2746         struct ice_aqc_add_get_recipe *cmd;
2747         struct ice_aq_desc desc;
2748         enum ice_status status;
2749         u16 buf_size;
2750
2751         if (*num_recipes != ICE_MAX_NUM_RECIPES)
2752                 return ICE_ERR_PARAM;
2753
2754         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2755         cmd = &desc.params.add_get_recipe;
2756         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2757
2758         cmd->return_index = CPU_TO_LE16(recipe_root);
2759         cmd->num_sub_recipes = 0;
2760
2761         buf_size = *num_recipes * sizeof(*s_recipe_list);
2762
2763         status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2764         /* cppcheck-suppress constArgument */
2765         *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2766
2767         return status;
2768 }
2769
2770 /**
2771  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2772  * @hw: pointer to the HW struct
2773  * @profile_id: package profile ID to associate the recipe with
2774  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2775  * @cd: pointer to command details structure or NULL
2776  * Recipe to profile association (0x0291)
2777  */
2778 enum ice_status
2779 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2780                              struct ice_sq_cd *cd)
2781 {
2782         struct ice_aqc_recipe_to_profile *cmd;
2783         struct ice_aq_desc desc;
2784
2785         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2786         cmd = &desc.params.recipe_to_profile;
2787         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2788         cmd->profile_id = CPU_TO_LE16(profile_id);
2789         /* Set the recipe ID bit in the bitmask to let the device know which
2790          * profile we are associating the recipe to
2791          */
2792         ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2793                    ICE_NONDMA_TO_NONDMA);
2794
2795         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2796 }
2797
2798 /**
2799  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2800  * @hw: pointer to the HW struct
2801  * @profile_id: package profile ID to associate the recipe with
2802  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2803  * @cd: pointer to command details structure or NULL
2804  * Associate profile ID with given recipe (0x0293)
2805  */
2806 enum ice_status
2807 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2808                              struct ice_sq_cd *cd)
2809 {
2810         struct ice_aqc_recipe_to_profile *cmd;
2811         struct ice_aq_desc desc;
2812         enum ice_status status;
2813
2814         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2815         cmd = &desc.params.recipe_to_profile;
2816         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2817         cmd->profile_id = CPU_TO_LE16(profile_id);
2818
2819         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2820         if (!status)
2821                 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2822                            sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2823
2824         return status;
2825 }
2826
2827 /**
2828  * ice_alloc_recipe - add recipe resource
2829  * @hw: pointer to the hardware structure
2830  * @rid: recipe ID returned as response to AQ call
2831  */
2832 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2833 {
2834         struct ice_aqc_alloc_free_res_elem *sw_buf;
2835         enum ice_status status;
2836         u16 buf_len;
2837
2838         buf_len = ice_struct_size(sw_buf, elem, 1);
2839         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2840         if (!sw_buf)
2841                 return ICE_ERR_NO_MEMORY;
2842
2843         sw_buf->num_elems = CPU_TO_LE16(1);
2844         sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2845                                         ICE_AQC_RES_TYPE_S) |
2846                                         ICE_AQC_RES_TYPE_FLAG_SHARED);
2847         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2848                                        ice_aqc_opc_alloc_res, NULL);
2849         if (!status)
2850                 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2851         ice_free(hw, sw_buf);
2852
2853         return status;
2854 }
2855
2856 /* ice_init_port_info - Initialize port_info with switch configuration data
2857  * @pi: pointer to port_info
2858  * @vsi_port_num: VSI number or port number
2859  * @type: Type of switch element (port or VSI)
2860  * @swid: switch ID of the switch the element is attached to
2861  * @pf_vf_num: PF or VF number
2862  * @is_vf: true if the element is a VF, false otherwise
2863  */
2864 static void
2865 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2866                    u16 swid, u16 pf_vf_num, bool is_vf)
2867 {
2868         switch (type) {
2869         case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2870                 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2871                 pi->sw_id = swid;
2872                 pi->pf_vf_num = pf_vf_num;
2873                 pi->is_vf = is_vf;
2874                 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2875                 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2876                 break;
2877         default:
2878                 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2879                 break;
2880         }
2881 }
2882
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 *
 * Queries the firmware switch configuration, possibly across several
 * ice_aq_get_sw_cfg calls, and initializes hw->port_info for each
 * physical/virtual port element returned. Returns ICE_ERR_NO_MEMORY if the
 * response buffer cannot be allocated, ICE_ERR_CFG if firmware reports more
 * ports than expected, or the AQ command status otherwise.
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
	enum ice_status status;
	u8 num_total_ports;
	u16 req_desc = 0;
	u16 num_elems;
	u8 j = 0; /* count of port elements initialized so far */
	u16 i;

	num_total_ports = 1;

	rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
		ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);

	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		struct ice_aqc_get_sw_cfg_resp_elem *ele;

		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 res_type;

			/* low bits carry the VSI/port number; the element
			 * type lives in the upper bits of the same field
			 */
			vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = LE16_TO_CPU(ele->swid);

			if (LE16_TO_CPU(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);

			switch (res_type) {
			case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
			case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
				/* refuse to write past the single port_info
				 * this function expects (num_total_ports)
				 */
				if (j == num_total_ports) {
					ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
					status = ICE_ERR_CFG;
					goto out;
				}
				ice_init_port_info(hw->port_info,
						   vsi_port_num, res_type, swid,
						   pf_vf_num, is_vf);
				j++;
				break;
			default:
				/* non-port elements are ignored here */
				break;
			}
		}
	} while (req_desc && !status);

out:
	ice_free(hw, rbuf);
	return status;
}
2961
2962 /**
2963  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2964  * @hw: pointer to the hardware structure
2965  * @fi: filter info structure to fill/update
2966  *
2967  * This helper function populates the lb_en and lan_en elements of the provided
2968  * ice_fltr_info struct using the switch's type and characteristics of the
2969  * switch rule being configured.
2970  */
2971 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2972 {
2973         if ((fi->flag & ICE_FLTR_RX) &&
2974             (fi->fltr_act == ICE_FWD_TO_VSI ||
2975              fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2976             fi->lkup_type == ICE_SW_LKUP_LAST)
2977                 fi->lan_en = true;
2978         fi->lb_en = false;
2979         fi->lan_en = false;
2980         if ((fi->flag & ICE_FLTR_TX) &&
2981             (fi->fltr_act == ICE_FWD_TO_VSI ||
2982              fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2983              fi->fltr_act == ICE_FWD_TO_Q ||
2984              fi->fltr_act == ICE_FWD_TO_QGRP)) {
2985                 /* Setting LB for prune actions will result in replicated
2986                  * packets to the internal switch that will be dropped.
2987                  */
2988                 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2989                         fi->lb_en = true;
2990
2991                 /* Set lan_en to TRUE if
2992                  * 1. The switch is a VEB AND
2993                  * 2
2994                  * 2.1 The lookup is a directional lookup like ethertype,
2995                  * promiscuous, ethertype-MAC, promiscuous-VLAN
2996                  * and default-port OR
2997                  * 2.2 The lookup is VLAN, OR
2998                  * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2999                  * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3000                  *
3001                  * OR
3002                  *
3003                  * The switch is a VEPA.
3004                  *
3005                  * In all other cases, the LAN enable has to be set to false.
3006                  */
3007                 if (hw->evb_veb) {
3008                         if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3009                             fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3010                             fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3011                             fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3012                             fi->lkup_type == ICE_SW_LKUP_DFLT ||
3013                             fi->lkup_type == ICE_SW_LKUP_VLAN ||
3014                             (fi->lkup_type == ICE_SW_LKUP_MAC &&
3015                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3016                             (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3017                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3018                                 fi->lan_en = true;
3019                 } else {
3020                         fi->lan_en = true;
3021                 }
3022         }
3023 }
3024
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 *
 * Builds the lookup Tx/Rx rule in @s_rule: a dummy Ethernet header is
 * written into the rule's header buffer and patched with the filter's
 * MAC / ethertype / VLAN fields, and the action word is encoded from
 * @f_info's forwarding action. For remove operations only the rule index
 * is filled in.
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	/* sentinel: values > ICE_MAX_VLAN_ID mean "no VLAN to program" */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	/* removal only needs the rule index; no header or action encoding */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
	ice_fill_sw_info(hw, f_info);

	/* encode the forwarding action into the 32-bit action word */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is expressed as log2 of the group size */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ice_ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		/* unknown action: leave the rule untouched */
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* pick up the MAC address and/or VLAN ID to patch into the header */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		/* fall-through */
	case ICE_SW_LKUP_ETHERTYPE:
		/* write the ethertype at its fixed offset in the header */
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		/* fall-through */
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	if (daddr)
		ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
			   ICE_NONDMA_TO_NONDMA);

	/* only patch the VLAN TCI when a valid VLAN ID was selected above */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = CPU_TO_BE16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
}
3158
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 *
 * Returns ICE_ERR_PARAM for non-MAC lookups, ICE_ERR_NO_MEMORY if the rule
 * buffer cannot be allocated, otherwise the status of the update AQ command.
 * On success, records the large action index and marker ID in @m_ent.
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	/* markers are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* the lookup rule immediately follows the large action in the buffer */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* remember the large action and marker for later teardown */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	ice_free(hw, lg_act);
	return status;
}
3262
3263 /**
3264  * ice_add_counter_act - add/update filter rule with counter action
3265  * @hw: pointer to the hardware structure
3266  * @m_ent: the management entry for which counter needs to be added
3267  * @counter_id: VLAN counter ID returned as part of allocate resource
3268  * @l_id: large action resource ID
3269  */
3270 static enum ice_status
3271 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3272                     u16 counter_id, u16 l_id)
3273 {
3274         struct ice_aqc_sw_rules_elem *lg_act;
3275         struct ice_aqc_sw_rules_elem *rx_tx;
3276         enum ice_status status;
3277         /* 2 actions will be added while adding a large action counter */
3278         const int num_acts = 2;
3279         u16 lg_act_size;
3280         u16 rules_size;
3281         u16 f_rule_id;
3282         u32 act;
3283         u16 id;
3284
3285         if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3286                 return ICE_ERR_PARAM;
3287
3288         /* Create two back-to-back switch rules and submit them to the HW using
3289          * one memory buffer:
3290          * 1. Large Action
3291          * 2. Look up Tx Rx
3292          */
3293         lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3294         rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3295         lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3296         if (!lg_act)
3297                 return ICE_ERR_NO_MEMORY;
3298
3299         rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3300
3301         /* Fill in the first switch rule i.e. large action */
3302         lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3303         lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3304         lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3305
3306         /* First action VSI forwarding or VSI list forwarding depending on how
3307          * many VSIs
3308          */
3309         id = (m_ent->vsi_count > 1) ?  m_ent->fltr_info.fwd_id.vsi_list_id :
3310                 m_ent->fltr_info.fwd_id.hw_vsi_id;
3311
3312         act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3313         act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3314                 ICE_LG_ACT_VSI_LIST_ID_M;
3315         if (m_ent->vsi_count > 1)
3316                 act |= ICE_LG_ACT_VSI_LIST;
3317         lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3318
3319         /* Second action counter ID */
3320         act = ICE_LG_ACT_STAT_COUNT;
3321         act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3322                 ICE_LG_ACT_STAT_COUNT_M;
3323         lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3324
3325         /* call the fill switch rule to fill the lookup Tx Rx structure */
3326         ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3327                          ice_aqc_opc_update_sw_rules);
3328
3329         act = ICE_SINGLE_ACT_PTR;
3330         act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3331         rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3332
3333         /* Use the filter rule ID of the previously created rule with single
3334          * act. Once the update happens, hardware will treat this as large
3335          * action
3336          */
3337         f_rule_id = m_ent->fltr_info.fltr_rule_id;
3338         rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
3339
3340         status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3341                                  ice_aqc_opc_update_sw_rules, NULL);
3342         if (!status) {
3343                 m_ent->lg_act_idx = l_id;
3344                 m_ent->counter_index = counter_id;
3345         }
3346
3347         ice_free(hw, lg_act);
3348         return status;
3349 }
3350
3351 /**
3352  * ice_create_vsi_list_map
3353  * @hw: pointer to the hardware structure
3354  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3355  * @num_vsi: number of VSI handles in the array
3356  * @vsi_list_id: VSI list ID generated as part of allocate resource
3357  *
3358  * Helper function to create a new entry of VSI list ID to VSI mapping
3359  * using the given VSI list ID
3360  */
3361 static struct ice_vsi_list_map_info *
3362 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3363                         u16 vsi_list_id)
3364 {
3365         struct ice_switch_info *sw = hw->switch_info;
3366         struct ice_vsi_list_map_info *v_map;
3367         int i;
3368
3369         v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
3370                 sizeof(*v_map));
3371         if (!v_map)
3372                 return NULL;
3373
3374         v_map->vsi_list_id = vsi_list_id;
3375         v_map->ref_cnt = 1;
3376         for (i = 0; i < num_vsi; i++)
3377                 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3378
3379         LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3380         return v_map;
3381 }
3382
3383 /**
3384  * ice_update_vsi_list_rule
3385  * @hw: pointer to the hardware structure
3386  * @vsi_handle_arr: array of VSI handles to form a VSI list
3387  * @num_vsi: number of VSI handles in the array
3388  * @vsi_list_id: VSI list ID generated as part of allocate resource
3389  * @remove: Boolean value to indicate if this is a remove action
3390  * @opc: switch rules population command type - pass in the command opcode
3391  * @lkup_type: lookup type of the filter
3392  *
3393  * Call AQ command to add a new switch rule or update existing switch rule
3394  * using the given VSI list ID
3395  */
3396 static enum ice_status
3397 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3398                          u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3399                          enum ice_sw_lkup_type lkup_type)
3400 {
3401         struct ice_aqc_sw_rules_elem *s_rule;
3402         enum ice_status status;
3403         u16 s_rule_size;
3404         u16 rule_type;
3405         int i;
3406
3407         if (!num_vsi)
3408                 return ICE_ERR_PARAM;
3409
3410         if (lkup_type == ICE_SW_LKUP_MAC ||
3411             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3412             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3413             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3414             lkup_type == ICE_SW_LKUP_PROMISC ||
3415             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3416             lkup_type == ICE_SW_LKUP_LAST)
3417                 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3418                         ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3419         else if (lkup_type == ICE_SW_LKUP_VLAN)
3420                 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3421                         ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3422         else
3423                 return ICE_ERR_PARAM;
3424
3425         s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3426         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3427         if (!s_rule)
3428                 return ICE_ERR_NO_MEMORY;
3429         for (i = 0; i < num_vsi; i++) {
3430                 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3431                         status = ICE_ERR_PARAM;
3432                         goto exit;
3433                 }
3434                 /* AQ call requires hw_vsi_id(s) */
3435                 s_rule->pdata.vsi_list.vsi[i] =
3436                         CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3437         }
3438
3439         s_rule->type = CPU_TO_LE16(rule_type);
3440         s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3441         s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3442
3443         status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3444
3445 exit:
3446         ice_free(hw, s_rule);
3447         return status;
3448 }
3449
3450 /**
3451  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3452  * @hw: pointer to the HW struct
3453  * @vsi_handle_arr: array of VSI handles to form a VSI list
3454  * @num_vsi: number of VSI handles in the array
3455  * @vsi_list_id: stores the ID of the VSI list to be created
3456  * @lkup_type: switch rule filter's lookup type
3457  */
3458 static enum ice_status
3459 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3460                          u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3461 {
3462         enum ice_status status;
3463
3464         status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3465                                             ice_aqc_opc_alloc_res);
3466         if (status)
3467                 return status;
3468
3469         /* Update the newly created VSI list to include the specified VSIs */
3470         return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3471                                         *vsi_list_id, false,
3472                                         ice_aqc_opc_add_sw_rules, lkup_type);
3473 }
3474
3475 /**
3476  * ice_create_pkt_fwd_rule
3477  * @hw: pointer to the hardware structure
3478  * @recp_list: corresponding filter management list
3479  * @f_entry: entry containing packet forwarding information
3480  *
3481  * Create switch rule with given filter information and add an entry
3482  * to the corresponding filter management list to track this switch rule
3483  * and VSI mapping
3484  */
3485 static enum ice_status
3486 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3487                         struct ice_fltr_list_entry *f_entry)
3488 {
3489         struct ice_fltr_mgmt_list_entry *fm_entry;
3490         struct ice_aqc_sw_rules_elem *s_rule;
3491         enum ice_status status;
3492
3493         s_rule = (struct ice_aqc_sw_rules_elem *)
3494                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3495         if (!s_rule)
3496                 return ICE_ERR_NO_MEMORY;
3497         fm_entry = (struct ice_fltr_mgmt_list_entry *)
3498                    ice_malloc(hw, sizeof(*fm_entry));
3499         if (!fm_entry) {
3500                 status = ICE_ERR_NO_MEMORY;
3501                 goto ice_create_pkt_fwd_rule_exit;
3502         }
3503
3504         fm_entry->fltr_info = f_entry->fltr_info;
3505
3506         /* Initialize all the fields for the management entry */
3507         fm_entry->vsi_count = 1;
3508         fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3509         fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3510         fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3511
3512         ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3513                          ice_aqc_opc_add_sw_rules);
3514
3515         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3516                                  ice_aqc_opc_add_sw_rules, NULL);
3517         if (status) {
3518                 ice_free(hw, fm_entry);
3519                 goto ice_create_pkt_fwd_rule_exit;
3520         }
3521
3522         f_entry->fltr_info.fltr_rule_id =
3523                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3524         fm_entry->fltr_info.fltr_rule_id =
3525                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3526
3527         /* The book keeping entries will get removed when base driver
3528          * calls remove filter AQ command
3529          */
3530         LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
3531
3532 ice_create_pkt_fwd_rule_exit:
3533         ice_free(hw, s_rule);
3534         return status;
3535 }
3536
3537 /**
3538  * ice_update_pkt_fwd_rule
3539  * @hw: pointer to the hardware structure
3540  * @f_info: filter information for switch rule
3541  *
3542  * Call AQ command to update a previously created switch rule with a
3543  * VSI list ID
3544  */
3545 static enum ice_status
3546 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
3547 {
3548         struct ice_aqc_sw_rules_elem *s_rule;
3549         enum ice_status status;
3550
3551         s_rule = (struct ice_aqc_sw_rules_elem *)
3552                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3553         if (!s_rule)
3554                 return ICE_ERR_NO_MEMORY;
3555
3556         ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
3557
3558         s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3559
3560         /* Update switch rule with new rule set to forward VSI list */
3561         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3562                                  ice_aqc_opc_update_sw_rules, NULL);
3563
3564         ice_free(hw, s_rule);
3565         return status;
3566 }
3567
3568 /**
3569  * ice_update_sw_rule_bridge_mode
3570  * @hw: pointer to the HW struct
3571  *
3572  * Updates unicast switch filter rules based on VEB/VEPA mode
3573  */
3574 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
3575 {
3576         struct ice_switch_info *sw = hw->switch_info;
3577         struct ice_fltr_mgmt_list_entry *fm_entry;
3578         enum ice_status status = ICE_SUCCESS;
3579         struct LIST_HEAD_TYPE *rule_head;
3580         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3581
3582         rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3583         rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3584
3585         ice_acquire_lock(rule_lock);
3586         LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
3587                             list_entry) {
3588                 struct ice_fltr_info *fi = &fm_entry->fltr_info;
3589                 u8 *addr = fi->l_data.mac.mac_addr;
3590
3591                 /* Update unicast Tx rules to reflect the selected
3592                  * VEB/VEPA mode
3593                  */
3594                 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
3595                     (fi->fltr_act == ICE_FWD_TO_VSI ||
3596                      fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3597                      fi->fltr_act == ICE_FWD_TO_Q ||
3598                      fi->fltr_act == ICE_FWD_TO_QGRP)) {
3599                         status = ice_update_pkt_fwd_rule(hw, fi);
3600                         if (status)
3601                                 break;
3602                 }
3603         }
3604
3605         ice_release_lock(rule_lock);
3606
3607         return status;
3608 }
3609
3610 /**
3611  * ice_add_update_vsi_list
3612  * @hw: pointer to the hardware structure
3613  * @m_entry: pointer to current filter management list entry
3614  * @cur_fltr: filter information from the book keeping entry
3615  * @new_fltr: filter information with the new VSI to be added
3616  *
3617  * Call AQ command to add or update previously created VSI list with new VSI.
3618  *
3619  * Helper function to do book keeping associated with adding filter information
3620  * The algorithm to do the book keeping is described below :
3621  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
3622  *      if only one VSI has been added till now
3623  *              Allocate a new VSI list and add two VSIs
3624  *              to this list using switch rule command
3625  *              Update the previously created switch rule with the
3626  *              newly created VSI list ID
3627  *      if a VSI list was previously created
3628  *              Add the new VSI to the previously created VSI list set
3629  *              using the update switch rule command
3630  */
3631 static enum ice_status
3632 ice_add_update_vsi_list(struct ice_hw *hw,
3633                         struct ice_fltr_mgmt_list_entry *m_entry,
3634                         struct ice_fltr_info *cur_fltr,
3635                         struct ice_fltr_info *new_fltr)
3636 {
3637         enum ice_status status = ICE_SUCCESS;
3638         u16 vsi_list_id = 0;
3639
3640         if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
3641              cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3642                 return ICE_ERR_NOT_IMPL;
3643
3644         if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3645              new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3646             (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3647              cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3648                 return ICE_ERR_NOT_IMPL;
3649
3650         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3651                 /* Only one entry existed in the mapping and it was not already
3652                  * a part of a VSI list. So, create a VSI list with the old and
3653                  * new VSIs.
3654                  */
3655                 struct ice_fltr_info tmp_fltr;
3656                 u16 vsi_handle_arr[2];
3657
3658                 /* A rule already exists with the new VSI being added */
3659                 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3660                         return ICE_ERR_ALREADY_EXISTS;
3661
3662                 vsi_handle_arr[0] = cur_fltr->vsi_handle;
3663                 vsi_handle_arr[1] = new_fltr->vsi_handle;
3664                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3665                                                   &vsi_list_id,
3666                                                   new_fltr->lkup_type);
3667                 if (status)
3668                         return status;
3669
3670                 tmp_fltr = *new_fltr;
3671                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3672                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3673                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3674                 /* Update the previous switch rule of "MAC forward to VSI" to
3675                  * "MAC fwd to VSI list"
3676                  */
3677                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3678                 if (status)
3679                         return status;
3680
3681                 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3682                 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3683                 m_entry->vsi_list_info =
3684                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3685                                                 vsi_list_id);
3686
3687                 /* If this entry was large action then the large action needs
3688                  * to be updated to point to FWD to VSI list
3689                  */
3690                 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3691                         status =
3692                             ice_add_marker_act(hw, m_entry,
3693                                                m_entry->sw_marker_id,
3694                                                m_entry->lg_act_idx);
3695         } else {
3696                 u16 vsi_handle = new_fltr->vsi_handle;
3697                 enum ice_adminq_opc opcode;
3698
3699                 if (!m_entry->vsi_list_info)
3700                         return ICE_ERR_CFG;
3701
3702                 /* A rule already exists with the new VSI being added */
3703                 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
3704                         return ICE_SUCCESS;
3705
3706                 /* Update the previously created VSI list set with
3707                  * the new VSI ID passed in
3708                  */
3709                 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3710                 opcode = ice_aqc_opc_update_sw_rules;
3711
3712                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3713                                                   vsi_list_id, false, opcode,
3714                                                   new_fltr->lkup_type);
3715                 /* update VSI list mapping info with new VSI ID */
3716                 if (!status)
3717                         ice_set_bit(vsi_handle,
3718                                     m_entry->vsi_list_info->vsi_map);
3719         }
3720         if (!status)
3721                 m_entry->vsi_count++;
3722         return status;
3723 }
3724
3725 /**
3726  * ice_find_rule_entry - Search a rule entry
3727  * @list_head: head of rule list
3728  * @f_info: rule information
3729  *
3730  * Helper function to search for a given rule entry
3731  * Returns pointer to entry storing the rule if found
3732  */
3733 static struct ice_fltr_mgmt_list_entry *
3734 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3735                     struct ice_fltr_info *f_info)
3736 {
3737         struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3738
3739         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3740                             list_entry) {
3741                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3742                             sizeof(f_info->l_data)) &&
3743                     f_info->flag == list_itr->fltr_info.flag) {
3744                         ret = list_itr;
3745                         break;
3746                 }
3747         }
3748         return ret;
3749 }
3750
3751 /**
3752  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3753  * @recp_list: VSI lists needs to be searched
3754  * @vsi_handle: VSI handle to be found in VSI list
3755  * @vsi_list_id: VSI list ID found containing vsi_handle
3756  *
3757  * Helper function to search a VSI list with single entry containing given VSI
3758  * handle element. This can be extended further to search VSI list with more
3759  * than 1 vsi_count. Returns pointer to VSI list entry if found.
3760  */
3761 static struct ice_vsi_list_map_info *
3762 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
3763                         u16 *vsi_list_id)
3764 {
3765         struct ice_vsi_list_map_info *map_info = NULL;
3766         struct LIST_HEAD_TYPE *list_head;
3767
3768         list_head = &recp_list->filt_rules;
3769         if (recp_list->adv_rule) {
3770                 struct ice_adv_fltr_mgmt_list_entry *list_itr;
3771
3772                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3773                                     ice_adv_fltr_mgmt_list_entry,
3774                                     list_entry) {
3775                         if (list_itr->vsi_list_info) {
3776                                 map_info = list_itr->vsi_list_info;
3777                                 if (ice_is_bit_set(map_info->vsi_map,
3778                                                    vsi_handle)) {
3779                                         *vsi_list_id = map_info->vsi_list_id;
3780                                         return map_info;
3781                                 }
3782                         }
3783                 }
3784         } else {
3785                 struct ice_fltr_mgmt_list_entry *list_itr;
3786
3787                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3788                                     ice_fltr_mgmt_list_entry,
3789                                     list_entry) {
3790                         if (list_itr->vsi_count == 1 &&
3791                             list_itr->vsi_list_info) {
3792                                 map_info = list_itr->vsi_list_info;
3793                                 if (ice_is_bit_set(map_info->vsi_map,
3794                                                    vsi_handle)) {
3795                                         *vsi_list_id = map_info->vsi_list_id;
3796                                         return map_info;
3797                                 }
3798                         }
3799                 }
3800         }
3801         return NULL;
3802 }
3803
3804 /**
3805  * ice_add_rule_internal - add rule for a given lookup type
3806  * @hw: pointer to the hardware structure
3807  * @recp_list: recipe list for which rule has to be added
3808  * @lport: logic port number on which function add rule
3809  * @f_entry: structure containing MAC forwarding information
3810  *
3811  * Adds or updates the rule lists for a given recipe
3812  */
3813 static enum ice_status
3814 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3815                       u8 lport, struct ice_fltr_list_entry *f_entry)
3816 {
3817         struct ice_fltr_info *new_fltr, *cur_fltr;
3818         struct ice_fltr_mgmt_list_entry *m_entry;
3819         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3820         enum ice_status status = ICE_SUCCESS;
3821
3822         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3823                 return ICE_ERR_PARAM;
3824
3825         /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3826         if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3827                 f_entry->fltr_info.fwd_id.hw_vsi_id =
3828                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3829
3830         rule_lock = &recp_list->filt_rule_lock;
3831
3832         ice_acquire_lock(rule_lock);
3833         new_fltr = &f_entry->fltr_info;
3834         if (new_fltr->flag & ICE_FLTR_RX)
3835                 new_fltr->src = lport;
3836         else if (new_fltr->flag & ICE_FLTR_TX)
3837                 new_fltr->src =
3838                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3839
3840         m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3841         if (!m_entry) {
3842                 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3843                 goto exit_add_rule_internal;
3844         }
3845
3846         cur_fltr = &m_entry->fltr_info;
3847         status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3848
3849 exit_add_rule_internal:
3850         ice_release_lock(rule_lock);
3851         return status;
3852 }
3853
3854 /**
3855  * ice_remove_vsi_list_rule
3856  * @hw: pointer to the hardware structure
3857  * @vsi_list_id: VSI list ID generated as part of allocate resource
3858  * @lkup_type: switch rule filter lookup type
3859  *
3860  * The VSI list should be emptied before this function is called to remove the
3861  * VSI list.
3862  */
3863 static enum ice_status
3864 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3865                          enum ice_sw_lkup_type lkup_type)
3866 {
3867         /* Free the vsi_list resource that we allocated. It is assumed that the
3868          * list is empty at this point.
3869          */
3870         return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3871                                             ice_aqc_opc_free_res);
3872 }
3873
3874 /**
3875  * ice_rem_update_vsi_list
3876  * @hw: pointer to the hardware structure
3877  * @vsi_handle: VSI handle of the VSI to remove
3878  * @fm_list: filter management entry for which the VSI list management needs to
3879  *           be done
3880  */
3881 static enum ice_status
3882 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3883                         struct ice_fltr_mgmt_list_entry *fm_list)
3884 {
3885         enum ice_sw_lkup_type lkup_type;
3886         enum ice_status status = ICE_SUCCESS;
3887         u16 vsi_list_id;
3888
3889         if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3890             fm_list->vsi_count == 0)
3891                 return ICE_ERR_PARAM;
3892
3893         /* A rule with the VSI being removed does not exist */
3894         if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3895                 return ICE_ERR_DOES_NOT_EXIST;
3896
3897         lkup_type = fm_list->fltr_info.lkup_type;
3898         vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3899         status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3900                                           ice_aqc_opc_update_sw_rules,
3901                                           lkup_type);
3902         if (status)
3903                 return status;
3904
3905         fm_list->vsi_count--;
3906         ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3907
3908         if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3909                 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3910                 struct ice_vsi_list_map_info *vsi_list_info =
3911                         fm_list->vsi_list_info;
3912                 u16 rem_vsi_handle;
3913
3914                 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3915                                                     ICE_MAX_VSI);
3916                 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3917                         return ICE_ERR_OUT_OF_RANGE;
3918
3919                 /* Make sure VSI list is empty before removing it below */
3920                 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3921                                                   vsi_list_id, true,
3922                                                   ice_aqc_opc_update_sw_rules,
3923                                                   lkup_type);
3924                 if (status)
3925                         return status;
3926
3927                 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3928                 tmp_fltr_info.fwd_id.hw_vsi_id =
3929                         ice_get_hw_vsi_num(hw, rem_vsi_handle);
3930                 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3931                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3932                 if (status) {
3933                         ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3934                                   tmp_fltr_info.fwd_id.hw_vsi_id, status);
3935                         return status;
3936                 }
3937
3938                 fm_list->fltr_info = tmp_fltr_info;
3939         }
3940
3941         if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3942             (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3943                 struct ice_vsi_list_map_info *vsi_list_info =
3944                         fm_list->vsi_list_info;
3945
3946                 /* Remove the VSI list since it is no longer used */
3947                 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3948                 if (status) {
3949                         ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
3950                                   vsi_list_id, status);
3951                         return status;
3952                 }
3953
3954                 LIST_DEL(&vsi_list_info->list_entry);
3955                 ice_free(hw, vsi_list_info);
3956                 fm_list->vsi_list_info = NULL;
3957         }
3958
3959         return status;
3960 }
3961
3962 /**
3963  * ice_remove_rule_internal - Remove a filter rule of a given type
3964  *
3965  * @hw: pointer to the hardware structure
3966  * @recp_list: recipe list for which the rule needs to removed
3967  * @f_entry: rule entry containing filter information
3968  */
3969 static enum ice_status
3970 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3971                          struct ice_fltr_list_entry *f_entry)
3972 {
3973         struct ice_fltr_mgmt_list_entry *list_elem;
3974         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3975         enum ice_status status = ICE_SUCCESS;
3976         bool remove_rule = false;
3977         u16 vsi_handle;
3978
3979         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3980                 return ICE_ERR_PARAM;
3981         f_entry->fltr_info.fwd_id.hw_vsi_id =
3982                 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3983
3984         rule_lock = &recp_list->filt_rule_lock;
3985         ice_acquire_lock(rule_lock);
3986         list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3987                                         &f_entry->fltr_info);
3988         if (!list_elem) {
3989                 status = ICE_ERR_DOES_NOT_EXIST;
3990                 goto exit;
3991         }
3992
3993         if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3994                 remove_rule = true;
3995         } else if (!list_elem->vsi_list_info) {
3996                 status = ICE_ERR_DOES_NOT_EXIST;
3997                 goto exit;
3998         } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3999                 /* a ref_cnt > 1 indicates that the vsi_list is being
4000                  * shared by multiple rules. Decrement the ref_cnt and
4001                  * remove this rule, but do not modify the list, as it
4002                  * is in-use by other rules.
4003                  */
4004                 list_elem->vsi_list_info->ref_cnt--;
4005                 remove_rule = true;
4006         } else {
4007                 /* a ref_cnt of 1 indicates the vsi_list is only used
4008                  * by one rule. However, the original removal request is only
4009                  * for a single VSI. Update the vsi_list first, and only
4010                  * remove the rule if there are no further VSIs in this list.
4011                  */
4012                 vsi_handle = f_entry->fltr_info.vsi_handle;
4013                 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4014                 if (status)
4015                         goto exit;
4016                 /* if VSI count goes to zero after updating the VSI list */
4017                 if (list_elem->vsi_count == 0)
4018                         remove_rule = true;
4019         }
4020
4021         if (remove_rule) {
4022                 /* Remove the lookup rule */
4023                 struct ice_aqc_sw_rules_elem *s_rule;
4024
4025                 s_rule = (struct ice_aqc_sw_rules_elem *)
4026                         ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4027                 if (!s_rule) {
4028                         status = ICE_ERR_NO_MEMORY;
4029                         goto exit;
4030                 }
4031
4032                 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4033                                  ice_aqc_opc_remove_sw_rules);
4034
4035                 status = ice_aq_sw_rules(hw, s_rule,
4036                                          ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4037                                          ice_aqc_opc_remove_sw_rules, NULL);
4038
4039                 /* Remove a book keeping from the list */
4040                 ice_free(hw, s_rule);
4041
4042                 if (status)
4043                         goto exit;
4044
4045                 LIST_DEL(&list_elem->list_entry);
4046                 ice_free(hw, list_elem);
4047         }
4048 exit:
4049         ice_release_lock(rule_lock);
4050         return status;
4051 }
4052
4053 /**
4054  * ice_aq_get_res_alloc - get allocated resources
4055  * @hw: pointer to the HW struct
4056  * @num_entries: pointer to u16 to store the number of resource entries returned
4057  * @buf: pointer to buffer
4058  * @buf_size: size of buf
4059  * @cd: pointer to command details structure or NULL
4060  *
4061  * The caller-supplied buffer must be large enough to store the resource
4062  * information for all resource types. Each resource type is an
4063  * ice_aqc_get_res_resp_elem structure.
4064  */
4065 enum ice_status
4066 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4067                      struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4068                      struct ice_sq_cd *cd)
4069 {
4070         struct ice_aqc_get_res_alloc *resp;
4071         enum ice_status status;
4072         struct ice_aq_desc desc;
4073
4074         if (!buf)
4075                 return ICE_ERR_BAD_PTR;
4076
4077         if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4078                 return ICE_ERR_INVAL_SIZE;
4079
4080         resp = &desc.params.get_res;
4081
4082         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4083         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4084
4085         if (!status && num_entries)
4086                 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4087
4088         return status;
4089 }
4090
4091 /**
4092  * ice_aq_get_res_descs - get allocated resource descriptors
4093  * @hw: pointer to the hardware structure
4094  * @num_entries: number of resource entries in buffer
4095  * @buf: structure to hold response data buffer
4096  * @buf_size: size of buffer
4097  * @res_type: resource type
4098  * @res_shared: is resource shared
4099  * @desc_id: input - first desc ID to start; output - next desc ID
4100  * @cd: pointer to command details structure or NULL
4101  */
4102 enum ice_status
4103 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4104                      struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4105                      bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4106 {
4107         struct ice_aqc_get_allocd_res_desc *cmd;
4108         struct ice_aq_desc desc;
4109         enum ice_status status;
4110
4111         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4112
4113         cmd = &desc.params.get_res_desc;
4114
4115         if (!buf)
4116                 return ICE_ERR_PARAM;
4117
4118         if (buf_size != (num_entries * sizeof(*buf)))
4119                 return ICE_ERR_PARAM;
4120
4121         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4122
4123         cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4124                                          ICE_AQC_RES_TYPE_M) | (res_shared ?
4125                                         ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4126         cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4127
4128         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4129         if (!status)
4130                 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4131
4132         return status;
4133 }
4134
4135 /**
4136  * ice_add_mac_rule - Add a MAC address based filter rule
4137  * @hw: pointer to the hardware structure
4138  * @m_list: list of MAC addresses and forwarding information
4139  * @sw: pointer to switch info struct for which function add rule
4140  * @lport: logic port number on which function add rule
4141  *
4142  * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4143  * multiple unicast addresses, the function assumes that all the
4144  * addresses are unique in a given add_mac call. It doesn't
4145  * check for duplicates in this case, removing duplicates from a given
4146  * list should be taken care of in the caller of this function.
4147  */
4148 static enum ice_status
4149 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4150                  struct ice_switch_info *sw, u8 lport)
4151 {
4152         struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4153         struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4154         struct ice_fltr_list_entry *m_list_itr;
4155         struct LIST_HEAD_TYPE *rule_head;
4156         u16 total_elem_left, s_rule_size;
4157         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4158         enum ice_status status = ICE_SUCCESS;
4159         u16 num_unicast = 0;
4160         u8 elem_sent;
4161
4162         s_rule = NULL;
4163         rule_lock = &recp_list->filt_rule_lock;
4164         rule_head = &recp_list->filt_rules;
4165
4166         LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4167                             list_entry) {
4168                 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4169                 u16 vsi_handle;
4170                 u16 hw_vsi_id;
4171
4172                 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4173                 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4174                 if (!ice_is_vsi_valid(hw, vsi_handle))
4175                         return ICE_ERR_PARAM;
4176                 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4177                 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4178                 /* update the src in case it is VSI num */
4179                 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4180                         return ICE_ERR_PARAM;
4181                 m_list_itr->fltr_info.src = hw_vsi_id;
4182                 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4183                     IS_ZERO_ETHER_ADDR(add))
4184                         return ICE_ERR_PARAM;
4185                 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4186                         /* Don't overwrite the unicast address */
4187                         ice_acquire_lock(rule_lock);
4188                         if (ice_find_rule_entry(rule_head,
4189                                                 &m_list_itr->fltr_info)) {
4190                                 ice_release_lock(rule_lock);
4191                                 return ICE_ERR_ALREADY_EXISTS;
4192                         }
4193                         ice_release_lock(rule_lock);
4194                         num_unicast++;
4195                 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4196                            (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
4197                         m_list_itr->status =
4198                                 ice_add_rule_internal(hw, recp_list, lport,
4199                                                       m_list_itr);
4200                         if (m_list_itr->status)
4201                                 return m_list_itr->status;
4202                 }
4203         }
4204
4205         ice_acquire_lock(rule_lock);
4206         /* Exit if no suitable entries were found for adding bulk switch rule */
4207         if (!num_unicast) {
4208                 status = ICE_SUCCESS;
4209                 goto ice_add_mac_exit;
4210         }
4211
4212         /* Allocate switch rule buffer for the bulk update for unicast */
4213         s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4214         s_rule = (struct ice_aqc_sw_rules_elem *)
4215                 ice_calloc(hw, num_unicast, s_rule_size);
4216         if (!s_rule) {
4217                 status = ICE_ERR_NO_MEMORY;
4218                 goto ice_add_mac_exit;
4219         }
4220
4221         r_iter = s_rule;
4222         LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4223                             list_entry) {
4224                 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4225                 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4226
4227                 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4228                         ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4229                                          ice_aqc_opc_add_sw_rules);
4230                         r_iter = (struct ice_aqc_sw_rules_elem *)
4231                                 ((u8 *)r_iter + s_rule_size);
4232                 }
4233         }
4234
4235         /* Call AQ bulk switch rule update for all unicast addresses */
4236         r_iter = s_rule;
4237         /* Call AQ switch rule in AQ_MAX chunk */
4238         for (total_elem_left = num_unicast; total_elem_left > 0;
4239              total_elem_left -= elem_sent) {
4240                 struct ice_aqc_sw_rules_elem *entry = r_iter;
4241
4242                 elem_sent = MIN_T(u8, total_elem_left,
4243                                   (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4244                 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4245                                          elem_sent, ice_aqc_opc_add_sw_rules,
4246                                          NULL);
4247                 if (status)
4248                         goto ice_add_mac_exit;
4249                 r_iter = (struct ice_aqc_sw_rules_elem *)
4250                         ((u8 *)r_iter + (elem_sent * s_rule_size));
4251         }
4252
4253         /* Fill up rule ID based on the value returned from FW */
4254         r_iter = s_rule;
4255         LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4256                             list_entry) {
4257                 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4258                 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4259                 struct ice_fltr_mgmt_list_entry *fm_entry;
4260
4261                 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4262                         f_info->fltr_rule_id =
4263                                 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4264                         f_info->fltr_act = ICE_FWD_TO_VSI;
4265                         /* Create an entry to track this MAC address */
4266                         fm_entry = (struct ice_fltr_mgmt_list_entry *)
4267                                 ice_malloc(hw, sizeof(*fm_entry));
4268                         if (!fm_entry) {
4269                                 status = ICE_ERR_NO_MEMORY;
4270                                 goto ice_add_mac_exit;
4271                         }
4272                         fm_entry->fltr_info = *f_info;
4273                         fm_entry->vsi_count = 1;
4274                         /* The book keeping entries will get removed when
4275                          * base driver calls remove filter AQ command
4276                          */
4277
4278                         LIST_ADD(&fm_entry->list_entry, rule_head);
4279                         r_iter = (struct ice_aqc_sw_rules_elem *)
4280                                 ((u8 *)r_iter + s_rule_size);
4281                 }
4282         }
4283
4284 ice_add_mac_exit:
4285         ice_release_lock(rule_lock);
4286         if (s_rule)
4287                 ice_free(hw, s_rule);
4288         return status;
4289 }
4290
4291 /**
4292  * ice_add_mac - Add a MAC address based filter rule
4293  * @hw: pointer to the hardware structure
4294  * @m_list: list of MAC addresses and forwarding information
4295  *
4296  * Function add MAC rule for logical port from HW struct
4297  */
4298 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4299 {
4300         if (!m_list || !hw)
4301                 return ICE_ERR_PARAM;
4302
4303         return ice_add_mac_rule(hw, m_list, hw->switch_info,
4304                                 hw->port_info->lport);
4305 }
4306
4307 /**
4308  * ice_add_vlan_internal - Add one VLAN based filter rule
4309  * @hw: pointer to the hardware structure
4310  * @recp_list: recipe list for which rule has to be added
4311  * @f_entry: filter entry containing one VLAN information
4312  */
4313 static enum ice_status
4314 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4315                       struct ice_fltr_list_entry *f_entry)
4316 {
4317         struct ice_fltr_mgmt_list_entry *v_list_itr;
4318         struct ice_fltr_info *new_fltr, *cur_fltr;
4319         enum ice_sw_lkup_type lkup_type;
4320         u16 vsi_list_id = 0, vsi_handle;
4321         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4322         enum ice_status status = ICE_SUCCESS;
4323
4324         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4325                 return ICE_ERR_PARAM;
4326
4327         f_entry->fltr_info.fwd_id.hw_vsi_id =
4328                 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4329         new_fltr = &f_entry->fltr_info;
4330
4331         /* VLAN ID should only be 12 bits */
4332         if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4333                 return ICE_ERR_PARAM;
4334
4335         if (new_fltr->src_id != ICE_SRC_ID_VSI)
4336                 return ICE_ERR_PARAM;
4337
4338         new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4339         lkup_type = new_fltr->lkup_type;
4340         vsi_handle = new_fltr->vsi_handle;
4341         rule_lock = &recp_list->filt_rule_lock;
4342         ice_acquire_lock(rule_lock);
4343         v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4344         if (!v_list_itr) {
4345                 struct ice_vsi_list_map_info *map_info = NULL;
4346
4347                 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4348                         /* All VLAN pruning rules use a VSI list. Check if
4349                          * there is already a VSI list containing VSI that we
4350                          * want to add. If found, use the same vsi_list_id for
4351                          * this new VLAN rule or else create a new list.
4352                          */
4353                         map_info = ice_find_vsi_list_entry(recp_list,
4354                                                            vsi_handle,
4355                                                            &vsi_list_id);
4356                         if (!map_info) {
4357                                 status = ice_create_vsi_list_rule(hw,
4358                                                                   &vsi_handle,
4359                                                                   1,
4360                                                                   &vsi_list_id,
4361                                                                   lkup_type);
4362                                 if (status)
4363                                         goto exit;
4364                         }
4365                         /* Convert the action to forwarding to a VSI list. */
4366                         new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4367                         new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4368                 }
4369
4370                 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4371                 if (!status) {
4372                         v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4373                                                          new_fltr);
4374                         if (!v_list_itr) {
4375                                 status = ICE_ERR_DOES_NOT_EXIST;
4376                                 goto exit;
4377                         }
4378                         /* reuse VSI list for new rule and increment ref_cnt */
4379                         if (map_info) {
4380                                 v_list_itr->vsi_list_info = map_info;
4381                                 map_info->ref_cnt++;
4382                         } else {
4383                                 v_list_itr->vsi_list_info =
4384                                         ice_create_vsi_list_map(hw, &vsi_handle,
4385                                                                 1, vsi_list_id);
4386                         }
4387                 }
4388         } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4389                 /* Update existing VSI list to add new VSI ID only if it used
4390                  * by one VLAN rule.
4391                  */
4392                 cur_fltr = &v_list_itr->fltr_info;
4393                 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4394                                                  new_fltr);
4395         } else {
4396                 /* If VLAN rule exists and VSI list being used by this rule is
4397                  * referenced by more than 1 VLAN rule. Then create a new VSI
4398                  * list appending previous VSI with new VSI and update existing
4399                  * VLAN rule to point to new VSI list ID
4400                  */
4401                 struct ice_fltr_info tmp_fltr;
4402                 u16 vsi_handle_arr[2];
4403                 u16 cur_handle;
4404
4405                 /* Current implementation only supports reusing VSI list with
4406                  * one VSI count. We should never hit below condition
4407                  */
4408                 if (v_list_itr->vsi_count > 1 &&
4409                     v_list_itr->vsi_list_info->ref_cnt > 1) {
4410                         ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4411                         status = ICE_ERR_CFG;
4412                         goto exit;
4413                 }
4414
4415                 cur_handle =
4416                         ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4417                                            ICE_MAX_VSI);
4418
4419                 /* A rule already exists with the new VSI being added */
4420                 if (cur_handle == vsi_handle) {
4421                         status = ICE_ERR_ALREADY_EXISTS;
4422                         goto exit;
4423                 }
4424
4425                 vsi_handle_arr[0] = cur_handle;
4426                 vsi_handle_arr[1] = vsi_handle;
4427                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4428                                                   &vsi_list_id, lkup_type);
4429                 if (status)
4430                         goto exit;
4431
4432                 tmp_fltr = v_list_itr->fltr_info;
4433                 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4434                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4435                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4436                 /* Update the previous switch rule to a new VSI list which
4437                  * includes current VSI that is requested
4438                  */
4439                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4440                 if (status)
4441                         goto exit;
4442
4443                 /* before overriding VSI list map info. decrement ref_cnt of
4444                  * previous VSI list
4445                  */
4446                 v_list_itr->vsi_list_info->ref_cnt--;
4447
4448                 /* now update to newly created list */
4449                 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4450                 v_list_itr->vsi_list_info =
4451                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4452                                                 vsi_list_id);
4453                 v_list_itr->vsi_count++;
4454         }
4455
4456 exit:
4457         ice_release_lock(rule_lock);
4458         return status;
4459 }
4460
4461 /**
4462  * ice_add_vlan_rule - Add VLAN based filter rule
4463  * @hw: pointer to the hardware structure
4464  * @v_list: list of VLAN entries and forwarding information
4465  * @sw: pointer to switch info struct for which function add rule
4466  */
4467 static enum ice_status
4468 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4469                   struct ice_switch_info *sw)
4470 {
4471         struct ice_fltr_list_entry *v_list_itr;
4472         struct ice_sw_recipe *recp_list;
4473
4474         recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4475         LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4476                             list_entry) {
4477                 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4478                         return ICE_ERR_PARAM;
4479                 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4480                 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4481                                                            v_list_itr);
4482                 if (v_list_itr->status)
4483                         return v_list_itr->status;
4484         }
4485         return ICE_SUCCESS;
4486 }
4487
4488 /**
4489  * ice_add_vlan - Add a VLAN based filter rule
4490  * @hw: pointer to the hardware structure
4491  * @v_list: list of VLAN and forwarding information
4492  *
4493  * Function add VLAN rule for logical port from HW struct
4494  */
4495 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4496 {
4497         if (!v_list || !hw)
4498                 return ICE_ERR_PARAM;
4499
4500         return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4501 }
4502
4503 /**
4504  * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4505  * @hw: pointer to the hardware structure
4506  * @mv_list: list of MAC and VLAN filters
4507  * @sw: pointer to switch info struct for which function add rule
4508  * @lport: logic port number on which function add rule
4509  *
4510  * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4511  * pruning bits enabled, then it is the responsibility of the caller to make
4512  * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4513  * VLAN won't be received on that VSI otherwise.
4514  */
4515 static enum ice_status
4516 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4517                       struct ice_switch_info *sw, u8 lport)
4518 {
4519         struct ice_fltr_list_entry *mv_list_itr;
4520         struct ice_sw_recipe *recp_list;
4521
4522         if (!mv_list || !hw)
4523                 return ICE_ERR_PARAM;
4524
4525         recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
4526         LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4527                             list_entry) {
4528                 enum ice_sw_lkup_type l_type =
4529                         mv_list_itr->fltr_info.lkup_type;
4530
4531                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4532                         return ICE_ERR_PARAM;
4533                 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
4534                 mv_list_itr->status =
4535                         ice_add_rule_internal(hw, recp_list, lport,
4536                                               mv_list_itr);
4537                 if (mv_list_itr->status)
4538                         return mv_list_itr->status;
4539         }
4540         return ICE_SUCCESS;
4541 }
4542
4543 /**
4544  * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4545  * @hw: pointer to the hardware structure
4546  * @mv_list: list of MAC VLAN addresses and forwarding information
4547  *
4548  * Function add MAC VLAN rule for logical port from HW struct
4549  */
4550 enum ice_status
4551 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4552 {
4553         if (!mv_list || !hw)
4554                 return ICE_ERR_PARAM;
4555
4556         return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4557                                      hw->port_info->lport);
4558 }
4559
4560 /**
4561  * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4562  * @hw: pointer to the hardware structure
4563  * @em_list: list of ether type MAC filter, MAC is optional
4564  * @sw: pointer to switch info struct for which function add rule
4565  * @lport: logic port number on which function add rule
4566  *
4567  * This function requires the caller to populate the entries in
4568  * the filter list with the necessary fields (including flags to
4569  * indicate Tx or Rx rules).
4570  */
4571 static enum ice_status
4572 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4573                      struct ice_switch_info *sw, u8 lport)
4574 {
4575         struct ice_fltr_list_entry *em_list_itr;
4576
4577         LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4578                             list_entry) {
4579                 struct ice_sw_recipe *recp_list;
4580                 enum ice_sw_lkup_type l_type;
4581
4582                 l_type = em_list_itr->fltr_info.lkup_type;
4583                 recp_list = &sw->recp_list[l_type];
4584
4585                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4586                     l_type != ICE_SW_LKUP_ETHERTYPE)
4587                         return ICE_ERR_PARAM;
4588
4589                 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
4590                                                             lport,
4591                                                             em_list_itr);
4592                 if (em_list_itr->status)
4593                         return em_list_itr->status;
4594         }
4595         return ICE_SUCCESS;
4596 }
4597
4598 /**
4599  * ice_add_eth_mac - Add a ethertype based filter rule
4600  * @hw: pointer to the hardware structure
4601  * @em_list: list of ethertype and forwarding information
4602  *
4603  * Function add ethertype rule for logical port from HW struct
4604  */
4605 enum ice_status
4606 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4607 {
4608         if (!em_list || !hw)
4609                 return ICE_ERR_PARAM;
4610
4611         return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4612                                     hw->port_info->lport);
4613 }
4614
4615 /**
4616  * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4617  * @hw: pointer to the hardware structure
4618  * @em_list: list of ethertype or ethertype MAC entries
4619  * @sw: pointer to switch info struct for which function add rule
4620  */
4621 static enum ice_status
4622 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4623                         struct ice_switch_info *sw)
4624 {
4625         struct ice_fltr_list_entry *em_list_itr, *tmp;
4626
4627         LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4628                                  list_entry) {
4629                 struct ice_sw_recipe *recp_list;
4630                 enum ice_sw_lkup_type l_type;
4631
4632                 l_type = em_list_itr->fltr_info.lkup_type;
4633
4634                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4635                     l_type != ICE_SW_LKUP_ETHERTYPE)
4636                         return ICE_ERR_PARAM;
4637
4638                 recp_list = &sw->recp_list[l_type];
4639                 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4640                                                                em_list_itr);
4641                 if (em_list_itr->status)
4642                         return em_list_itr->status;
4643         }
4644         return ICE_SUCCESS;
4645 }
4646
4647 /**
4648  * ice_remove_eth_mac - remove a ethertype based filter rule
4649  * @hw: pointer to the hardware structure
4650  * @em_list: list of ethertype and forwarding information
4651  *
4652  */
4653 enum ice_status
4654 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4655 {
4656         if (!em_list || !hw)
4657                 return ICE_ERR_PARAM;
4658
4659         return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4660 }
4661
4662 /**
4663  * ice_rem_sw_rule_info
4664  * @hw: pointer to the hardware structure
4665  * @rule_head: pointer to the switch list structure that we want to delete
4666  */
4667 static void
4668 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4669 {
4670         if (!LIST_EMPTY(rule_head)) {
4671                 struct ice_fltr_mgmt_list_entry *entry;
4672                 struct ice_fltr_mgmt_list_entry *tmp;
4673
4674                 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4675                                          ice_fltr_mgmt_list_entry, list_entry) {
4676                         LIST_DEL(&entry->list_entry);
4677                         ice_free(hw, entry);
4678                 }
4679         }
4680 }
4681
4682 /**
4683  * ice_rem_adv_rule_info
4684  * @hw: pointer to the hardware structure
4685  * @rule_head: pointer to the switch list structure that we want to delete
4686  */
4687 static void
4688 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4689 {
4690         struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4691         struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4692
4693         if (LIST_EMPTY(rule_head))
4694                 return;
4695
4696         LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4697                                  ice_adv_fltr_mgmt_list_entry, list_entry) {
4698                 LIST_DEL(&lst_itr->list_entry);
4699                 ice_free(hw, lst_itr->lkups);
4700                 ice_free(hw, lst_itr);
4701         }
4702 }
4703
4704 /**
4705  * ice_rem_all_sw_rules_info
4706  * @hw: pointer to the hardware structure
4707  */
4708 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4709 {
4710         struct ice_switch_info *sw = hw->switch_info;
4711         u8 i;
4712
4713         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4714                 struct LIST_HEAD_TYPE *rule_head;
4715
4716                 rule_head = &sw->recp_list[i].filt_rules;
4717                 if (!sw->recp_list[i].adv_rule)
4718                         ice_rem_sw_rule_info(hw, rule_head);
4719                 else
4720                         ice_rem_adv_rule_info(hw, rule_head);
4721                 if (sw->recp_list[i].adv_rule &&
4722                     LIST_EMPTY(&sw->recp_list[i].filt_rules))
4723                         sw->recp_list[i].adv_rule = false;
4724         }
4725 }
4726
/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @pi: pointer to the port_info structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add filter rule to set/unset given VSI as default VSI for the switch
 * (represented by swid)
 */
enum ice_status
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
		 u8 direction)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	struct ice_hw *hw = pi->hw;
	enum ice_adminq_opc opcode;
	enum ice_status status;
	u16 s_rule_size;
	u16 hw_vsi_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	/* Adding a rule needs room for the ethernet header; removal
	 * references the rule by index only, so no header is carried.
	 */
	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
		ICE_SW_RULE_RX_TX_NO_HDR_SIZE;

	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	/* Source depends on direction: RX rules match on the logical
	 * port, TX rules match on the HW VSI number. On removal, reuse
	 * the rule ID cached in the port_info at add time.
	 */
	if (f_info.flag & ICE_FLTR_RX) {
		f_info.src = pi->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
		if (!set)
			f_info.fltr_rule_id =
				pi->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
		if (!set)
			f_info.fltr_rule_id =
				pi->dflt_tx_vsi_rule_id;
	}

	if (set)
		opcode = ice_aqc_opc_add_sw_rules;
	else
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	/* Skip port bookkeeping if the AQ call failed or no valid
	 * direction flag was supplied.
	 */
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		goto out;
	if (set) {
		/* Cache the HW-assigned rule index so the rule can be
		 * removed later.
		 */
		u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			pi->dflt_tx_vsi_num = hw_vsi_id;
			pi->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			pi->dflt_rx_vsi_num = hw_vsi_id;
			pi->dflt_rx_vsi_rule_id = index;
		}
	} else {
		/* Invalidate the cached default-VSI state on removal */
		if (f_info.flag & ICE_FLTR_TX) {
			pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
		}
	}

out:
	ice_free(hw, s_rule);
	return status;
}
4815
4816 /**
4817  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4818  * @list_head: head of rule list
4819  * @f_info: rule information
4820  *
4821  * Helper function to search for a unicast rule entry - this is to be used
4822  * to remove unicast MAC filter that is not shared with other VSIs on the
4823  * PF switch.
4824  *
4825  * Returns pointer to entry storing the rule if found
4826  */
4827 static struct ice_fltr_mgmt_list_entry *
4828 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4829                           struct ice_fltr_info *f_info)
4830 {
4831         struct ice_fltr_mgmt_list_entry *list_itr;
4832
4833         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4834                             list_entry) {
4835                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4836                             sizeof(f_info->l_data)) &&
4837                     f_info->fwd_id.hw_vsi_id ==
4838                     list_itr->fltr_info.fwd_id.hw_vsi_id &&
4839                     f_info->flag == list_itr->fltr_info.flag)
4840                         return list_itr;
4841         }
4842         return NULL;
4843 }
4844
/**
 * ice_remove_mac_rule - remove a MAC based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 * @recp_list: list from which function remove MAC address
 *
 * This function removes either a MAC filter rule or a specific VSI from a
 * VSI list for a multicast MAC address.
 *
 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
 * ice_add_mac. Caller should be aware that this call will only work if all
 * the entries passed into m_list were added previously. It will not attempt to
 * do a partial remove of entries that were found.
 */
static enum ice_status
ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
		    struct ice_sw_recipe *recp_list)
{
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */

	if (!m_list)
		return ICE_ERR_PARAM;

	rule_lock = &recp_list->filt_rule_lock;
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
				 list_entry) {
		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;

		/* Only plain MAC lookups are valid on this list */
		if (l_type != ICE_SW_LKUP_MAC)
			return ICE_ERR_PARAM;

		vsi_handle = list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;

		/* Resolve the software handle to the HW VSI number used
		 * by the rule-matching helpers below.
		 */
		list_itr->fltr_info.fwd_id.hw_vsi_id =
					ice_get_hw_vsi_num(hw, vsi_handle);
		if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
			/* Don't remove the unicast address that belongs to
			 * another VSI on the switch, since it is not being
			 * shared...
			 */
			/* Hold the rule lock only for the existence check */
			ice_acquire_lock(rule_lock);
			if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
						       &list_itr->fltr_info)) {
				ice_release_lock(rule_lock);
				return ICE_ERR_DOES_NOT_EXIST;
			}
			ice_release_lock(rule_lock);
		}
		list_itr->status = ice_remove_rule_internal(hw, recp_list,
							    list_itr);
		if (list_itr->status)
			return list_itr->status;
	}
	return ICE_SUCCESS;
}
4905
4906 /**
4907  * ice_remove_mac - remove a MAC address based filter rule
4908  * @hw: pointer to the hardware structure
4909  * @m_list: list of MAC addresses and forwarding information
4910  *
4911  */
4912 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4913 {
4914         struct ice_sw_recipe *recp_list;
4915
4916         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4917         return ice_remove_mac_rule(hw, m_list, recp_list);
4918 }
4919
4920 /**
4921  * ice_remove_vlan_rule - Remove VLAN based filter rule
4922  * @hw: pointer to the hardware structure
4923  * @v_list: list of VLAN entries and forwarding information
4924  * @recp_list: list from which function remove VLAN
4925  */
4926 static enum ice_status
4927 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4928                      struct ice_sw_recipe *recp_list)
4929 {
4930         struct ice_fltr_list_entry *v_list_itr, *tmp;
4931
4932         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4933                                  list_entry) {
4934                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4935
4936                 if (l_type != ICE_SW_LKUP_VLAN)
4937                         return ICE_ERR_PARAM;
4938                 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4939                                                               v_list_itr);
4940                 if (v_list_itr->status)
4941                         return v_list_itr->status;
4942         }
4943         return ICE_SUCCESS;
4944 }
4945
4946 /**
4947  * ice_remove_vlan - remove a VLAN address based filter rule
4948  * @hw: pointer to the hardware structure
4949  * @v_list: list of VLAN and forwarding information
4950  *
4951  */
4952 enum ice_status
4953 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4954 {
4955         struct ice_sw_recipe *recp_list;
4956
4957         if (!v_list || !hw)
4958                 return ICE_ERR_PARAM;
4959
4960         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4961         return ice_remove_vlan_rule(hw, v_list, recp_list);
4962 }
4963
4964 /**
4965  * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4966  * @hw: pointer to the hardware structure
4967  * @v_list: list of MAC VLAN entries and forwarding information
4968  * @recp_list: list from which function remove MAC VLAN
4969  */
4970 static enum ice_status
4971 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4972                          struct ice_sw_recipe *recp_list)
4973 {
4974         struct ice_fltr_list_entry *v_list_itr, *tmp;
4975
4976         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4977         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4978                                  list_entry) {
4979                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4980
4981                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4982                         return ICE_ERR_PARAM;
4983                 v_list_itr->status =
4984                         ice_remove_rule_internal(hw, recp_list,
4985                                                  v_list_itr);
4986                 if (v_list_itr->status)
4987                         return v_list_itr->status;
4988         }
4989         return ICE_SUCCESS;
4990 }
4991
4992 /**
4993  * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4994  * @hw: pointer to the hardware structure
4995  * @mv_list: list of MAC VLAN and forwarding information
4996  */
4997 enum ice_status
4998 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4999 {
5000         struct ice_sw_recipe *recp_list;
5001
5002         if (!mv_list || !hw)
5003                 return ICE_ERR_PARAM;
5004
5005         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5006         return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5007 }
5008
5009 /**
5010  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5011  * @fm_entry: filter entry to inspect
5012  * @vsi_handle: VSI handle to compare with filter info
5013  */
5014 static bool
5015 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5016 {
5017         return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5018                  fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5019                 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5020                  (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5021                                  vsi_handle))));
5022 }
5023
5024 /**
5025  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5026  * @hw: pointer to the hardware structure
5027  * @vsi_handle: VSI handle to remove filters from
5028  * @vsi_list_head: pointer to the list to add entry to
5029  * @fi: pointer to fltr_info of filter entry to copy & add
5030  *
5031  * Helper function, used when creating a list of filters to remove from
5032  * a specific VSI. The entry added to vsi_list_head is a COPY of the
5033  * original filter entry, with the exception of fltr_info.fltr_act and
5034  * fltr_info.fwd_id fields. These are set such that later logic can
5035  * extract which VSI to remove the fltr from, and pass on that information.
5036  */
5037 static enum ice_status
5038 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5039                                struct LIST_HEAD_TYPE *vsi_list_head,
5040                                struct ice_fltr_info *fi)
5041 {
5042         struct ice_fltr_list_entry *tmp;
5043
5044         /* this memory is freed up in the caller function
5045          * once filters for this VSI are removed
5046          */
5047         tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5048         if (!tmp)
5049                 return ICE_ERR_NO_MEMORY;
5050
5051         tmp->fltr_info = *fi;
5052
5053         /* Overwrite these fields to indicate which VSI to remove filter from,
5054          * so find and remove logic can extract the information from the
5055          * list entries. Note that original entries will still have proper
5056          * values.
5057          */
5058         tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5059         tmp->fltr_info.vsi_handle = vsi_handle;
5060         tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5061
5062         LIST_ADD(&tmp->list_entry, vsi_list_head);
5063
5064         return ICE_SUCCESS;
5065 }
5066
5067 /**
5068  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5069  * @hw: pointer to the hardware structure
5070  * @vsi_handle: VSI handle to remove filters from
5071  * @lkup_list_head: pointer to the list that has certain lookup type filters
5072  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5073  *
5074  * Locates all filters in lkup_list_head that are used by the given VSI,
5075  * and adds COPIES of those entries to vsi_list_head (intended to be used
5076  * to remove the listed filters).
5077  * Note that this means all entries in vsi_list_head must be explicitly
5078  * deallocated by the caller when done with list.
5079  */
5080 static enum ice_status
5081 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5082                          struct LIST_HEAD_TYPE *lkup_list_head,
5083                          struct LIST_HEAD_TYPE *vsi_list_head)
5084 {
5085         struct ice_fltr_mgmt_list_entry *fm_entry;
5086         enum ice_status status = ICE_SUCCESS;
5087
5088         /* check to make sure VSI ID is valid and within boundary */
5089         if (!ice_is_vsi_valid(hw, vsi_handle))
5090                 return ICE_ERR_PARAM;
5091
5092         LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5093                             ice_fltr_mgmt_list_entry, list_entry) {
5094                 struct ice_fltr_info *fi;
5095
5096                 fi = &fm_entry->fltr_info;
5097                 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
5098                         continue;
5099
5100                 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5101                                                         vsi_list_head, fi);
5102                 if (status)
5103                         return status;
5104         }
5105         return status;
5106 }
5107
5108 /**
5109  * ice_determine_promisc_mask
5110  * @fi: filter info to parse
5111  *
5112  * Helper function to determine which ICE_PROMISC_ mask corresponds
5113  * to given filter into.
5114  */
5115 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5116 {
5117         u16 vid = fi->l_data.mac_vlan.vlan_id;
5118         u8 *macaddr = fi->l_data.mac.mac_addr;
5119         bool is_tx_fltr = false;
5120         u8 promisc_mask = 0;
5121
5122         if (fi->flag == ICE_FLTR_TX)
5123                 is_tx_fltr = true;
5124
5125         if (IS_BROADCAST_ETHER_ADDR(macaddr))
5126                 promisc_mask |= is_tx_fltr ?
5127                         ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5128         else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5129                 promisc_mask |= is_tx_fltr ?
5130                         ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5131         else if (IS_UNICAST_ETHER_ADDR(macaddr))
5132                 promisc_mask |= is_tx_fltr ?
5133                         ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
5134         if (vid)
5135                 promisc_mask |= is_tx_fltr ?
5136                         ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5137
5138         return promisc_mask;
5139 }
5140
5141 /**
5142  * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5143  * @hw: pointer to the hardware structure
5144  * @vsi_handle: VSI handle to retrieve info from
5145  * @promisc_mask: pointer to mask to be filled in
5146  * @vid: VLAN ID of promisc VLAN VSI
5147  * @sw: pointer to switch info struct for which function add rule
5148  */
5149 static enum ice_status
5150 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5151                      u16 *vid, struct ice_switch_info *sw)
5152 {
5153         struct ice_fltr_mgmt_list_entry *itr;
5154         struct LIST_HEAD_TYPE *rule_head;
5155         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5156
5157         if (!ice_is_vsi_valid(hw, vsi_handle))
5158                 return ICE_ERR_PARAM;
5159
5160         *vid = 0;
5161         *promisc_mask = 0;
5162         rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5163         rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5164
5165         ice_acquire_lock(rule_lock);
5166         LIST_FOR_EACH_ENTRY(itr, rule_head,
5167                             ice_fltr_mgmt_list_entry, list_entry) {
5168                 /* Continue if this filter doesn't apply to this VSI or the
5169                  * VSI ID is not in the VSI map for this filter
5170                  */
5171                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5172                         continue;
5173
5174                 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5175         }
5176         ice_release_lock(rule_lock);
5177
5178         return ICE_SUCCESS;
5179 }
5180
5181 /**
5182  * ice_get_vsi_promisc - get promiscuous mode of given VSI
5183  * @hw: pointer to the hardware structure
5184  * @vsi_handle: VSI handle to retrieve info from
5185  * @promisc_mask: pointer to mask to be filled in
5186  * @vid: VLAN ID of promisc VLAN VSI
5187  */
5188 enum ice_status
5189 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5190                     u16 *vid)
5191 {
5192         return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5193                                     vid, hw->switch_info);
5194 }
5195
/**
 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to retrieve info from
 * @promisc_mask: pointer to mask to be filled in
 * @vid: VLAN ID of promisc VLAN VSI
 * @sw: pointer to switch info struct for which function add rule
 *
 * Accumulates the promisc mask bits of every ICE_SW_LKUP_PROMISC_VLAN
 * rule that applies to @vsi_handle, under the recipe's rule lock.
 * NOTE(review): *vid is initialized to 0 but never updated from the
 * matched filters here — callers should not rely on it; confirm intent.
 */
static enum ice_status
_ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
			  u16 *vid, struct ice_switch_info *sw)
{
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	*vid = 0;
	*promisc_mask = 0;
	rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;

	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
			    list_entry) {
		/* Continue if this filter doesn't apply to this VSI or the
		 * VSI ID is not in the VSI map for this filter
		 */
		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;

		*promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
	}
	ice_release_lock(rule_lock);

	return ICE_SUCCESS;
}
5235
5236 /**
5237  * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5238  * @hw: pointer to the hardware structure
5239  * @vsi_handle: VSI handle to retrieve info from
5240  * @promisc_mask: pointer to mask to be filled in
5241  * @vid: VLAN ID of promisc VLAN VSI
5242  */
5243 enum ice_status
5244 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5245                          u16 *vid)
5246 {
5247         return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5248                                          vid, hw->switch_info);
5249 }
5250
5251 /**
5252  * ice_remove_promisc - Remove promisc based filter rules
5253  * @hw: pointer to the hardware structure
5254  * @recp_id: recipe ID for which the rule needs to removed
5255  * @v_list: list of promisc entries
5256  */
5257 static enum ice_status
5258 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5259                    struct LIST_HEAD_TYPE *v_list)
5260 {
5261         struct ice_fltr_list_entry *v_list_itr, *tmp;
5262         struct ice_sw_recipe *recp_list;
5263
5264         recp_list = &hw->switch_info->recp_list[recp_id];
5265         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5266                                  list_entry) {
5267                 v_list_itr->status =
5268                         ice_remove_rule_internal(hw, recp_list, v_list_itr);
5269                 if (v_list_itr->status)
5270                         return v_list_itr->status;
5271         }
5272         return ICE_SUCCESS;
5273 }
5274
/**
 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 * @sw: pointer to switch info struct for which function add rule
 *
 * Builds a temporary list of COPIES of the promisc filters that match
 * @promisc_mask (and @vid for VLAN promisc) on @vsi_handle, removes them,
 * and frees the copies regardless of outcome.
 */
static enum ice_status
_ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		       u16 vid, struct ice_switch_info *sw)
{
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct LIST_HEAD_TYPE remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* VLAN promisc bits live under a different recipe than plain
	 * promisc bits.
	 */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	/* Collect matching filters into remove_list_head under the lock;
	 * the actual removal happens after the lock is dropped.
	 */
	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		/* For VLAN promisc, only clear rules for the given VID */
		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			ice_release_lock(rule_lock);
			goto free_fltr_list;
		}
	}
	ice_release_lock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	/* Free the copied entries on both success and error paths */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
	}

	return status;
}
5349
5350 /**
5351  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5352  * @hw: pointer to the hardware structure
5353  * @vsi_handle: VSI handle to clear mode
5354  * @promisc_mask: mask of promiscuous config bits to clear
5355  * @vid: VLAN ID to clear VLAN promiscuous
5356  */
5357 enum ice_status
5358 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5359                       u8 promisc_mask, u16 vid)
5360 {
5361         return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5362                                       vid, hw->switch_info);
5363 }
5364
5365 /**
5366  * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5367  * @hw: pointer to the hardware structure
5368  * @vsi_handle: VSI handle to configure
5369  * @promisc_mask: mask of promiscuous config bits
5370  * @vid: VLAN ID to set VLAN promiscuous
5371  * @lport: logical port number to configure promisc mode
5372  * @sw: pointer to switch info struct for which function add rule
5373  */
static enum ice_status
_ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		     u16 vid, u8 lport, struct ice_switch_info *sw)
{
	/* Local packet-type selector values used only inside this function */
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	enum ice_status status = ICE_SUCCESS;
	bool is_tx_fltr;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);

	/* VLAN promiscuous mode uses its own recipe and carries the VLAN ID
	 * in the filter data; otherwise use the plain promiscuous recipe.
	 */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		struct ice_sw_recipe *recp_list;
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		/* Consume exactly one UCAST/MCAST/BCAST bit per iteration;
		 * the if/else-if order fixes the processing priority.
		 */
		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type; if only VLAN bits were
		 * present, pkt_type stays 0 and the DA remains all-zero from
		 * the memset above.
		 */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
				   ICE_NONDMA_TO_NONDMA);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1;	/* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;
		recp_list = &sw->recp_list[recipe_id];

		status = ice_add_rule_internal(hw, recp_list, lport,
					       &f_list_entry);
		if (status != ICE_SUCCESS)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}
5485
5486 /**
5487  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5488  * @hw: pointer to the hardware structure
5489  * @vsi_handle: VSI handle to configure
5490  * @promisc_mask: mask of promiscuous config bits
5491  * @vid: VLAN ID to set VLAN promiscuous
5492  */
5493 enum ice_status
5494 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5495                     u16 vid)
5496 {
5497         return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5498                                     hw->port_info->lport,
5499                                     hw->switch_info);
5500 }
5501
/**
 * _ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 * @lport: logical port number to configure promisc mode
 * @sw: pointer to the switch info struct for which the function adds the rule
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 */
static enum ice_status
_ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			  bool rm_vlan_promisc, u8 lport,
			  struct ice_switch_info *sw)
{
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct LIST_HEAD_TYPE vsi_list_head;
	struct LIST_HEAD_TYPE *vlan_head;
	struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
	enum ice_status status;
	u16 vlan_id;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	/* Snapshot this VSI's VLAN filters under the lock; the copies in
	 * vsi_list_head are then processed without holding it.
	 */
	ice_acquire_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	ice_release_lock(vlan_lock);
	if (status)
		goto free_fltr_list;

	/* Set or clear promiscuous mode for each VLAN found on the VSI;
	 * stop at the first failure.
	 */
	LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
			    list_entry) {
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status =  _ice_clear_vsi_promisc(hw, vsi_handle,
							 promisc_mask,
							 vlan_id, sw);
		else
			status =  _ice_set_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id,
						       lport, sw);
		if (status)
			break;
	}

free_fltr_list:
	/* Release the temporary per-VSI filter copies made above */
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&list_itr->list_entry);
		ice_free(hw, list_itr);
	}
	return status;
}
5558
5559 /**
5560  * ice_set_vlan_vsi_promisc
5561  * @hw: pointer to the hardware structure
5562  * @vsi_handle: VSI handle to configure
5563  * @promisc_mask: mask of promiscuous config bits
5564  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5565  *
5566  * Configure VSI with all associated VLANs to given promiscuous mode(s)
5567  */
5568 enum ice_status
5569 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5570                          bool rm_vlan_promisc)
5571 {
5572         return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5573                                          rm_vlan_promisc, hw->port_info->lport,
5574                                          hw->switch_info);
5575 }
5576
5577 /**
5578  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
5579  * @hw: pointer to the hardware structure
5580  * @vsi_handle: VSI handle to remove filters from
5581  * @recp_list: recipe list from which function remove fltr
5582  * @lkup: switch rule filter lookup type
5583  */
5584 static void
5585 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
5586                          struct ice_sw_recipe *recp_list,
5587                          enum ice_sw_lkup_type lkup)
5588 {
5589         struct ice_fltr_list_entry *fm_entry;
5590         struct LIST_HEAD_TYPE remove_list_head;
5591         struct LIST_HEAD_TYPE *rule_head;
5592         struct ice_fltr_list_entry *tmp;
5593         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5594         enum ice_status status;
5595
5596         INIT_LIST_HEAD(&remove_list_head);
5597         rule_lock = &recp_list[lkup].filt_rule_lock;
5598         rule_head = &recp_list[lkup].filt_rules;
5599         ice_acquire_lock(rule_lock);
5600         status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
5601                                           &remove_list_head);
5602         ice_release_lock(rule_lock);
5603         if (status)
5604                 return;
5605
5606         switch (lkup) {
5607         case ICE_SW_LKUP_MAC:
5608                 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
5609                 break;
5610         case ICE_SW_LKUP_VLAN:
5611                 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
5612                 break;
5613         case ICE_SW_LKUP_PROMISC:
5614         case ICE_SW_LKUP_PROMISC_VLAN:
5615                 ice_remove_promisc(hw, lkup, &remove_list_head);
5616                 break;
5617         case ICE_SW_LKUP_MAC_VLAN:
5618                 ice_remove_mac_vlan(hw, &remove_list_head);
5619                 break;
5620         case ICE_SW_LKUP_ETHERTYPE:
5621         case ICE_SW_LKUP_ETHERTYPE_MAC:
5622                 ice_remove_eth_mac(hw, &remove_list_head);
5623                 break;
5624         case ICE_SW_LKUP_DFLT:
5625                 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
5626                 break;
5627         case ICE_SW_LKUP_LAST:
5628                 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
5629                 break;
5630         }
5631
5632         LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5633                                  ice_fltr_list_entry, list_entry) {
5634                 LIST_DEL(&fm_entry->list_entry);
5635                 ice_free(hw, fm_entry);
5636         }
5637 }
5638
5639 /**
5640  * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5641  * @hw: pointer to the hardware structure
5642  * @vsi_handle: VSI handle to remove filters from
5643  * @sw: pointer to switch info struct
5644  */
5645 static void
5646 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5647                          struct ice_switch_info *sw)
5648 {
5649         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5650
5651         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5652                                  sw->recp_list, ICE_SW_LKUP_MAC);
5653         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5654                                  sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5655         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5656                                  sw->recp_list, ICE_SW_LKUP_PROMISC);
5657         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5658                                  sw->recp_list, ICE_SW_LKUP_VLAN);
5659         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5660                                  sw->recp_list, ICE_SW_LKUP_DFLT);
5661         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5662                                  sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5663         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5664                                  sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5665         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5666                                  sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5667 }
5668
/**
 * ice_remove_vsi_fltr - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 *
 * Wrapper that removes all switch filter rules for the VSI using the
 * switch info owned by @hw.
 */
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
{
	ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
}
5678
5679 /**
5680  * ice_alloc_res_cntr - allocating resource counter
5681  * @hw: pointer to the hardware structure
5682  * @type: type of resource
5683  * @alloc_shared: if set it is shared else dedicated
5684  * @num_items: number of entries requested for FD resource type
5685  * @counter_id: counter index returned by AQ call
5686  */
5687 enum ice_status
5688 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5689                    u16 *counter_id)
5690 {
5691         struct ice_aqc_alloc_free_res_elem *buf;
5692         enum ice_status status;
5693         u16 buf_len;
5694
5695         /* Allocate resource */
5696         buf_len = ice_struct_size(buf, elem, 1);
5697         buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5698         if (!buf)
5699                 return ICE_ERR_NO_MEMORY;
5700
5701         buf->num_elems = CPU_TO_LE16(num_items);
5702         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5703                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
5704
5705         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5706                                        ice_aqc_opc_alloc_res, NULL);
5707         if (status)
5708                 goto exit;
5709
5710         *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5711
5712 exit:
5713         ice_free(hw, buf);
5714         return status;
5715 }
5716
5717 /**
5718  * ice_free_res_cntr - free resource counter
5719  * @hw: pointer to the hardware structure
5720  * @type: type of resource
5721  * @alloc_shared: if set it is shared else dedicated
5722  * @num_items: number of entries to be freed for FD resource type
5723  * @counter_id: counter ID resource which needs to be freed
5724  */
5725 enum ice_status
5726 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5727                   u16 counter_id)
5728 {
5729         struct ice_aqc_alloc_free_res_elem *buf;
5730         enum ice_status status;
5731         u16 buf_len;
5732
5733         /* Free resource */
5734         buf_len = ice_struct_size(buf, elem, 1);
5735         buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5736         if (!buf)
5737                 return ICE_ERR_NO_MEMORY;
5738
5739         buf->num_elems = CPU_TO_LE16(num_items);
5740         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5741                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
5742         buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5743
5744         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5745                                        ice_aqc_opc_free_res, NULL);
5746         if (status)
5747                 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
5748
5749         ice_free(hw, buf);
5750         return status;
5751 }
5752
/**
 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
 * @hw: pointer to the hardware structure
 * @counter_id: returns counter index
 *
 * Allocates a single dedicated VLAN counter resource via
 * ice_alloc_res_cntr() and stores its index in @counter_id.
 */
enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
{
	return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
				  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
				  counter_id);
}
5764
/**
 * ice_free_vlan_res_counter - Free counter resource for VLAN type
 * @hw: pointer to the hardware structure
 * @counter_id: counter index to be freed
 *
 * Releases a single dedicated VLAN counter previously obtained with
 * ice_alloc_vlan_res_counter().
 */
enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
{
	return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
				 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
				 counter_id);
}
5776
/**
 * ice_alloc_res_lg_act - add large action resource
 * @hw: pointer to the hardware structure
 * @l_id: large action ID to fill it in
 * @num_acts: number of actions to hold with a large action entry
 */
static enum ice_status
ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	enum ice_status status;
	u16 buf_len;

	/* Valid range is 1..ICE_MAX_LG_ACT actions */
	if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
		return ICE_ERR_PARAM;

	/* Allocate resource for large action */
	buf_len = ice_struct_size(sw_buf, elem, 1);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
	if (!sw_buf)
		return ICE_ERR_NO_MEMORY;

	sw_buf->num_elems = CPU_TO_LE16(1);

	/* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
	 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
	 * If num_acts is greater than 2, then use
	 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
	 * The num_acts cannot exceed 4. This was ensured at the
	 * beginning of the function.
	 * NOTE(review): this comment previously said WIDE_TABLE_3 for two
	 * actions, but the code below has always selected WIDE_TABLE_2 -
	 * the comment is corrected here to match the code; confirm against
	 * the wide-table resource type definitions.
	 */
	if (num_acts == 1)
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
	else if (num_acts == 2)
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
	else
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (!status)
		*l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);

	ice_free(hw, sw_buf);
	return status;
}
5823
/**
 * ice_add_mac_with_sw_marker - add filter with sw marker
 * @hw: pointer to the hardware structure
 * @f_info: filter info structure containing the MAC filter information
 * @sw_marker: sw marker to tag the Rx descriptor with
 */
enum ice_status
ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
			   u16 sw_marker)
{
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct ice_sw_recipe *recp_list;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status ret;
	bool entry_exists;
	u16 lg_act_id;

	/* Only VSI-forwarding MAC filters with a valid marker are supported */
	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;

	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	if (sw_marker == ICE_INVAL_SW_MARKER_ID)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);

	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	 */

	INIT_LIST_HEAD(&l_head);
	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);

	entry_exists = false;
	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
			       hw->port_info->lport);
	if (ret == ICE_ERR_ALREADY_EXISTS)
		entry_exists = true;
	else if (ret)
		return ret;

	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
	rule_lock = &recp_list->filt_rule_lock;
	ice_acquire_lock(rule_lock);
	/* Get the book keeping entry for the filter */
	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
	if (!m_entry)
		goto exit_error;

	/* If counter action was enabled for this rule then don't enable
	 * sw marker large action
	 */
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_PARAM;
		goto exit_error;
	}

	/* if same marker was added before */
	if (m_entry->sw_marker_id == sw_marker) {
		ret = ICE_ERR_ALREADY_EXISTS;
		goto exit_error;
	}

	/* Allocate a hardware table entry to hold large act. Three actions
	 * for marker based large action
	 */
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
	if (ret)
		goto exit_error;

	/* NOTE(review): on this path ret is still ICE_SUCCESS, so the exit
	 * path returns whatever ice_remove_mac() reports - confirm whether
	 * an explicit error code should be set before this goto.
	 */
	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
		goto exit_error;

	/* Update the switch rule to add the marker action */
	ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
	if (!ret) {
		ice_release_lock(rule_lock);
		return ret;
	}

exit_error:
	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
	if (!entry_exists)
		ret = ice_remove_mac(hw, &l_head);

	return ret;
}
5919
/**
 * ice_add_mac_with_counter - add filter with counter enabled
 * @hw: pointer to the hardware structure
 * @f_info: pointer to filter info structure containing the MAC filter
 *          information
 */
enum ice_status
ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct ice_sw_recipe *recp_list;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status ret;
	bool entry_exist;
	u16 counter_id;
	u16 lg_act_id;

	/* Only VSI-forwarding MAC filters are supported */
	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;

	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];

	entry_exist = false;

	rule_lock = &recp_list->filt_rule_lock;

	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	 */
	INIT_LIST_HEAD(&l_head);

	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);

	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
			       hw->port_info->lport);
	if (ret == ICE_ERR_ALREADY_EXISTS)
		entry_exist = true;
	else if (ret)
		return ret;

	ice_acquire_lock(rule_lock);
	/* Get the book keeping entry for the filter */
	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
	if (!m_entry) {
		ret = ICE_ERR_BAD_PTR;
		goto exit_error;
	}

	/* Don't enable counter for a filter for which sw marker was enabled */
	if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
		ret = ICE_ERR_PARAM;
		goto exit_error;
	}

	/* If a counter was already enabled then don't need to add again */
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_ALREADY_EXISTS;
		goto exit_error;
	}

	/* Allocate a hardware table entry to VLAN counter */
	ret = ice_alloc_vlan_res_counter(hw, &counter_id);
	if (ret)
		goto exit_error;

	/* Allocate a hardware table entry to hold large act. Two actions for
	 * counter based large action
	 */
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
	if (ret)
		goto exit_error;

	/* NOTE(review): on this path ret is still ICE_SUCCESS, so the exit
	 * path returns whatever ice_remove_mac() reports - confirm whether
	 * an explicit error code should be set before this goto.
	 */
	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
		goto exit_error;

	/* Update the switch rule to add the counter action */
	ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
	if (!ret) {
		ice_release_lock(rule_lock);
		return ret;
	}

exit_error:
	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
	if (!entry_exist)
		ret = ice_remove_mac(hw, &l_head);

	return ret;
}
6018
/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethernet header and corresponding
 * bytes are 0, 2, 4 in the actual packet header and src address is at
 * 6, 8, 10 (see the ICE_MAC_OFOS entry below).
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 0, 2 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_SCTP_IL,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_VXLAN_GPE,	{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20 } },
	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
	{ ICE_PFCP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_L2TPV3,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_ESP,		{ 0, 2, 4, 6 } },
	{ ICE_AH,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_NAT_T,		{ 8, 10, 12, 14 } },
	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
	{ ICE_VLAN_EX,		{ 0, 2 } },
};
6057
/* The following table describes preferred grouping of recipes.
 * If a recipe that needs to be programmed is a superset or matches one of the
 * following combinations, then the recipe needs to be chained as per the
 * following policy.
 * NOTE(review): the paragraph above describes recipe grouping, while the
 * table below maps each software protocol type to its hardware protocol
 * ID - confirm the comment placement against the grouping table it
 * originally referenced.
 */

static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_PFCP,		ICE_UDP_ILOS_HW },
	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
	{ ICE_ESP,		ICE_ESP_HW },
	{ ICE_AH,		ICE_AH_HW },
	{ ICE_NAT_T,		ICE_UDP_ILOS_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
	{ ICE_VLAN_EX,		ICE_VLAN_OF_HW },
};
6091
/**
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 * @tun_type: tunnel type the matching recipe must have been created with
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 */
static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
			 enum ice_sw_tunnel_type tun_type)
{
	bool refresh_required = true;
	struct ice_sw_recipe *recp;
	u8 i;

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,
						&refresh_required))
				continue;

		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)
			continue;

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
			struct ice_fv_word *be = lkup_exts->fv_words;
			u16 *cr = recp[i].lkup_exts.field_mask;
			u16 *de = lkup_exts->field_mask;
			bool found = true;
			u8 pe, qr;

			/* ar, cr, and qr are related to the recipe words, while
			 * be, de, and pe are related to the lookup words
			 */
			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
				/* A lookup word matches a recipe word when
				 * offset, protocol ID and field mask all agree.
				 */
				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
				     qr++) {
					if (ar[qr].off == be[pe].off &&
					    ar[qr].prot_id == be[pe].prot_id &&
					    cr[qr] == de[pe])
						/* Found the "pe"th word in the
						 * given recipe
						 */
						break;
				}
				/* After walking through all the words in the
				 * "i"th recipe if "pe"th word was not found
				 * then this recipe is not what we are looking
				 * for. So break out from this loop and try the
				 * next recipe
				 */
				if (qr >= recp[i].lkup_exts.n_val_words) {
					found = false;
					break;
				}
			}
			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 */
			if (tun_type == recp[i].tun_type && found)
				return i; /* Return the recipe ID */
		}
	}
	return ICE_MAX_NUM_RECIPES;
}
6168
6169 /**
6170  * ice_prot_type_to_id - get protocol ID from protocol type
6171  * @type: protocol type
6172  * @id: pointer to variable that will receive the ID
6173  *
6174  * Returns true if found, false otherwise
6175  */
6176 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6177 {
6178         u8 i;
6179
6180         for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6181                 if (ice_prot_id_tbl[i].type == type) {
6182                         *id = ice_prot_id_tbl[i].protocol_id;
6183                         return true;
6184                 }
6185         return false;
6186 }
6187
6188 /**
6189  * ice_find_valid_words - count valid words
6190  * @rule: advanced rule with lookup information
6191  * @lkup_exts: byte offset extractions of the words that are valid
6192  *
6193  * calculate valid words in a lookup rule using mask value
6194  */
6195 static u8
6196 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6197                      struct ice_prot_lkup_ext *lkup_exts)
6198 {
6199         u8 j, word, prot_id, ret_val;
6200
6201         if (!ice_prot_type_to_id(rule->type, &prot_id))
6202                 return 0;
6203
6204         word = lkup_exts->n_val_words;
6205
6206         for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6207                 if (((u16 *)&rule->m_u)[j] &&
6208                     rule->type < ARRAY_SIZE(ice_prot_ext)) {
6209                         /* No more space to accommodate */
6210                         if (word >= ICE_MAX_CHAIN_WORDS)
6211                                 return 0;
6212                         lkup_exts->fv_words[word].off =
6213                                 ice_prot_ext[rule->type].offs[j];
6214                         lkup_exts->fv_words[word].prot_id =
6215                                 ice_prot_id_tbl[rule->type].protocol_id;
6216                         lkup_exts->field_mask[word] =
6217                                 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6218                         word++;
6219                 }
6220
6221         ret_val = word - lkup_exts->n_val_words;
6222         lkup_exts->n_val_words = word;
6223
6224         return ret_val;
6225 }
6226
6227 /**
6228  * ice_create_first_fit_recp_def - Create a recipe grouping
6229  * @hw: pointer to the hardware structure
6230  * @lkup_exts: an array of protocol header extractions
6231  * @rg_list: pointer to a list that stores new recipe groups
6232  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6233  *
6234  * Using first fit algorithm, take all the words that are still not done
6235  * and start grouping them in 4-word groups. Each group makes up one
6236  * recipe.
6237  */
6238 static enum ice_status
6239 ice_create_first_fit_recp_def(struct ice_hw *hw,
6240                               struct ice_prot_lkup_ext *lkup_exts,
6241                               struct LIST_HEAD_TYPE *rg_list,
6242                               u8 *recp_cnt)
6243 {
6244         struct ice_pref_recipe_group *grp = NULL;
6245         u8 j;
6246
6247         *recp_cnt = 0;
6248
6249         if (!lkup_exts->n_val_words) {
6250                 struct ice_recp_grp_entry *entry;
6251
6252                 entry = (struct ice_recp_grp_entry *)
6253                         ice_malloc(hw, sizeof(*entry));
6254                 if (!entry)
6255                         return ICE_ERR_NO_MEMORY;
6256                 LIST_ADD(&entry->l_entry, rg_list);
6257                 grp = &entry->r_group;
6258                 (*recp_cnt)++;
6259                 grp->n_val_pairs = 0;
6260         }
6261
6262         /* Walk through every word in the rule to check if it is not done. If so
6263          * then this word needs to be part of a new recipe.
6264          */
6265         for (j = 0; j < lkup_exts->n_val_words; j++)
6266                 if (!ice_is_bit_set(lkup_exts->done, j)) {
6267                         if (!grp ||
6268                             grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6269                                 struct ice_recp_grp_entry *entry;
6270
6271                                 entry = (struct ice_recp_grp_entry *)
6272                                         ice_malloc(hw, sizeof(*entry));
6273                                 if (!entry)
6274                                         return ICE_ERR_NO_MEMORY;
6275                                 LIST_ADD(&entry->l_entry, rg_list);
6276                                 grp = &entry->r_group;
6277                                 (*recp_cnt)++;
6278                         }
6279
6280                         grp->pairs[grp->n_val_pairs].prot_id =
6281                                 lkup_exts->fv_words[j].prot_id;
6282                         grp->pairs[grp->n_val_pairs].off =
6283                                 lkup_exts->fv_words[j].off;
6284                         grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6285                         grp->n_val_pairs++;
6286                 }
6287
6288         return ICE_SUCCESS;
6289 }
6290
6291 /**
6292  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6293  * @hw: pointer to the hardware structure
6294  * @fv_list: field vector with the extraction sequence information
6295  * @rg_list: recipe groupings with protocol-offset pairs
6296  *
6297  * Helper function to fill in the field vector indices for protocol-offset
6298  * pairs. These indexes are then ultimately programmed into a recipe.
6299  */
6300 static enum ice_status
6301 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6302                        struct LIST_HEAD_TYPE *rg_list)
6303 {
6304         struct ice_sw_fv_list_entry *fv;
6305         struct ice_recp_grp_entry *rg;
6306         struct ice_fv_word *fv_ext;
6307
6308         if (LIST_EMPTY(fv_list))
6309                 return ICE_SUCCESS;
6310
6311         fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6312         fv_ext = fv->fv_ptr->ew;
6313
6314         LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6315                 u8 i;
6316
6317                 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6318                         struct ice_fv_word *pr;
6319                         bool found = false;
6320                         u16 mask;
6321                         u8 j;
6322
6323                         pr = &rg->r_group.pairs[i];
6324                         mask = rg->r_group.mask[i];
6325
6326                         for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6327                                 if (fv_ext[j].prot_id == pr->prot_id &&
6328                                     fv_ext[j].off == pr->off) {
6329                                         found = true;
6330
6331                                         /* Store index of field vector */
6332                                         rg->fv_idx[i] = j;
6333                                         rg->fv_mask[i] = mask;
6334                                         break;
6335                                 }
6336
6337                         /* Protocol/offset could not be found, caller gave an
6338                          * invalid pair
6339                          */
6340                         if (!found)
6341                                 return ICE_ERR_PARAM;
6342                 }
6343         }
6344
6345         return ICE_SUCCESS;
6346 }
6347
6348 /**
6349  * ice_find_free_recp_res_idx - find free result indexes for recipe
6350  * @hw: pointer to hardware structure
6351  * @profiles: bitmap of profiles that will be associated with the new recipe
6352  * @free_idx: pointer to variable to receive the free index bitmap
6353  *
6354  * The algorithm used here is:
6355  *      1. When creating a new recipe, create a set P which contains all
6356  *         Profiles that will be associated with our new recipe
6357  *
6358  *      2. For each Profile p in set P:
6359  *          a. Add all recipes associated with Profile p into set R
6360  *          b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6361  *              [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6362  *              i. Or just assume they all have the same possible indexes:
6363  *                      44, 45, 46, 47
6364  *                      i.e., PossibleIndexes = 0x0000F00000000000
6365  *
6366  *      3. For each Recipe r in set R:
6367  *          a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6368  *          b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6369  *
6370  *      FreeIndexes will contain the bits indicating the indexes free for use,
6371  *      then the code needs to update the recipe[r].used_result_idx_bits to
6372  *      indicate which indexes were selected for use by this recipe.
6373  */
6374 static u16
6375 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6376                            ice_bitmap_t *free_idx)
6377 {
6378         ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6379         ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6380         ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6381         u16 bit;
6382
6383         ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6384         ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6385         ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6386         ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
6387
6388         ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6389
6390         /* For each profile we are going to associate the recipe with, add the
6391          * recipes that are associated with that profile. This will give us
6392          * the set of recipes that our recipe may collide with. Also, determine
6393          * what possible result indexes are usable given this set of profiles.
6394          */
6395         ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6396                 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6397                               ICE_MAX_NUM_RECIPES);
6398                 ice_and_bitmap(possible_idx, possible_idx,
6399                                hw->switch_info->prof_res_bm[bit],
6400                                ICE_MAX_FV_WORDS);
6401         }
6402
6403         /* For each recipe that our new recipe may collide with, determine
6404          * which indexes have been used.
6405          */
6406         ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6407                 ice_or_bitmap(used_idx, used_idx,
6408                               hw->switch_info->recp_list[bit].res_idxs,
6409                               ICE_MAX_FV_WORDS);
6410
6411         ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6412
6413         /* return number of free indexes */
6414         return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6415 }
6416
/**
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @profiles: bitmap of profiles that will be associated.
 *
 * Allocates recipe resources, builds the recipe data elements from the
 * recipe groups in @rm->rg_list (adding a chaining recipe when more than
 * one group is needed), programs them to firmware via the add-recipe AQ
 * command, and mirrors the result into the software recipe bookkeeping
 * (hw->switch_info->recp_list).
 *
 * On success, ownership of the programmed recipe buffer transfers to
 * @rm->root_buf (freed later by recipe teardown); on failure all local
 * allocations are released. Returns ICE_SUCCESS or an ICE_ERR_* code.
 */
static enum ice_status
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  ice_bitmap_t *profiles)
{
	ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	enum ice_status status;
	u16 free_res_idx;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	 /* check number of free result indices */
	ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		/* each chained recipe needs its own result index slot */
		if (rm->n_grp_count > free_res_idx)
			return ICE_ERR_MAX_LIMIT;

		/* account for the extra chaining (root) recipe */
		rm->n_grp_count++;
	}

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
		return ICE_ERR_MAX_LIMIT;

	/* tmp receives the recipes read back from FW below; tmp[0] is then
	 * used as a template for every new recipe entry written into buf
	 */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
							    ICE_MAX_NUM_RECIPES,
							    sizeof(*tmp));
	if (!tmp)
		return ICE_ERR_NO_MEMORY;

	buf = (struct ice_aqc_recipe_data_elem *)
		ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
	if (!buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_mem;
	}

	ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
				   NULL);
	if (status || recipe_count == 0)
		goto err_unroll;

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		u8 i;

		status = ice_alloc_recipe(hw, &entry->rid);
		if (status)
			goto err_unroll;

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		buf[recps].content.rid = 0;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
			   ICE_NONDMA_MEM);

		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 * to be 0
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;
		}

		/* overwrite lkup_indx 1..n with the group's real field
		 * vector indices and masks
		 */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] =
				CPU_TO_LE16(entry->fv_mask[i]);
		}

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			 * that can be used.
			 */
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
				status = ICE_ERR_MAX_LIMIT;
				goto err_unroll;
			}

			/* claim this result index for the chaining word and
			 * advance to the next free one
			 */
			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			ice_clear_bit(chain_idx, result_idx_bm);
			chain_idx = ice_find_first_bit(result_idx_bm,
						       ICE_MAX_FV_WORDS);
		}

		/* fill recipe dependencies */
		ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
				ICE_MAX_NUM_RECIPES);
		ice_set_bit(buf[recps].recipe_indx,
			    (ice_bitmap_t *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
		recps++;
	}

	if (rm->n_grp_count == 1) {
		/* single-group case: the one recipe just built is the root */
		rm->root_rid = buf[0].recipe_indx;
		ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[0].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			status = ICE_ERR_BAD_PTR;
			goto err_unroll;
		}
		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referreing newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
	} else {
		/* multi-group case: build one extra root recipe whose lookup
		 * words are the chained result indexes of the other recipes
		 */
		struct ice_recp_grp_entry *last_chain_entry;
		u16 rid, i;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);
		if (status)
			goto err_unroll;

		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
			sizeof(*last_chain_entry));
		if (!last_chain_entry) {
			status = ICE_ERR_NO_MEMORY;
			goto err_unroll;
		}
		last_chain_entry->rid = rid;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
			   ICE_NONDMA_MEM);
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;
		}

		i = 1;
		/* update r_bitmap with the recp that is used for chaining */
		ice_set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
				    l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
			ice_set_bit(entry->rid, rm->r_bitmap);
		}
		LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[recps].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			status = ICE_ERR_BAD_PTR;
			goto err_unroll;
		}
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

		recps++;
		rm->root_rid = (u8)rid;
	}
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		goto err_unroll;

	/* program all built recipe entries to firmware in one AQ call */
	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);
	if (status)
		goto err_unroll;

	/* Every recipe that just got created add it to the recipe
	 * book keeping list
	 */
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {
				buf_idx = idx;
				idx_found = true;
			}

		if (!idx_found) {
			status = ICE_ERR_OUT_OF_RANGE;
			goto err_unroll;
		}

		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;

		recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);

		ice_memcpy(&recp->ext_words, entry->r_group.pairs,
			   entry->r_group.n_val_pairs *
			   sizeof(struct ice_fv_word),
			   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
			   sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
		if (!is_root)
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->recp_created = true;
	}
	/* success: buf is now owned by rm->root_buf and must NOT be freed
	 * here; only the temporary FW read-back buffer is released
	 */
	rm->root_buf = buf;
	ice_free(hw, tmp);
	return status;

err_unroll:
err_mem:
	/* NOTE(review): via err_mem, buf is NULL (failed calloc) — relies on
	 * ice_free accepting NULL; confirm against its definition
	 */
	ice_free(hw, tmp);
	ice_free(hw, buf);
	return status;
}
6716
6717 /**
6718  * ice_create_recipe_group - creates recipe group
6719  * @hw: pointer to hardware structure
6720  * @rm: recipe management list entry
6721  * @lkup_exts: lookup elements
6722  */
6723 static enum ice_status
6724 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6725                         struct ice_prot_lkup_ext *lkup_exts)
6726 {
6727         enum ice_status status;
6728         u8 recp_count = 0;
6729
6730         rm->n_grp_count = 0;
6731
6732         /* Create recipes for words that are marked not done by packing them
6733          * as best fit.
6734          */
6735         status = ice_create_first_fit_recp_def(hw, lkup_exts,
6736                                                &rm->rg_list, &recp_count);
6737         if (!status) {
6738                 rm->n_grp_count += recp_count;
6739                 rm->n_ext_words = lkup_exts->n_val_words;
6740                 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6741                            sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6742                 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6743                            sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6744         }
6745
6746         return status;
6747 }
6748
6749 /**
6750  * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6751  * @hw: pointer to hardware structure
6752  * @lkups: lookup elements or match criteria for the advanced recipe, one
6753  *         structure per protocol header
6754  * @lkups_cnt: number of protocols
6755  * @bm: bitmap of field vectors to consider
6756  * @fv_list: pointer to a list that holds the returned field vectors
6757  */
6758 static enum ice_status
6759 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6760            ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6761 {
6762         enum ice_status status;
6763         u8 *prot_ids;
6764         u16 i;
6765
6766         if (!lkups_cnt)
6767                 return ICE_SUCCESS;
6768
6769         prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6770         if (!prot_ids)
6771                 return ICE_ERR_NO_MEMORY;
6772
6773         for (i = 0; i < lkups_cnt; i++)
6774                 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6775                         status = ICE_ERR_CFG;
6776                         goto free_mem;
6777                 }
6778
6779         /* Find field vectors that include all specified protocol types */
6780         status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6781
6782 free_mem:
6783         ice_free(hw, prot_ids);
6784         return status;
6785 }
6786
6787 /**
6788  * ice_tun_type_match_mask - determine if tun type needs a match mask
6789  * @tun_type: tunnel type
6790  * @mask: mask to be used for the tunnel
6791  */
6792 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6793 {
6794         switch (tun_type) {
6795         case ICE_SW_TUN_VXLAN_GPE:
6796         case ICE_SW_TUN_GENEVE:
6797         case ICE_SW_TUN_VXLAN:
6798         case ICE_SW_TUN_NVGRE:
6799         case ICE_SW_TUN_UDP:
6800         case ICE_ALL_TUNNELS:
6801         case ICE_SW_TUN_AND_NON_TUN_QINQ:
6802         case ICE_NON_TUN_QINQ:
6803         case ICE_SW_TUN_PPPOE_QINQ:
6804         case ICE_SW_TUN_PPPOE_PAY_QINQ:
6805         case ICE_SW_TUN_PPPOE_IPV4_QINQ:
6806         case ICE_SW_TUN_PPPOE_IPV6_QINQ:
6807                 *mask = ICE_TUN_FLAG_MASK;
6808                 return true;
6809
6810         case ICE_SW_TUN_GENEVE_VLAN:
6811         case ICE_SW_TUN_VXLAN_VLAN:
6812                 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6813                 return true;
6814
6815         default:
6816                 *mask = 0;
6817                 return false;
6818         }
6819 }
6820
6821 /**
6822  * ice_add_special_words - Add words that are not protocols, such as metadata
6823  * @rinfo: other information regarding the rule e.g. priority and action info
6824  * @lkup_exts: lookup word structure
6825  */
6826 static enum ice_status
6827 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6828                       struct ice_prot_lkup_ext *lkup_exts)
6829 {
6830         u16 mask;
6831
6832         /* If this is a tunneled packet, then add recipe index to match the
6833          * tunnel bit in the packet metadata flags.
6834          */
6835         if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6836                 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
6837                         u8 word = lkup_exts->n_val_words++;
6838
6839                         lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6840                         lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6841                         lkup_exts->field_mask[word] = mask;
6842                 } else {
6843                         return ICE_ERR_MAX_LIMIT;
6844                 }
6845         }
6846
6847         return ICE_SUCCESS;
6848 }
6849
/**
 * ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
 * @hw: pointer to hardware structure
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @bm: pointer to memory for returning the bitmap of field vectors
 *
 * Two classes of tunnel types are handled:
 * - types that map to a profile class (ICE_PROF_*): the bitmap is filled by
 *   ice_get_sw_fv_bitmap() at the end of the function;
 * - types that map to one or more exact profile IDs (ICE_PROFID_*): the
 *   individual bits are set directly and the function returns early.
 */
static void
ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
			 ice_bitmap_t *bm)
{
	enum ice_prof_type prof_type;

	/* Caller's bitmap may be uninitialized; start from all-clear */
	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);

	switch (rinfo->tun_type) {
	case ICE_NON_TUN:
	case ICE_NON_TUN_QINQ:
		prof_type = ICE_PROF_NON_TUN;
		break;
	case ICE_ALL_TUNNELS:
		prof_type = ICE_PROF_TUN_ALL;
		break;
	case ICE_SW_TUN_VXLAN_GPE:
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_GENEVE_VLAN:
	case ICE_SW_TUN_VXLAN:
	case ICE_SW_TUN_VXLAN_VLAN:
	case ICE_SW_TUN_UDP:
	case ICE_SW_TUN_GTP:
		prof_type = ICE_PROF_TUN_UDP;
		break;
	case ICE_SW_TUN_NVGRE:
		prof_type = ICE_PROF_TUN_GRE;
		break;
	case ICE_SW_TUN_PPPOE:
	case ICE_SW_TUN_PPPOE_QINQ:
		prof_type = ICE_PROF_TUN_PPPOE;
		break;
	/* From here on: exact profile ID cases; set bit(s) and return */
	case ICE_SW_TUN_PPPOE_PAY:
	case ICE_SW_TUN_PPPOE_PAY_QINQ:
		ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4:
	case ICE_SW_TUN_PPPOE_IPV4_QINQ:
		/* L4-agnostic PPPoE/IPv4: cover OTHER, UDP and TCP profiles */
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4_TCP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4_UDP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6:
	case ICE_SW_TUN_PPPOE_IPV6_QINQ:
		/* L4-agnostic PPPoE/IPv6: cover OTHER, UDP and TCP profiles */
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6_TCP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6_UDP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_ESP:
	case ICE_SW_TUN_IPV6_ESP:
		ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_AH:
	case ICE_SW_TUN_IPV6_AH:
		ice_set_bit(ICE_PROFID_IPV6_AH, bm);
		return;
	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
	case ICE_SW_TUN_IPV6_L2TPV3:
		ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_NAT_T:
	case ICE_SW_TUN_IPV6_NAT_T:
		ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
		ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
		ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
		ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
		ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
		return;
	case ICE_SW_TUN_IPV4_NAT_T:
		ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
		return;
	case ICE_SW_TUN_IPV4_L2TPV3:
		ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
		return;
	case ICE_SW_TUN_IPV4_ESP:
		ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
		return;
	case ICE_SW_TUN_IPV4_AH:
		ice_set_bit(ICE_PROFID_IPV4_AH, bm);
		return;
	case ICE_SW_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
		return;
	case ICE_SW_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
		return;
	case ICE_SW_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
		return;
	case ICE_SW_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV4:
		/* Cover both with- and without-extension-header GTPU profiles
		 * for each inner L4 (OTHER/UDP/TCP)
		 */
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV4:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV6:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV6:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_AND_NON_TUN:
	case ICE_SW_TUN_AND_NON_TUN_QINQ:
	default:
		prof_type = ICE_PROF_ALL;
		break;
	}

	/* Profile-class cases fall through to here */
	ice_get_sw_fv_bitmap(hw, prof_type, bm);
}
7008
7009 /**
7010  * ice_is_prof_rule - determine if rule type is a profile rule
7011  * @type: the rule type
7012  *
7013  * if the rule type is a profile rule, that means that there no field value
7014  * match required, in this case just a profile hit is required.
7015  */
7016 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
7017 {
7018         switch (type) {
7019         case ICE_SW_TUN_PROFID_IPV6_ESP:
7020         case ICE_SW_TUN_PROFID_IPV6_AH:
7021         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7022         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7023         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7024         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7025         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7026         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7027                 return true;
7028         default:
7029                 break;
7030         }
7031
7032         return false;
7033 }
7034
7035 /**
7036  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7037  * @hw: pointer to hardware structure
7038  * @lkups: lookup elements or match criteria for the advanced recipe, one
7039  *  structure per protocol header
7040  * @lkups_cnt: number of protocols
7041  * @rinfo: other information regarding the rule e.g. priority and action info
7042  * @rid: return the recipe ID of the recipe created
7043  */
7044 static enum ice_status
7045 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7046                    u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7047 {
7048         ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7049         ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7050         struct ice_prot_lkup_ext *lkup_exts;
7051         struct ice_recp_grp_entry *r_entry;
7052         struct ice_sw_fv_list_entry *fvit;
7053         struct ice_recp_grp_entry *r_tmp;
7054         struct ice_sw_fv_list_entry *tmp;
7055         enum ice_status status = ICE_SUCCESS;
7056         struct ice_sw_recipe *rm;
7057         u8 i;
7058
7059         if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7060                 return ICE_ERR_PARAM;
7061
7062         lkup_exts = (struct ice_prot_lkup_ext *)
7063                 ice_malloc(hw, sizeof(*lkup_exts));
7064         if (!lkup_exts)
7065                 return ICE_ERR_NO_MEMORY;
7066
7067         /* Determine the number of words to be matched and if it exceeds a
7068          * recipe's restrictions
7069          */
7070         for (i = 0; i < lkups_cnt; i++) {
7071                 u16 count;
7072
7073                 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7074                         status = ICE_ERR_CFG;
7075                         goto err_free_lkup_exts;
7076                 }
7077
7078                 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7079                 if (!count) {
7080                         status = ICE_ERR_CFG;
7081                         goto err_free_lkup_exts;
7082                 }
7083         }
7084
7085         rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7086         if (!rm) {
7087                 status = ICE_ERR_NO_MEMORY;
7088                 goto err_free_lkup_exts;
7089         }
7090
7091         /* Get field vectors that contain fields extracted from all the protocol
7092          * headers being programmed.
7093          */
7094         INIT_LIST_HEAD(&rm->fv_list);
7095         INIT_LIST_HEAD(&rm->rg_list);
7096
7097         /* Get bitmap of field vectors (profiles) that are compatible with the
7098          * rule request; only these will be searched in the subsequent call to
7099          * ice_get_fv.
7100          */
7101         ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7102
7103         status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7104         if (status)
7105                 goto err_unroll;
7106
7107         /* Create any special protocol/offset pairs, such as looking at tunnel
7108          * bits by extracting metadata
7109          */
7110         status = ice_add_special_words(rinfo, lkup_exts);
7111         if (status)
7112                 goto err_free_lkup_exts;
7113
7114         /* Group match words into recipes using preferred recipe grouping
7115          * criteria.
7116          */
7117         status = ice_create_recipe_group(hw, rm, lkup_exts);
7118         if (status)
7119                 goto err_unroll;
7120
7121         /* set the recipe priority if specified */
7122         rm->priority = (u8)rinfo->priority;
7123
7124         /* Find offsets from the field vector. Pick the first one for all the
7125          * recipes.
7126          */
7127         status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7128         if (status)
7129                 goto err_unroll;
7130
7131         /* An empty FV list means to use all the profiles returned in the
7132          * profile bitmap
7133          */
7134         if (LIST_EMPTY(&rm->fv_list)) {
7135                 u16 j;
7136
7137                 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7138                         struct ice_sw_fv_list_entry *fvl;
7139
7140                         fvl = (struct ice_sw_fv_list_entry *)
7141                                 ice_malloc(hw, sizeof(*fvl));
7142                         if (!fvl)
7143                                 goto err_unroll;
7144                         fvl->fv_ptr = NULL;
7145                         fvl->profile_id = j;
7146                         LIST_ADD(&fvl->list_entry, &rm->fv_list);
7147                 }
7148         }
7149
7150         /* get bitmap of all profiles the recipe will be associated with */
7151         ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7152         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7153                             list_entry) {
7154                 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7155                 ice_set_bit((u16)fvit->profile_id, profiles);
7156         }
7157
7158         /* Look for a recipe which matches our requested fv / mask list */
7159         *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
7160         if (*rid < ICE_MAX_NUM_RECIPES)
7161                 /* Success if found a recipe that match the existing criteria */
7162                 goto err_unroll;
7163
7164         rm->tun_type = rinfo->tun_type;
7165         /* Recipe we need does not exist, add a recipe */
7166         status = ice_add_sw_recipe(hw, rm, profiles);
7167         if (status)
7168                 goto err_unroll;
7169
7170         /* Associate all the recipes created with all the profiles in the
7171          * common field vector.
7172          */
7173         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7174                             list_entry) {
7175                 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
7176                 u16 j;
7177
7178                 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7179                                                       (u8 *)r_bitmap, NULL);
7180                 if (status)
7181                         goto err_unroll;
7182
7183                 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7184                               ICE_MAX_NUM_RECIPES);
7185                 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7186                 if (status)
7187                         goto err_unroll;
7188
7189                 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7190                                                       (u8 *)r_bitmap,
7191                                                       NULL);
7192                 ice_release_change_lock(hw);
7193
7194                 if (status)
7195                         goto err_unroll;
7196
7197                 /* Update profile to recipe bitmap array */
7198                 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7199                               ICE_MAX_NUM_RECIPES);
7200
7201                 /* Update recipe to profile bitmap array */
7202                 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7203                         ice_set_bit((u16)fvit->profile_id,
7204                                     recipe_to_profile[j]);
7205         }
7206
7207         *rid = rm->root_rid;
7208         ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7209                    lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
7210 err_unroll:
7211         LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7212                                  ice_recp_grp_entry, l_entry) {
7213                 LIST_DEL(&r_entry->l_entry);
7214                 ice_free(hw, r_entry);
7215         }
7216
7217         LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7218                                  list_entry) {
7219                 LIST_DEL(&fvit->list_entry);
7220                 ice_free(hw, fvit);
7221         }
7222
7223         if (rm->root_buf)
7224                 ice_free(hw, rm->root_buf);
7225
7226         ice_free(hw, rm);
7227
7228 err_free_lkup_exts:
7229         ice_free(hw, lkup_exts);
7230
7231         return status;
7232 }
7233
7234 /**
7235  * ice_find_dummy_packet - find dummy packet by tunnel type
7236  *
7237  * @lkups: lookup elements or match criteria for the advanced recipe, one
7238  *         structure per protocol header
7239  * @lkups_cnt: number of protocols
7240  * @tun_type: tunnel type from the match criteria
7241  * @pkt: dummy packet to fill according to filter match criteria
7242  * @pkt_len: packet length of dummy packet
7243  * @offsets: pointer to receive the pointer to the offsets for the packet
7244  */
7245 static void
7246 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7247                       enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7248                       u16 *pkt_len,
7249                       const struct ice_dummy_pkt_offsets **offsets)
7250 {
7251         bool tcp = false, udp = false, ipv6 = false, vlan = false;
7252         bool gre = false;
7253         u16 i;
7254
7255         for (i = 0; i < lkups_cnt; i++) {
7256                 if (lkups[i].type == ICE_UDP_ILOS)
7257                         udp = true;
7258                 else if (lkups[i].type == ICE_TCP_IL)
7259                         tcp = true;
7260                 else if (lkups[i].type == ICE_IPV6_OFOS)
7261                         ipv6 = true;
7262                 else if (lkups[i].type == ICE_VLAN_OFOS)
7263                         vlan = true;
7264                 else if (lkups[i].type == ICE_IPV4_OFOS &&
7265                          lkups[i].h_u.ipv4_hdr.protocol ==
7266                                 ICE_IPV4_NVGRE_PROTO_ID &&
7267                          lkups[i].m_u.ipv4_hdr.protocol ==
7268                                 0xFF)
7269                         gre = true;
7270                 else if (lkups[i].type == ICE_PPPOE &&
7271                          lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7272                                 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7273                          lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7274                                 0xFFFF)
7275                         ipv6 = true;
7276                 else if (lkups[i].type == ICE_ETYPE_OL &&
7277                          lkups[i].h_u.ethertype.ethtype_id ==
7278                                 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7279                          lkups[i].m_u.ethertype.ethtype_id ==
7280                                         0xFFFF)
7281                         ipv6 = true;
7282                 else if (lkups[i].type == ICE_IPV4_IL &&
7283                          lkups[i].h_u.ipv4_hdr.protocol ==
7284                                 ICE_TCP_PROTO_ID &&
7285                          lkups[i].m_u.ipv4_hdr.protocol ==
7286                                 0xFF)
7287                         tcp = true;
7288         }
7289
7290         if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7291              tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7292                 *pkt = dummy_qinq_ipv6_pkt;
7293                 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7294                 *offsets = dummy_qinq_ipv6_packet_offsets;
7295                 return;
7296         } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7297                            tun_type == ICE_NON_TUN_QINQ) {
7298                 *pkt = dummy_qinq_ipv4_pkt;
7299                 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7300                 *offsets = dummy_qinq_ipv4_packet_offsets;
7301                 return;
7302         }
7303
7304         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7305                 *pkt = dummy_qinq_pppoe_ipv6_packet;
7306                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7307                 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7308                 return;
7309         } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7310                 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7311                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7312                 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7313                 return;
7314         } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7315                         tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7316                 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7317                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7318                 *offsets = dummy_qinq_pppoe_packet_offsets;
7319                 return;
7320         }
7321
7322         if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7323                 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7324                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7325                 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7326                 return;
7327         } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7328                 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7329                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7330                 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7331                 return;
7332         } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7333                 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7334                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7335                 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7336                 return;
7337         } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7338                 *pkt = dummy_ipv4_gtpu_ipv6_packet;
7339                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7340                 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7341                 return;
7342         } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7343                 *pkt = dummy_ipv6_gtpu_ipv4_packet;
7344                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7345                 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7346                 return;
7347         } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7348                 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7349                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7350                 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
7351                 return;
7352         }
7353
7354         if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7355                 *pkt = dummy_ipv4_esp_pkt;
7356                 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7357                 *offsets = dummy_ipv4_esp_packet_offsets;
7358                 return;
7359         }
7360
7361         if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7362                 *pkt = dummy_ipv6_esp_pkt;
7363                 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7364                 *offsets = dummy_ipv6_esp_packet_offsets;
7365                 return;
7366         }
7367
7368         if (tun_type == ICE_SW_TUN_IPV4_AH) {
7369                 *pkt = dummy_ipv4_ah_pkt;
7370                 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7371                 *offsets = dummy_ipv4_ah_packet_offsets;
7372                 return;
7373         }
7374
7375         if (tun_type == ICE_SW_TUN_IPV6_AH) {
7376                 *pkt = dummy_ipv6_ah_pkt;
7377                 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7378                 *offsets = dummy_ipv6_ah_packet_offsets;
7379                 return;
7380         }
7381
7382         if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7383                 *pkt = dummy_ipv4_nat_pkt;
7384                 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7385                 *offsets = dummy_ipv4_nat_packet_offsets;
7386                 return;
7387         }
7388
7389         if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7390                 *pkt = dummy_ipv6_nat_pkt;
7391                 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7392                 *offsets = dummy_ipv6_nat_packet_offsets;
7393                 return;
7394         }
7395
7396         if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7397                 *pkt = dummy_ipv4_l2tpv3_pkt;
7398                 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7399                 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7400                 return;
7401         }
7402
7403         if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7404                 *pkt = dummy_ipv6_l2tpv3_pkt;
7405                 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7406                 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
7407                 return;
7408         }
7409
7410         if (tun_type == ICE_SW_TUN_GTP) {
7411                 *pkt = dummy_udp_gtp_packet;
7412                 *pkt_len = sizeof(dummy_udp_gtp_packet);
7413                 *offsets = dummy_udp_gtp_packet_offsets;
7414                 return;
7415         }
7416
7417         if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7418                 *pkt = dummy_pppoe_ipv6_packet;
7419                 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7420                 *offsets = dummy_pppoe_packet_offsets;
7421                 return;
7422         } else if (tun_type == ICE_SW_TUN_PPPOE ||
7423                 tun_type == ICE_SW_TUN_PPPOE_PAY) {
7424                 *pkt = dummy_pppoe_ipv4_packet;
7425                 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7426                 *offsets = dummy_pppoe_packet_offsets;
7427                 return;
7428         }
7429
7430         if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7431                 *pkt = dummy_pppoe_ipv4_packet;
7432                 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7433                 *offsets = dummy_pppoe_packet_ipv4_offsets;
7434                 return;
7435         }
7436
7437         if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7438                 *pkt = dummy_pppoe_ipv4_tcp_packet;
7439                 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7440                 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7441                 return;
7442         }
7443
7444         if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7445                 *pkt = dummy_pppoe_ipv4_udp_packet;
7446                 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7447                 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7448                 return;
7449         }
7450
7451         if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7452                 *pkt = dummy_pppoe_ipv6_packet;
7453                 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7454                 *offsets = dummy_pppoe_packet_ipv6_offsets;
7455                 return;
7456         }
7457
7458         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7459                 *pkt = dummy_pppoe_ipv6_tcp_packet;
7460                 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7461                 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7462                 return;
7463         }
7464
7465         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7466                 *pkt = dummy_pppoe_ipv6_udp_packet;
7467                 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7468                 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
7469                 return;
7470         }
7471
7472         if (tun_type == ICE_SW_IPV4_TCP) {
7473                 *pkt = dummy_tcp_packet;
7474                 *pkt_len = sizeof(dummy_tcp_packet);
7475                 *offsets = dummy_tcp_packet_offsets;
7476                 return;
7477         }
7478
7479         if (tun_type == ICE_SW_IPV4_UDP) {
7480                 *pkt = dummy_udp_packet;
7481                 *pkt_len = sizeof(dummy_udp_packet);
7482                 *offsets = dummy_udp_packet_offsets;
7483                 return;
7484         }
7485
7486         if (tun_type == ICE_SW_IPV6_TCP) {
7487                 *pkt = dummy_tcp_ipv6_packet;
7488                 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7489                 *offsets = dummy_tcp_ipv6_packet_offsets;
7490                 return;
7491         }
7492
7493         if (tun_type == ICE_SW_IPV6_UDP) {
7494                 *pkt = dummy_udp_ipv6_packet;
7495                 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7496                 *offsets = dummy_udp_ipv6_packet_offsets;
7497                 return;
7498         }
7499
7500         if (tun_type == ICE_ALL_TUNNELS) {
7501                 *pkt = dummy_gre_udp_packet;
7502                 *pkt_len = sizeof(dummy_gre_udp_packet);
7503                 *offsets = dummy_gre_udp_packet_offsets;
7504                 return;
7505         }
7506
7507         if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7508                 if (tcp) {
7509                         *pkt = dummy_gre_tcp_packet;
7510                         *pkt_len = sizeof(dummy_gre_tcp_packet);
7511                         *offsets = dummy_gre_tcp_packet_offsets;
7512                         return;
7513                 }
7514
7515                 *pkt = dummy_gre_udp_packet;
7516                 *pkt_len = sizeof(dummy_gre_udp_packet);
7517                 *offsets = dummy_gre_udp_packet_offsets;
7518                 return;
7519         }
7520
7521         if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
7522             tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
7523             tun_type == ICE_SW_TUN_GENEVE_VLAN ||
7524             tun_type == ICE_SW_TUN_VXLAN_VLAN) {
7525                 if (tcp) {
7526                         *pkt = dummy_udp_tun_tcp_packet;
7527                         *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
7528                         *offsets = dummy_udp_tun_tcp_packet_offsets;
7529                         return;
7530                 }
7531
7532                 *pkt = dummy_udp_tun_udp_packet;
7533                 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
7534                 *offsets = dummy_udp_tun_udp_packet_offsets;
7535                 return;
7536         }
7537
7538         if (udp && !ipv6) {
7539                 if (vlan) {
7540                         *pkt = dummy_vlan_udp_packet;
7541                         *pkt_len = sizeof(dummy_vlan_udp_packet);
7542                         *offsets = dummy_vlan_udp_packet_offsets;
7543                         return;
7544                 }
7545                 *pkt = dummy_udp_packet;
7546                 *pkt_len = sizeof(dummy_udp_packet);
7547                 *offsets = dummy_udp_packet_offsets;
7548                 return;
7549         } else if (udp && ipv6) {
7550                 if (vlan) {
7551                         *pkt = dummy_vlan_udp_ipv6_packet;
7552                         *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
7553                         *offsets = dummy_vlan_udp_ipv6_packet_offsets;
7554                         return;
7555                 }
7556                 *pkt = dummy_udp_ipv6_packet;
7557                 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7558                 *offsets = dummy_udp_ipv6_packet_offsets;
7559                 return;
7560         } else if ((tcp && ipv6) || ipv6) {
7561                 if (vlan) {
7562                         *pkt = dummy_vlan_tcp_ipv6_packet;
7563                         *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7564                         *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7565                         return;
7566                 }
7567                 *pkt = dummy_tcp_ipv6_packet;
7568                 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7569                 *offsets = dummy_tcp_ipv6_packet_offsets;
7570                 return;
7571         }
7572
7573         if (vlan) {
7574                 *pkt = dummy_vlan_tcp_packet;
7575                 *pkt_len = sizeof(dummy_vlan_tcp_packet);
7576                 *offsets = dummy_vlan_tcp_packet_offsets;
7577         } else {
7578                 *pkt = dummy_tcp_packet;
7579                 *pkt_len = sizeof(dummy_tcp_packet);
7580                 *offsets = dummy_tcp_packet_offsets;
7581         }
7582 }
7583
/**
 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
 *
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *         structure per protocol header
 * @lkups_cnt: number of protocols
 * @s_rule: stores rule information from the match criteria
 * @dummy_pkt: dummy packet to fill according to filter match criteria
 * @pkt_len: packet length of dummy packet
 * @offsets: offset info for the dummy packet
 *
 * Copies @dummy_pkt into the rule's header buffer, then overlays the caller's
 * header values on top of it, writing only the bits selected by each lookup's
 * mask. Returns ICE_SUCCESS on success, ICE_ERR_PARAM if a lookup type has no
 * entry in @offsets or is not handled below, or ICE_ERR_CFG if a header
 * length is not a multiple of ICE_BYTES_PER_WORD.
 */
static enum ice_status
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
			  struct ice_aqc_sw_rules_elem *s_rule,
			  const u8 *dummy_pkt, u16 pkt_len,
			  const struct ice_dummy_pkt_offsets *offsets)
{
	u8 *pkt;
	u16 i;

	/* Start with a packet with a pre-defined/dummy content. Then, fill
	 * in the header values to be looked up or matched.
	 */
	pkt = s_rule->pdata.lkup_tx_rx.hdr;

	ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);

	for (i = 0; i < lkups_cnt; i++) {
		enum ice_protocol_type type;
		u16 offset = 0, len = 0, j;
		bool found = false;

		/* find the start of this layer; it should be found since this
		 * was already checked when search for the dummy packet
		 */
		type = lkups[i].type;
		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
			if (type == offsets[j].type) {
				offset = offsets[j].offset;
				found = true;
				break;
			}
		}
		/* this should never happen in a correct calling sequence */
		if (!found)
			return ICE_ERR_PARAM;

		/* Determine how many bytes of the dummy packet belong to this
		 * protocol header so the masked copy below stays in bounds.
		 */
		switch (lkups[i].type) {
		case ICE_MAC_OFOS:
		case ICE_MAC_IL:
			len = sizeof(struct ice_ether_hdr);
			break;
		case ICE_ETYPE_OL:
			len = sizeof(struct ice_ethtype_hdr);
			break;
		case ICE_VLAN_OFOS:
		case ICE_VLAN_EX:
			len = sizeof(struct ice_vlan_hdr);
			break;
		case ICE_IPV4_OFOS:
		case ICE_IPV4_IL:
			len = sizeof(struct ice_ipv4_hdr);
			break;
		case ICE_IPV6_OFOS:
		case ICE_IPV6_IL:
			len = sizeof(struct ice_ipv6_hdr);
			break;
		case ICE_TCP_IL:
		case ICE_UDP_OF:
		case ICE_UDP_ILOS:
			len = sizeof(struct ice_l4_hdr);
			break;
		case ICE_SCTP_IL:
			len = sizeof(struct ice_sctp_hdr);
			break;
		case ICE_NVGRE:
			len = sizeof(struct ice_nvgre);
			break;
		case ICE_VXLAN:
		case ICE_GENEVE:
		case ICE_VXLAN_GPE:
			len = sizeof(struct ice_udp_tnl_hdr);
			break;

		case ICE_GTP:
		case ICE_GTP_NO_PAY:
			len = sizeof(struct ice_udp_gtp_hdr);
			break;
		case ICE_PPPOE:
			len = sizeof(struct ice_pppoe_hdr);
			break;
		case ICE_ESP:
			len = sizeof(struct ice_esp_hdr);
			break;
		case ICE_NAT_T:
			len = sizeof(struct ice_nat_t_hdr);
			break;
		case ICE_AH:
			len = sizeof(struct ice_ah_hdr);
			break;
		case ICE_L2TPV3:
			len = sizeof(struct ice_l2tpv3_sess_hdr);
			break;
		default:
			return ICE_ERR_PARAM;
		}

		/* the length should be a word multiple */
		if (len % ICE_BYTES_PER_WORD)
			return ICE_ERR_CFG;

		/* We have the offset to the header start, the length, the
		 * caller's header values and mask. Use this information to
		 * copy the data into the dummy packet appropriately based on
		 * the mask. Note that we need to only write the bits as
		 * indicated by the mask to make sure we don't improperly write
		 * over any significant packet data.
		 */
		for (j = 0; j < len / sizeof(u16); j++)
			if (((u16 *)&lkups[i].m_u)[j])
				((u16 *)(pkt + offset))[j] =
					(((u16 *)(pkt + offset))[j] &
					 ~((u16 *)&lkups[i].m_u)[j]) |
					(((u16 *)&lkups[i].h_u)[j] &
					 ((u16 *)&lkups[i].m_u)[j]);
	}

	s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);

	return ICE_SUCCESS;
}
7715
7716 /**
7717  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7718  * @hw: pointer to the hardware structure
7719  * @tun_type: tunnel type
7720  * @pkt: dummy packet to fill in
7721  * @offsets: offset info for the dummy packet
7722  */
7723 static enum ice_status
7724 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7725                         u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
7726 {
7727         u16 open_port, i;
7728
7729         switch (tun_type) {
7730         case ICE_SW_TUN_AND_NON_TUN:
7731         case ICE_SW_TUN_VXLAN_GPE:
7732         case ICE_SW_TUN_VXLAN:
7733         case ICE_SW_TUN_VXLAN_VLAN:
7734         case ICE_SW_TUN_UDP:
7735                 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7736                         return ICE_ERR_CFG;
7737                 break;
7738
7739         case ICE_SW_TUN_GENEVE:
7740         case ICE_SW_TUN_GENEVE_VLAN:
7741                 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7742                         return ICE_ERR_CFG;
7743                 break;
7744
7745         default:
7746                 /* Nothing needs to be done for this tunnel type */
7747                 return ICE_SUCCESS;
7748         }
7749
7750         /* Find the outer UDP protocol header and insert the port number */
7751         for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7752                 if (offsets[i].type == ICE_UDP_OF) {
7753                         struct ice_l4_hdr *hdr;
7754                         u16 offset;
7755
7756                         offset = offsets[i].offset;
7757                         hdr = (struct ice_l4_hdr *)&pkt[offset];
7758                         hdr->dst_port = CPU_TO_BE16(open_port);
7759
7760                         return ICE_SUCCESS;
7761                 }
7762         }
7763
7764         return ICE_ERR_CFG;
7765 }
7766
7767 /**
7768  * ice_find_adv_rule_entry - Search a rule entry
7769  * @hw: pointer to the hardware structure
7770  * @lkups: lookup elements or match criteria for the advanced recipe, one
7771  *         structure per protocol header
7772  * @lkups_cnt: number of protocols
7773  * @recp_id: recipe ID for which we are finding the rule
7774  * @rinfo: other information regarding the rule e.g. priority and action info
7775  *
7776  * Helper function to search for a given advance rule entry
7777  * Returns pointer to entry storing the rule if found
7778  */
7779 static struct ice_adv_fltr_mgmt_list_entry *
7780 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7781                         u16 lkups_cnt, u16 recp_id,
7782                         struct ice_adv_rule_info *rinfo)
7783 {
7784         struct ice_adv_fltr_mgmt_list_entry *list_itr;
7785         struct ice_switch_info *sw = hw->switch_info;
7786         int i;
7787
7788         LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7789                             ice_adv_fltr_mgmt_list_entry, list_entry) {
7790                 bool lkups_matched = true;
7791
7792                 if (lkups_cnt != list_itr->lkups_cnt)
7793                         continue;
7794                 for (i = 0; i < list_itr->lkups_cnt; i++)
7795                         if (memcmp(&list_itr->lkups[i], &lkups[i],
7796                                    sizeof(*lkups))) {
7797                                 lkups_matched = false;
7798                                 break;
7799                         }
7800                 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7801                     rinfo->tun_type == list_itr->rule_info.tun_type &&
7802                     lkups_matched)
7803                         return list_itr;
7804         }
7805         return NULL;
7806 }
7807
7808 /**
7809  * ice_adv_add_update_vsi_list
7810  * @hw: pointer to the hardware structure
7811  * @m_entry: pointer to current adv filter management list entry
7812  * @cur_fltr: filter information from the book keeping entry
7813  * @new_fltr: filter information with the new VSI to be added
7814  *
7815  * Call AQ command to add or update previously created VSI list with new VSI.
7816  *
7817  * Helper function to do book keeping associated with adding filter information
7818  * The algorithm to do the booking keeping is described below :
7819  * When a VSI needs to subscribe to a given advanced filter
7820  *      if only one VSI has been added till now
7821  *              Allocate a new VSI list and add two VSIs
7822  *              to this list using switch rule command
7823  *              Update the previously created switch rule with the
7824  *              newly created VSI list ID
7825  *      if a VSI list was previously created
7826  *              Add the new VSI to the previously created VSI list set
7827  *              using the update switch rule command
7828  */
7829 static enum ice_status
7830 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7831                             struct ice_adv_fltr_mgmt_list_entry *m_entry,
7832                             struct ice_adv_rule_info *cur_fltr,
7833                             struct ice_adv_rule_info *new_fltr)
7834 {
7835         enum ice_status status;
7836         u16 vsi_list_id = 0;
7837
7838         if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7839             cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7840             cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7841                 return ICE_ERR_NOT_IMPL;
7842
7843         if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7844              new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7845             (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7846              cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7847                 return ICE_ERR_NOT_IMPL;
7848
7849         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7850                  /* Only one entry existed in the mapping and it was not already
7851                   * a part of a VSI list. So, create a VSI list with the old and
7852                   * new VSIs.
7853                   */
7854                 struct ice_fltr_info tmp_fltr;
7855                 u16 vsi_handle_arr[2];
7856
7857                 /* A rule already exists with the new VSI being added */
7858                 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7859                     new_fltr->sw_act.fwd_id.hw_vsi_id)
7860                         return ICE_ERR_ALREADY_EXISTS;
7861
7862                 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7863                 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7864                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
7865                                                   &vsi_list_id,
7866                                                   ICE_SW_LKUP_LAST);
7867                 if (status)
7868                         return status;
7869
7870                 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7871                 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7872                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7873                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7874                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7875                 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7876
7877                 /* Update the previous switch rule of "forward to VSI" to
7878                  * "fwd to VSI list"
7879                  */
7880                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7881                 if (status)
7882                         return status;
7883
7884                 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7885                 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7886                 m_entry->vsi_list_info =
7887                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7888                                                 vsi_list_id);
7889         } else {
7890                 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
7891
7892                 if (!m_entry->vsi_list_info)
7893                         return ICE_ERR_CFG;
7894
7895                 /* A rule already exists with the new VSI being added */
7896                 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7897                         return ICE_SUCCESS;
7898
7899                 /* Update the previously created VSI list set with
7900                  * the new VSI ID passed in
7901                  */
7902                 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7903
7904                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7905                                                   vsi_list_id, false,
7906                                                   ice_aqc_opc_update_sw_rules,
7907                                                   ICE_SW_LKUP_LAST);
7908                 /* update VSI list mapping info with new VSI ID */
7909                 if (!status)
7910                         ice_set_bit(vsi_handle,
7911                                     m_entry->vsi_list_info->vsi_map);
7912         }
7913         if (!status)
7914                 m_entry->vsi_count++;
7915         return status;
7916 }
7917
/**
 * ice_add_adv_rule - helper function to create an advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 * together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: other information related to the rule that needs to be programmed
 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
 *               ignored is case of error.
 *
 * This function can program only 1 rule at a time. The lkups is used to
 * describe the all the words that forms the "lookup" portion of the recipe.
 * These words can span multiple protocols. Callers to this function need to
 * pass in a list of protocol headers with lookup information along and mask
 * that determines which words are valid from the given protocol header.
 * rinfo describes other information related to this rule such as forwarding
 * IDs, priority of this rule, etc.
 */
enum ice_status
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
	const struct ice_dummy_pkt_offsets *pkt_offsets;
	struct ice_aqc_sw_rules_elem *s_rule = NULL;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_switch_info *sw;
	enum ice_status status;
	const u8 *pkt = NULL;
	bool prof_rule;
	u16 word_cnt;
	u32 act = 0;
	u8 q_rgn;

	/* Initialize profile to result index bitmap */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);
	}

	/* Profile rules may match on no lookup words; ordinary rules must
	 * carry at least one
	 */
	prof_rule = ice_is_prof_rule(rinfo->tun_type);
	if (!prof_rule && !lkups_cnt)
		return ICE_ERR_PARAM;

	/* get # of words we need to match */
	word_cnt = 0;
	for (i = 0; i < lkups_cnt; i++) {
		u16 j, *ptr;

		/* count every non-zero 16-bit word in the mask */
		ptr = (u16 *)&lkups[i].m_u;
		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
			if (ptr[j] != 0)
				word_cnt++;
	}

	if (prof_rule) {
		if (word_cnt > ICE_MAX_CHAIN_WORDS)
			return ICE_ERR_PARAM;
	} else {
		if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
			return ICE_ERR_PARAM;
	}

	/* make sure that we can locate a dummy packet */
	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
			      &pkt_offsets);
	if (!pkt) {
		status = ICE_ERR_PARAM;
		goto err_ice_add_adv_rule;
	}

	/* Only VSI / queue / queue-group forward and drop are supported */
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
		return ICE_ERR_CFG;

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Resolve software VSI handle to the HW VSI number used by the rule */
	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
	if (rinfo->sw_act.flag & ICE_FLTR_TX)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	if (status)
		return status;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	if (m_entry) {
		/* we have to add VSI to VSI_LIST and increment vsi_count.
		 * Also Update VSI list so that we can change forwarding rule
		 * if the rule already exists, we will check if it exists with
		 * same vsi_id, if not then add it to the VSI list if it already
		 * exists if not then create a VSI list and add the existing VSI
		 * ID and the new VSI ID to the list
		 * We will add that VSI to the list
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
						     rinfo);
		if (added_entry) {
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
		}
		return status;
	}
	/* No matching rule yet: build a new switch rule buffer sized for the
	 * dummy packet header
	 */
	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	act |= ICE_SINGLE_ACT_LAN_ENABLE;
	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is encoded as log2 of the group size */
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
	default:
		status = ICE_ERR_CFG;
		goto err_ice_add_adv_rule;
	}

	/* set the rule LOOKUP type based on caller specified 'RX'
	 * instead of hardcoding it to be either LOOKUP_TX/RX
	 *
	 * for 'RX' set the source to be the port number
	 * for 'TX' set the source to be the source HW VSI number (determined
	 * by caller)
	 */
	if (rinfo->rx) {
		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->pdata.lkup_tx_rx.src =
			CPU_TO_LE16(hw->port_info->lport);
	} else {
		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
	}

	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
					   pkt_len, pkt_offsets);
	if (status)
		goto err_ice_add_adv_rule;

	/* Tunneled rules additionally need the open UDP tunnel port patched
	 * into the dummy packet
	 */
	if (rinfo->tun_type != ICE_NON_TUN &&
	    rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
						 s_rule->pdata.lkup_tx_rx.hdr,
						 pkt_offsets);
		if (status)
			goto err_ice_add_adv_rule;
	}

	/* Program the rule into HW via the admin queue */
	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
				 NULL);
	if (status)
		goto err_ice_add_adv_rule;
	adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
		ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
	if (!adv_fltr) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	}

	/* Book-keeping copy of the lookups; profile rules may legally have
	 * zero lookups, so a NULL copy is only an error for non-profile rules
	 */
	adv_fltr->lkups = (struct ice_adv_lkup_elem *)
		ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
			   ICE_NONDMA_TO_NONDMA);
	if (!adv_fltr->lkups && !prof_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	adv_fltr->rule_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	/* NOTE(review): the list is appended to without taking the recipe's
	 * filt_rule_lock here — confirm callers serialize rule addition
	 */
	LIST_ADD(&adv_fltr->list_entry, rule_head);
	if (added_entry) {
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	}
err_ice_add_adv_rule:
	/* On failure release the partially-built book-keeping entry;
	 * ice_free() on a NULL s_rule is harmless here
	 */
	if (status && adv_fltr) {
		ice_free(hw, adv_fltr->lkups);
		ice_free(hw, adv_fltr);
	}

	ice_free(hw, s_rule);

	return status;
}
8145
/**
 * ice_adv_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Removes @vsi_handle from the VSI list backing @fm_list. When only one VSI
 * remains afterwards, the rule is converted back to a direct "forward to VSI"
 * rule and the now-unused VSI list is deleted.
 */
static enum ice_status
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status;
	u16 vsi_list_id;

	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	/* NOTE(review): fm_list->vsi_list_info is dereferenced without a NULL
	 * check — presumably guaranteed non-NULL by the fltr_act check above;
	 * confirm against how entries are created
	 */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	/* Remove this VSI from the list rule in HW */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		struct ice_fltr_info tmp_fltr;
		u16 rem_vsi_handle;

		/* Identify the single VSI left on the list */
		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
						    ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		/* Rewrite the rule to forward directly to the remaining VSI */
		ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;

		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);
			return status;
		}
		fm_list->vsi_list_info->ref_cnt--;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
8235
/**
 * ice_rem_adv_rule - removes existing advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *         together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: Its the pointer to the rule information for the rule
 *
 * This function can be used to remove 1 rule at a time. The lkups is
 * used to describe all the words that forms the "lookup" portion of the
 * rule. These words can span multiple protocols. Callers to this function
 * need to pass in a list of protocol headers with lookup information along
 * and mask that determines which words are valid from the given protocol
 * header. rinfo describes other information related to this rule such as
 * forwarding IDs, priority of this rule, etc.
 *
 * Returns ICE_SUCCESS if the rule is removed (or was already gone),
 * ICE_ERR_CFG/ICE_ERR_PARAM on malformed lookups or unknown recipe, or a
 * status propagated from the VSI list update / admin queue call.
 */
enum ice_status
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	bool remove_rule = false;
	u16 i, rid, vsi_handle;

	/* Re-derive the valid protocol/offset words from the caller's lookups
	 * so the owning recipe can be located below.
	 */
	ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
			return ICE_ERR_CFG;

		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
		if (!count)
			return ICE_ERR_CFG;
	}

	/* Create any special protocol/offset pairs, such as looking at tunnel
	 * bits by extracting metadata
	 */
	status = ice_add_special_words(rinfo, &lkup_exts);
	if (status)
		return status;

	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return ICE_ERR_PARAM;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	if (!list_elem)
		return ICE_SUCCESS;
	ice_acquire_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* Rule forwards to a single target; remove it outright */
		remove_rule = true;
	} else if (list_elem->vsi_count > 1) {
		/* Other VSIs still reference the VSI list: only detach this
		 * VSI from the list and keep the rule itself.
		 */
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	} else {
		/* Possibly the last VSI on the list: update the list first,
		 * then remove the HW rule once no VSI references remain.
		 */
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status) {
			ice_release_lock(rule_lock);
			return status;
		}
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}
	ice_release_lock(rule_lock);
	if (remove_rule) {
		struct ice_aqc_sw_rules_elem *s_rule;
		u16 rule_buf_sz;

		/* A removal request carries no packet header data */
		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
		s_rule = (struct ice_aqc_sw_rules_elem *)
			ice_malloc(hw, rule_buf_sz);
		if (!s_rule)
			return ICE_ERR_NO_MEMORY;
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 rule_buf_sz, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
			struct ice_switch_info *sw = hw->switch_info;

			/* DOES_NOT_EXIST means HW already dropped the rule;
			 * still purge our bookkeeping entry for it.
			 */
			ice_acquire_lock(rule_lock);
			LIST_DEL(&list_elem->list_entry);
			ice_free(hw, list_elem->lkups);
			ice_free(hw, list_elem);
			ice_release_lock(rule_lock);
			if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
				sw->recp_list[rid].adv_rule = false;
		}
		ice_free(hw, s_rule);
	}
	return status;
}
8341
8342 /**
8343  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8344  * @hw: pointer to the hardware structure
8345  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8346  *
8347  * This function is used to remove 1 rule at a time. The removal is based on
8348  * the remove_entry parameter. This function will remove rule for a given
8349  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8350  */
8351 enum ice_status
8352 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8353                        struct ice_rule_query_data *remove_entry)
8354 {
8355         struct ice_adv_fltr_mgmt_list_entry *list_itr;
8356         struct LIST_HEAD_TYPE *list_head;
8357         struct ice_adv_rule_info rinfo;
8358         struct ice_switch_info *sw;
8359
8360         sw = hw->switch_info;
8361         if (!sw->recp_list[remove_entry->rid].recp_created)
8362                 return ICE_ERR_PARAM;
8363         list_head = &sw->recp_list[remove_entry->rid].filt_rules;
8364         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8365                             list_entry) {
8366                 if (list_itr->rule_info.fltr_rule_id ==
8367                     remove_entry->rule_id) {
8368                         rinfo = list_itr->rule_info;
8369                         rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8370                         return ice_rem_adv_rule(hw, list_itr->lkups,
8371                                                 list_itr->lkups_cnt, &rinfo);
8372                 }
8373         }
8374         /* either list is empty or unable to find rule */
8375         return ICE_ERR_DOES_NOT_EXIST;
8376 }
8377
8378 /**
8379  * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
8380  *                       given VSI handle
8381  * @hw: pointer to the hardware structure
8382  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8383  *
8384  * This function is used to remove all the rules for a given VSI and as soon
8385  * as removing a rule fails, it will return immediately with the error code,
8386  * else it will return ICE_SUCCESS
8387  */
8388 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8389 {
8390         struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
8391         struct ice_vsi_list_map_info *map_info;
8392         struct LIST_HEAD_TYPE *list_head;
8393         struct ice_adv_rule_info rinfo;
8394         struct ice_switch_info *sw;
8395         enum ice_status status;
8396         u8 rid;
8397
8398         sw = hw->switch_info;
8399         for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
8400                 if (!sw->recp_list[rid].recp_created)
8401                         continue;
8402                 if (!sw->recp_list[rid].adv_rule)
8403                         continue;
8404
8405                 list_head = &sw->recp_list[rid].filt_rules;
8406                 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
8407                                          ice_adv_fltr_mgmt_list_entry,
8408                                          list_entry) {
8409                         rinfo = list_itr->rule_info;
8410
8411                         if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
8412                                 map_info = list_itr->vsi_list_info;
8413                                 if (!map_info)
8414                                         continue;
8415
8416                                 if (!ice_is_bit_set(map_info->vsi_map,
8417                                                     vsi_handle))
8418                                         continue;
8419                         } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
8420                                 continue;
8421                         }
8422
8423                         rinfo.sw_act.vsi_handle = vsi_handle;
8424                         status = ice_rem_adv_rule(hw, list_itr->lkups,
8425                                                   list_itr->lkups_cnt, &rinfo);
8426
8427                         if (status)
8428                                 return status;
8429                 }
8430         }
8431         return ICE_SUCCESS;
8432 }
8433
/**
 * ice_replay_fltr - Replay all the filters stored by a specific list head
 * @hw: pointer to the hardware structure
 * @list_head: list for which filters needs to be replayed
 * @recp_id: Recipe ID for which rules need to be replayed
 *
 * Re-adds every filter on @list_head through the normal add paths, then
 * frees the saved entries. Returns the first failing status, or ICE_SUCCESS.
 */
static enum ice_status
ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sw_recipe *recp_list;
	u8 lport = hw->port_info->lport;
	struct LIST_HEAD_TYPE l_head;

	if (LIST_EMPTY(list_head))
		return status;

	recp_list = &hw->switch_info->recp_list[recp_id];
	/* Move entries from the given list_head to a temporary l_head so that
	 * they can be replayed. Otherwise when trying to re-add the same
	 * filter, the function will return already exists
	 */
	LIST_REPLACE_INIT(list_head, &l_head);

	/* Mark the given list_head empty by reinitializing it so filters
	 * could be added again by *handler
	 */
	LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
			    list_entry) {
		struct ice_fltr_list_entry f_entry;
		u16 vsi_handle;

		f_entry.fltr_info = itr->fltr_info;
		/* Single-VSI non-VLAN rules replay directly with the saved
		 * filter info; everything else is expanded per VSI below.
		 */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
			status = ice_add_rule_internal(hw, recp_list, lport,
						       &f_entry);
			if (status != ICE_SUCCESS)
				goto end;
			continue;
		}

		/* Add a filter per VSI separately */
		ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
				     ICE_MAX_VSI) {
			if (!ice_is_vsi_valid(hw, vsi_handle))
				break;

			/* Clear the bit so the add path rebuilds the VSI
			 * list membership from scratch.
			 */
			ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
			f_entry.fltr_info.vsi_handle = vsi_handle;
			f_entry.fltr_info.fwd_id.hw_vsi_id =
				ice_get_hw_vsi_num(hw, vsi_handle);
			f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
			if (recp_id == ICE_SW_LKUP_VLAN)
				status = ice_add_vlan_internal(hw, recp_list,
							       &f_entry);
			else
				status = ice_add_rule_internal(hw, recp_list,
							       lport,
							       &f_entry);
			if (status != ICE_SUCCESS)
				goto end;
		}
	}
end:
	/* Clear the filter management list */
	ice_rem_sw_rule_info(hw, &l_head);
	return status;
}
8503
8504 /**
8505  * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8506  * @hw: pointer to the hardware structure
8507  *
8508  * NOTE: This function does not clean up partially added filters on error.
8509  * It is up to caller of the function to issue a reset or fail early.
8510  */
8511 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8512 {
8513         struct ice_switch_info *sw = hw->switch_info;
8514         enum ice_status status = ICE_SUCCESS;
8515         u8 i;
8516
8517         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8518                 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
8519
8520                 status = ice_replay_fltr(hw, i, head);
8521                 if (status != ICE_SUCCESS)
8522                         return status;
8523         }
8524         return status;
8525 }
8526
/**
 * ice_replay_vsi_fltr - Replay filters for requested VSI
 * @hw: pointer to the hardware structure
 * @pi: pointer to port information structure
 * @sw: pointer to switch info struct for which function replays filters
 * @vsi_handle: driver VSI handle
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 *
 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
 * It is required to pass valid VSI handle.
 *
 * Returns the first failing add status, or ICE_SUCCESS.
 */
static enum ice_status
ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
		    struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
		    struct LIST_HEAD_TYPE *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sw_recipe *recp_list;
	u16 hw_vsi_id;

	if (LIST_EMPTY(list_head))
		return status;
	recp_list = &sw->recp_list[recp_id];
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
			    list_entry) {
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		/* Single-VSI non-VLAN rule owned by this VSI: replay with
		 * the saved filter info as-is.
		 */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_list,
						       pi->lport,
						       &f_entry);
			if (status != ICE_SUCCESS)
				goto end;
			continue;
		}
		/* VSI-list rule: only relevant if this VSI is a member */
		if (!itr->vsi_list_info ||
		    !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
			continue;
		/* Clearing it so that the logic can add it back */
		ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, recp_list, &f_entry);
		else
			status = ice_add_rule_internal(hw, recp_list,
						       pi->lport,
						       &f_entry);
		if (status != ICE_SUCCESS)
			goto end;
	}
end:
	return status;
}
8593
8594 /**
8595  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
8596  * @hw: pointer to the hardware structure
8597  * @vsi_handle: driver VSI handle
8598  * @list_head: list for which filters need to be replayed
8599  *
8600  * Replay the advanced rule for the given VSI.
8601  */
8602 static enum ice_status
8603 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
8604                         struct LIST_HEAD_TYPE *list_head)
8605 {
8606         struct ice_rule_query_data added_entry = { 0 };
8607         struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
8608         enum ice_status status = ICE_SUCCESS;
8609
8610         if (LIST_EMPTY(list_head))
8611                 return status;
8612         LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
8613                             list_entry) {
8614                 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
8615                 u16 lk_cnt = adv_fltr->lkups_cnt;
8616
8617                 if (vsi_handle != rinfo->sw_act.vsi_handle)
8618                         continue;
8619                 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
8620                                           &added_entry);
8621                 if (status)
8622                         break;
8623         }
8624         return status;
8625 }
8626
8627 /**
8628  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
8629  * @hw: pointer to the hardware structure
8630  * @pi: pointer to port information structure
8631  * @vsi_handle: driver VSI handle
8632  *
8633  * Replays filters for requested VSI via vsi_handle.
8634  */
8635 enum ice_status
8636 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8637                         u16 vsi_handle)
8638 {
8639         struct ice_switch_info *sw = hw->switch_info;
8640         enum ice_status status;
8641         u8 i;
8642
8643         /* Update the recipes that were created */
8644         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8645                 struct LIST_HEAD_TYPE *head;
8646
8647                 head = &sw->recp_list[i].filt_replay_rules;
8648                 if (!sw->recp_list[i].adv_rule)
8649                         status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
8650                                                      head);
8651                 else
8652                         status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
8653                 if (status != ICE_SUCCESS)
8654                         return status;
8655         }
8656
8657         return ICE_SUCCESS;
8658 }
8659
8660 /**
8661  * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
8662  * @hw: pointer to the HW struct
8663  * @sw: pointer to switch info struct for which function removes filters
8664  *
8665  * Deletes the filter replay rules for given switch
8666  */
8667 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
8668 {
8669         u8 i;
8670
8671         if (!sw)
8672                 return;
8673
8674         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8675                 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
8676                         struct LIST_HEAD_TYPE *l_head;
8677
8678                         l_head = &sw->recp_list[i].filt_replay_rules;
8679                         if (!sw->recp_list[i].adv_rule)
8680                                 ice_rem_sw_rule_info(hw, l_head);
8681                         else
8682                                 ice_rem_adv_rule_info(hw, l_head);
8683                 }
8684         }
8685 }
8686
/**
 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
 * @hw: pointer to the HW struct
 *
 * Deletes the filter replay rules.
 */
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
{
	/* Convenience wrapper operating on the HW's own switch info */
	ice_rm_sw_replay_rule_info(hw, hw->switch_info);
}