/* net/ice/base: fix bitmap set function
 * (dpdk.git) drivers/net/ice/base/ice_switch.c
 */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
7 #include "ice_flow.h"
8
/* Byte offsets of fields inside the dummy Ethernet header defined below
 * (DA at 0, Ethertype at 12, VLAN TCI at 14), plus well-known protocol
 * identifiers: 0x2F is the IPv4 protocol number for GRE (used by NVGRE),
 * 0x0057 the PPP protocol number for IPv6, 0x86DD the IPv6 Ethertype and
 * 0x06 the IP protocol number for TCP.
 */
9 #define ICE_ETH_DA_OFFSET               0
10 #define ICE_ETH_ETHTYPE_OFFSET          12
11 #define ICE_ETH_VLAN_TCI_OFFSET         14
12 #define ICE_MAX_VLAN_ID                 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID         0x002F
14 #define ICE_PPP_IPV6_PROTO_ID           0x0057
15 #define ICE_IPV6_ETHER_ID               0x86DD
16 #define ICE_TCP_PROTO_ID                0x06
17
18 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
19  * struct to configure any switch filter rules.
20  * {DA (6 bytes), SA(6 bytes),
21  * Ether type (2 bytes for header without VLAN tag) OR
22  * VLAN tag (4 bytes for header with VLAN tag) }
23  *
24  * Word on Hardcoded values
25  * byte 0 = 0x2: to identify it as locally administered DA MAC
26  * byte 6 = 0x2: to identify it as locally administered SA MAC
27  * byte 12 = 0x81 & byte 13 = 0x00:
28  *      In case of VLAN filter first two bytes defines ether type (0x8100)
29  *      and remaining two bytes are placeholder for programming a given VLAN ID
30  *      In case of Ether type filter it is treated as header without VLAN tag
31  *      and byte 12 and 13 is used to program a given Ether type instead
32  */
33 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
34                                                         0x2, 0, 0, 0, 0, 0,
35                                                         0x81, 0, 0, 0};
36
/* Pairs a protocol header type with the byte offset at which that header
 * starts inside one of the dummy packets below.  Each offsets array is
 * terminated by an entry whose type is ICE_PROTOCOL_LAST.
 */
37 struct ice_dummy_pkt_offsets {
38         enum ice_protocol_type type;
39         u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
40 };
41
/* offsets for MAC + IPv4 + NVGRE + inner MAC/IPv4/TCP dummy packet */
42 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
43         { ICE_MAC_OFOS,         0 },
44         { ICE_ETYPE_OL,         12 },
45         { ICE_IPV4_OFOS,        14 },
46         { ICE_NVGRE,            34 },
47         { ICE_MAC_IL,           42 },
48         { ICE_IPV4_IL,          56 },
49         { ICE_TCP_IL,           76 },
50         { ICE_PROTOCOL_LAST,    0 },
51 };
52
/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC/IPv4/TCP; non-zero bytes
 * are fixed header fields (Ethertype 0x0800, outer IP proto 0x2F = GRE,
 * inner IP proto 0x06 = TCP).
 */
53 static const u8 dummy_gre_tcp_packet[] = {
54         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
55         0x00, 0x00, 0x00, 0x00,
56         0x00, 0x00, 0x00, 0x00,
57
58         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
59
60         0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
61         0x00, 0x00, 0x00, 0x00,
62         0x00, 0x2F, 0x00, 0x00,
63         0x00, 0x00, 0x00, 0x00,
64         0x00, 0x00, 0x00, 0x00,
65
66         0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
67         0x00, 0x00, 0x00, 0x00,
68
69         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
70         0x00, 0x00, 0x00, 0x00,
71         0x00, 0x00, 0x00, 0x00,
72         0x08, 0x00,
73
74         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
75         0x00, 0x00, 0x00, 0x00,
76         0x00, 0x06, 0x00, 0x00,
77         0x00, 0x00, 0x00, 0x00,
78         0x00, 0x00, 0x00, 0x00,
79
80         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
81         0x00, 0x00, 0x00, 0x00,
82         0x00, 0x00, 0x00, 0x00,
83         0x50, 0x02, 0x20, 0x00,
84         0x00, 0x00, 0x00, 0x00
85 };
86
/* offsets for MAC + IPv4 + NVGRE + inner MAC/IPv4/UDP dummy packet */
87 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
88         { ICE_MAC_OFOS,         0 },
89         { ICE_ETYPE_OL,         12 },
90         { ICE_IPV4_OFOS,        14 },
91         { ICE_NVGRE,            34 },
92         { ICE_MAC_IL,           42 },
93         { ICE_IPV4_IL,          56 },
94         { ICE_UDP_ILOS,         76 },
95         { ICE_PROTOCOL_LAST,    0 },
96 };
97
/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC/IPv4/UDP; inner IP
 * proto byte is 0x11 (UDP).
 */
98 static const u8 dummy_gre_udp_packet[] = {
99         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
100         0x00, 0x00, 0x00, 0x00,
101         0x00, 0x00, 0x00, 0x00,
102
103         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
104
105         0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
106         0x00, 0x00, 0x00, 0x00,
107         0x00, 0x2F, 0x00, 0x00,
108         0x00, 0x00, 0x00, 0x00,
109         0x00, 0x00, 0x00, 0x00,
110
111         0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
112         0x00, 0x00, 0x00, 0x00,
113
114         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
115         0x00, 0x00, 0x00, 0x00,
116         0x00, 0x00, 0x00, 0x00,
117         0x08, 0x00,
118
119         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
120         0x00, 0x00, 0x00, 0x00,
121         0x00, 0x11, 0x00, 0x00,
122         0x00, 0x00, 0x00, 0x00,
123         0x00, 0x00, 0x00, 0x00,
124
125         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
126         0x00, 0x08, 0x00, 0x00,
127 };
128
/* offsets for UDP tunnel (VXLAN/GENEVE/VXLAN-GPE share offset 42) with
 * inner MAC/IPv4/TCP dummy packet
 */
129 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
130         { ICE_MAC_OFOS,         0 },
131         { ICE_ETYPE_OL,         12 },
132         { ICE_IPV4_OFOS,        14 },
133         { ICE_UDP_OF,           34 },
134         { ICE_VXLAN,            42 },
135         { ICE_GENEVE,           42 },
136         { ICE_VXLAN_GPE,        42 },
137         { ICE_MAC_IL,           50 },
138         { ICE_IPV4_IL,          64 },
139         { ICE_TCP_IL,           84 },
140         { ICE_PROTOCOL_LAST,    0 },
141 };
142
/* Dummy UDP-tunneled packet with inner TCP; outer UDP dst port is
 * 0x12b5 (4789, the VXLAN port).
 */
143 static const u8 dummy_udp_tun_tcp_packet[] = {
144         0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
145         0x00, 0x00, 0x00, 0x00,
146         0x00, 0x00, 0x00, 0x00,
147
148         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
149
150         0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
151         0x00, 0x01, 0x00, 0x00,
152         0x40, 0x11, 0x00, 0x00,
153         0x00, 0x00, 0x00, 0x00,
154         0x00, 0x00, 0x00, 0x00,
155
156         0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
157         0x00, 0x46, 0x00, 0x00,
158
159         0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
160         0x00, 0x00, 0x00, 0x00,
161
162         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
163         0x00, 0x00, 0x00, 0x00,
164         0x00, 0x00, 0x00, 0x00,
165         0x08, 0x00,
166
167         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
168         0x00, 0x01, 0x00, 0x00,
169         0x40, 0x06, 0x00, 0x00,
170         0x00, 0x00, 0x00, 0x00,
171         0x00, 0x00, 0x00, 0x00,
172
173         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
174         0x00, 0x00, 0x00, 0x00,
175         0x00, 0x00, 0x00, 0x00,
176         0x50, 0x02, 0x20, 0x00,
177         0x00, 0x00, 0x00, 0x00
178 };
179
/* offsets for UDP tunnel (VXLAN/GENEVE/VXLAN-GPE share offset 42) with
 * inner MAC/IPv4/UDP dummy packet
 */
180 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
181         { ICE_MAC_OFOS,         0 },
182         { ICE_ETYPE_OL,         12 },
183         { ICE_IPV4_OFOS,        14 },
184         { ICE_UDP_OF,           34 },
185         { ICE_VXLAN,            42 },
186         { ICE_GENEVE,           42 },
187         { ICE_VXLAN_GPE,        42 },
188         { ICE_MAC_IL,           50 },
189         { ICE_IPV4_IL,          64 },
190         { ICE_UDP_ILOS,         84 },
191         { ICE_PROTOCOL_LAST,    0 },
192 };
193
/* Dummy UDP-tunneled packet with inner UDP; outer UDP dst port is
 * 0x12b5 (4789, the VXLAN port).
 */
194 static const u8 dummy_udp_tun_udp_packet[] = {
195         0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
196         0x00, 0x00, 0x00, 0x00,
197         0x00, 0x00, 0x00, 0x00,
198
199         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
200
201         0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
202         0x00, 0x01, 0x00, 0x00,
203         0x00, 0x11, 0x00, 0x00,
204         0x00, 0x00, 0x00, 0x00,
205         0x00, 0x00, 0x00, 0x00,
206
207         0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
208         0x00, 0x3a, 0x00, 0x00,
209
210         0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
211         0x00, 0x00, 0x00, 0x00,
212
213         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
214         0x00, 0x00, 0x00, 0x00,
215         0x00, 0x00, 0x00, 0x00,
216         0x08, 0x00,
217
218         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
219         0x00, 0x01, 0x00, 0x00,
220         0x00, 0x11, 0x00, 0x00,
221         0x00, 0x00, 0x00, 0x00,
222         0x00, 0x00, 0x00, 0x00,
223
224         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
225         0x00, 0x08, 0x00, 0x00,
226 };
227
228 /* offset info for MAC + IPv4 + UDP dummy packet */
229 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
230         { ICE_MAC_OFOS,         0 },
231         { ICE_ETYPE_OL,         12 },
232         { ICE_IPV4_OFOS,        14 },
233         { ICE_UDP_ILOS,         34 },
234         { ICE_PROTOCOL_LAST,    0 },
235 };
236
237 /* Dummy packet for MAC + IPv4 + UDP */
/* IPv4 total length is 0x001c = 28 (20-byte IP header + 8-byte UDP header,
 * no payload); UDP length field is 8 accordingly.
 */
238 static const u8 dummy_udp_packet[] = {
239         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
240         0x00, 0x00, 0x00, 0x00,
241         0x00, 0x00, 0x00, 0x00,
242
243         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
244
245         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
246         0x00, 0x01, 0x00, 0x00,
247         0x00, 0x11, 0x00, 0x00,
248         0x00, 0x00, 0x00, 0x00,
249         0x00, 0x00, 0x00, 0x00,
250
251         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
252         0x00, 0x08, 0x00, 0x00,
253
254         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
255 };
256
257 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
258 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
259         { ICE_MAC_OFOS,         0 },
260         { ICE_ETYPE_OL,         12 },
261         { ICE_VLAN_OFOS,        14 },
262         { ICE_IPV4_OFOS,        18 },
263         { ICE_UDP_ILOS,         38 },
264         { ICE_PROTOCOL_LAST,    0 },
265 };
266
267 /* C-tag (801.1Q), IPv4:UDP dummy packet */
/* Outer Ethertype is 0x8100 (802.1Q); the VLAN TCI placeholder at 14 is
 * followed by the encapsulated Ethertype 0x0800.
 */
268 static const u8 dummy_vlan_udp_packet[] = {
269         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
270         0x00, 0x00, 0x00, 0x00,
271         0x00, 0x00, 0x00, 0x00,
272
273         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
274
275         0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
276
277         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
278         0x00, 0x01, 0x00, 0x00,
279         0x00, 0x11, 0x00, 0x00,
280         0x00, 0x00, 0x00, 0x00,
281         0x00, 0x00, 0x00, 0x00,
282
283         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
284         0x00, 0x08, 0x00, 0x00,
285
286         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
287 };
288
289 /* offset info for MAC + IPv4 + TCP dummy packet */
290 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
291         { ICE_MAC_OFOS,         0 },
292         { ICE_ETYPE_OL,         12 },
293         { ICE_IPV4_OFOS,        14 },
294         { ICE_TCP_IL,           34 },
295         { ICE_PROTOCOL_LAST,    0 },
296 };
297
298 /* Dummy packet for MAC + IPv4 + TCP */
/* IPv4 total length 0x0028 = 40 (20-byte IP + 20-byte TCP header);
 * TCP data offset nibble 0x5 = 20-byte header.
 */
299 static const u8 dummy_tcp_packet[] = {
300         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
301         0x00, 0x00, 0x00, 0x00,
302         0x00, 0x00, 0x00, 0x00,
303
304         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
305
306         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
307         0x00, 0x01, 0x00, 0x00,
308         0x00, 0x06, 0x00, 0x00,
309         0x00, 0x00, 0x00, 0x00,
310         0x00, 0x00, 0x00, 0x00,
311
312         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
313         0x00, 0x00, 0x00, 0x00,
314         0x00, 0x00, 0x00, 0x00,
315         0x50, 0x00, 0x00, 0x00,
316         0x00, 0x00, 0x00, 0x00,
317
318         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
319 };
320
321 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
322 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
323         { ICE_MAC_OFOS,         0 },
324         { ICE_ETYPE_OL,         12 },
325         { ICE_VLAN_OFOS,        14 },
326         { ICE_IPV4_OFOS,        18 },
327         { ICE_TCP_IL,           38 },
328         { ICE_PROTOCOL_LAST,    0 },
329 };
330
331 /* C-tag (801.1Q), IPv4:TCP dummy packet */
/* Same layout as dummy_tcp_packet but with a 4-byte 802.1Q tag inserted
 * at offset 14 (Ethertype 0x8100, encapsulated Ethertype 0x0800).
 */
332 static const u8 dummy_vlan_tcp_packet[] = {
333         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334         0x00, 0x00, 0x00, 0x00,
335         0x00, 0x00, 0x00, 0x00,
336
337         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
338
339         0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
340
341         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
342         0x00, 0x01, 0x00, 0x00,
343         0x00, 0x06, 0x00, 0x00,
344         0x00, 0x00, 0x00, 0x00,
345         0x00, 0x00, 0x00, 0x00,
346
347         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
348         0x00, 0x00, 0x00, 0x00,
349         0x00, 0x00, 0x00, 0x00,
350         0x50, 0x00, 0x00, 0x00,
351         0x00, 0x00, 0x00, 0x00,
352
353         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
354 };
355
/* offset info for MAC + IPv6 + TCP dummy packet */
356 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
357         { ICE_MAC_OFOS,         0 },
358         { ICE_ETYPE_OL,         12 },
359         { ICE_IPV6_OFOS,        14 },
360         { ICE_TCP_IL,           54 },
361         { ICE_PROTOCOL_LAST,    0 },
362 };
363
/* Dummy packet for MAC + IPv6 + TCP (Ethertype 0x86DD; IPv6 payload
 * length 0x14 = 20-byte TCP header).
 */
364 static const u8 dummy_tcp_ipv6_packet[] = {
365         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
366         0x00, 0x00, 0x00, 0x00,
367         0x00, 0x00, 0x00, 0x00,
368
369         0x86, 0xDD,             /* ICE_ETYPE_OL 12 */
370
371         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
372         0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
373         0x00, 0x00, 0x00, 0x00,
374         0x00, 0x00, 0x00, 0x00,
375         0x00, 0x00, 0x00, 0x00,
376         0x00, 0x00, 0x00, 0x00,
377         0x00, 0x00, 0x00, 0x00,
378         0x00, 0x00, 0x00, 0x00,
379         0x00, 0x00, 0x00, 0x00,
380         0x00, 0x00, 0x00, 0x00,
381
382         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
383         0x00, 0x00, 0x00, 0x00,
384         0x00, 0x00, 0x00, 0x00,
385         0x50, 0x00, 0x00, 0x00,
386         0x00, 0x00, 0x00, 0x00,
387
388         0x00, 0x00, /* 2 bytes for 4 byte alignment */
389 };
390
391 /* C-tag (802.1Q): IPv6 + TCP */
392 static const struct ice_dummy_pkt_offsets
393 dummy_vlan_tcp_ipv6_packet_offsets[] = {
394         { ICE_MAC_OFOS,         0 },
395         { ICE_ETYPE_OL,         12 },
396         { ICE_VLAN_OFOS,        14 },
397         { ICE_IPV6_OFOS,        18 },
398         { ICE_TCP_IL,           58 },
399         { ICE_PROTOCOL_LAST,    0 },
400 };
401
402 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
/* 802.1Q tag at 14 carries the encapsulated Ethertype 0x86DD; IPv6
 * payload length 0x14 = 20-byte TCP header.
 */
403 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
404         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
405         0x00, 0x00, 0x00, 0x00,
406         0x00, 0x00, 0x00, 0x00,
407
408         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
409
410         0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
411
412         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
413         0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
414         0x00, 0x00, 0x00, 0x00,
415         0x00, 0x00, 0x00, 0x00,
416         0x00, 0x00, 0x00, 0x00,
417         0x00, 0x00, 0x00, 0x00,
418         0x00, 0x00, 0x00, 0x00,
419         0x00, 0x00, 0x00, 0x00,
420         0x00, 0x00, 0x00, 0x00,
421         0x00, 0x00, 0x00, 0x00,
422
423         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
424         0x00, 0x00, 0x00, 0x00,
425         0x00, 0x00, 0x00, 0x00,
426         0x50, 0x00, 0x00, 0x00,
427         0x00, 0x00, 0x00, 0x00,
428
429         0x00, 0x00, /* 2 bytes for 4 byte alignment */
430 };
431
432 /* IPv6 + UDP */
433 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
434         { ICE_MAC_OFOS,         0 },
435         { ICE_ETYPE_OL,         12 },
436         { ICE_IPV6_OFOS,        14 },
437         { ICE_UDP_ILOS,         54 },
438         { ICE_PROTOCOL_LAST,    0 },
439 };
440
441 /* IPv6 + UDP dummy packet */
/* IPv6 payload length 0x10 = 16 (8-byte UDP header + 8 trailing bytes
 * reserved for ESP — see comment below).
 */
442 static const u8 dummy_udp_ipv6_packet[] = {
443         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
444         0x00, 0x00, 0x00, 0x00,
445         0x00, 0x00, 0x00, 0x00,
446
447         0x86, 0xDD,             /* ICE_ETYPE_OL 12 */
448
449         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
450         0x00, 0x10, 0x11, 0x00, /* Next header UDP */
451         0x00, 0x00, 0x00, 0x00,
452         0x00, 0x00, 0x00, 0x00,
453         0x00, 0x00, 0x00, 0x00,
454         0x00, 0x00, 0x00, 0x00,
455         0x00, 0x00, 0x00, 0x00,
456         0x00, 0x00, 0x00, 0x00,
457         0x00, 0x00, 0x00, 0x00,
458         0x00, 0x00, 0x00, 0x00,
459
460         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
461         0x00, 0x10, 0x00, 0x00,
462
463         0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
464         0x00, 0x00, 0x00, 0x00,
465
466         0x00, 0x00, /* 2 bytes for 4 byte alignment */
467 };
468
469 /* C-tag (802.1Q): IPv6 + UDP */
470 static const struct ice_dummy_pkt_offsets
471 dummy_vlan_udp_ipv6_packet_offsets[] = {
472         { ICE_MAC_OFOS,         0 },
473         { ICE_ETYPE_OL,         12 },
474         { ICE_VLAN_OFOS,        14 },
475         { ICE_IPV6_OFOS,        18 },
476         { ICE_UDP_ILOS,         58 },
477         { ICE_PROTOCOL_LAST,    0 },
478 };
479
480 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
/* 802.1Q tag at 14 carries the encapsulated Ethertype 0x86DD; IPv6
 * payload length and UDP length are both 8 (UDP header only).
 */
481 static const u8 dummy_vlan_udp_ipv6_packet[] = {
482         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
483         0x00, 0x00, 0x00, 0x00,
484         0x00, 0x00, 0x00, 0x00,
485
486         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
487
488         0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
489
490         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
491         0x00, 0x08, 0x11, 0x00, /* Next header UDP */
492         0x00, 0x00, 0x00, 0x00,
493         0x00, 0x00, 0x00, 0x00,
494         0x00, 0x00, 0x00, 0x00,
495         0x00, 0x00, 0x00, 0x00,
496         0x00, 0x00, 0x00, 0x00,
497         0x00, 0x00, 0x00, 0x00,
498         0x00, 0x00, 0x00, 0x00,
499         0x00, 0x00, 0x00, 0x00,
500
501         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
502         0x00, 0x08, 0x00, 0x00,
503
504         0x00, 0x00, /* 2 bytes for 4 byte alignment */
505 };
506
/* offsets for MAC + IPv4 + UDP + GTP-U dummy packet */
507 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
508         { ICE_MAC_OFOS,         0 },
509         { ICE_IPV4_OFOS,        14 },
510         { ICE_UDP_OF,           34 },
511         { ICE_GTP,              42 },
512         { ICE_PROTOCOL_LAST,    0 },
513 };
514
/* Dummy GTP-U packet: UDP dst port 0x0868 (2152, GTP-U); GTP flags 0x34,
 * message type 0xff, and next-extension-header type 0x85 — presumably the
 * PDU Session Container, per the comment below.
 */
515 static const u8 dummy_udp_gtp_packet[] = {
516         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
517         0x00, 0x00, 0x00, 0x00,
518         0x00, 0x00, 0x00, 0x00,
519         0x08, 0x00,
520
521         0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
522         0x00, 0x00, 0x00, 0x00,
523         0x00, 0x11, 0x00, 0x00,
524         0x00, 0x00, 0x00, 0x00,
525         0x00, 0x00, 0x00, 0x00,
526
527         0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
528         0x00, 0x1c, 0x00, 0x00,
529
530         0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
531         0x00, 0x00, 0x00, 0x00,
532         0x00, 0x00, 0x00, 0x85,
533
534         0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
535         0x00, 0x00, 0x00, 0x00,
536 };
537
/* offsets for IPv4 GTP-U tunnel carrying an inner IPv4 header */
538 static const
539 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
540         { ICE_MAC_OFOS,         0 },
541         { ICE_IPV4_OFOS,        14 },
542         { ICE_UDP_OF,           34 },
543         { ICE_GTP,              42 },
544         { ICE_IPV4_IL,          62 },
545         { ICE_PROTOCOL_LAST,    0 },
546 };
547
/* Dummy IPv4-over-GTP-U packet; UDP src/dst ports are both 0x0868 (2152). */
548 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
549         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
550         0x00, 0x00, 0x00, 0x00,
551         0x00, 0x00, 0x00, 0x00,
552         0x08, 0x00,
553
554         0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
555         0x00, 0x00, 0x40, 0x00,
556         0x40, 0x11, 0x00, 0x00,
557         0x00, 0x00, 0x00, 0x00,
558         0x00, 0x00, 0x00, 0x00,
559
560         0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
561         0x00, 0x00, 0x00, 0x00,
562
563         0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
564         0x00, 0x00, 0x00, 0x00,
565         0x00, 0x00, 0x00, 0x85,
566
567         0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
568         0x00, 0x00, 0x00, 0x00,
569
570         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
571         0x00, 0x00, 0x40, 0x00,
572         0x40, 0x00, 0x00, 0x00,
573         0x00, 0x00, 0x00, 0x00,
574         0x00, 0x00, 0x00, 0x00,
575         0x00, 0x00,
576 };
577
/* offsets for IPv4 GTP-U tunnel carrying an inner IPv6 header */
578 static const
579 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
580         { ICE_MAC_OFOS,         0 },
581         { ICE_IPV4_OFOS,        14 },
582         { ICE_UDP_OF,           34 },
583         { ICE_GTP,              42 },
584         { ICE_IPV6_IL,          62 },
585         { ICE_PROTOCOL_LAST,    0 },
586 };
587
/* Dummy IPv6-over-GTP-U packet; inner IPv6 next header 0x3b = No Next Header. */
588 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
589         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
590         0x00, 0x00, 0x00, 0x00,
591         0x00, 0x00, 0x00, 0x00,
592         0x08, 0x00,
593
594         0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
595         0x00, 0x00, 0x40, 0x00,
596         0x40, 0x11, 0x00, 0x00,
597         0x00, 0x00, 0x00, 0x00,
598         0x00, 0x00, 0x00, 0x00,
599
600         0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
601         0x00, 0x00, 0x00, 0x00,
602
603         0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
604         0x00, 0x00, 0x00, 0x00,
605         0x00, 0x00, 0x00, 0x85,
606
607         0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
608         0x00, 0x00, 0x00, 0x00,
609
610         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
611         0x00, 0x00, 0x3b, 0x00,
612         0x00, 0x00, 0x00, 0x00,
613         0x00, 0x00, 0x00, 0x00,
614         0x00, 0x00, 0x00, 0x00,
615         0x00, 0x00, 0x00, 0x00,
616         0x00, 0x00, 0x00, 0x00,
617         0x00, 0x00, 0x00, 0x00,
618         0x00, 0x00, 0x00, 0x00,
619         0x00, 0x00, 0x00, 0x00,
620
621         0x00, 0x00,
622 };
623
/* offsets for IPv6 GTP-U tunnel carrying an inner IPv4 header */
624 static const
625 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
626         { ICE_MAC_OFOS,         0 },
627         { ICE_IPV6_OFOS,        14 },
628         { ICE_UDP_OF,           54 },
629         { ICE_GTP,              62 },
630         { ICE_IPV4_IL,          82 },
631         { ICE_PROTOCOL_LAST,    0 },
632 };
633
/* Dummy IPv4-over-GTP-U packet with an IPv6 outer header (Ethertype 0x86dd). */
634 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
635         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
636         0x00, 0x00, 0x00, 0x00,
637         0x00, 0x00, 0x00, 0x00,
638         0x86, 0xdd,
639
640         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
641         0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
642         0x00, 0x00, 0x00, 0x00,
643         0x00, 0x00, 0x00, 0x00,
644         0x00, 0x00, 0x00, 0x00,
645         0x00, 0x00, 0x00, 0x00,
646         0x00, 0x00, 0x00, 0x00,
647         0x00, 0x00, 0x00, 0x00,
648         0x00, 0x00, 0x00, 0x00,
649         0x00, 0x00, 0x00, 0x00,
650
651         0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
652         0x00, 0x00, 0x00, 0x00,
653
654         0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
655         0x00, 0x00, 0x00, 0x00,
656         0x00, 0x00, 0x00, 0x85,
657
658         0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
659         0x00, 0x00, 0x00, 0x00,
660
661         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
662         0x00, 0x00, 0x40, 0x00,
663         0x40, 0x00, 0x00, 0x00,
664         0x00, 0x00, 0x00, 0x00,
665         0x00, 0x00, 0x00, 0x00,
666
667         0x00, 0x00,
668 };
669
/* offsets for IPv6 GTP-U tunnel carrying an inner IPv6 header */
670 static const
671 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
672         { ICE_MAC_OFOS,         0 },
673         { ICE_IPV6_OFOS,        14 },
674         { ICE_UDP_OF,           54 },
675         { ICE_GTP,              62 },
676         { ICE_IPV6_IL,          82 },
677         { ICE_PROTOCOL_LAST,    0 },
678 };
679
/* Dummy IPv6-over-GTP-U packet with an IPv6 outer header. */
680 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
681         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
682         0x00, 0x00, 0x00, 0x00,
683         0x00, 0x00, 0x00, 0x00,
684         0x86, 0xdd,
685
686         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
687         0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
688         0x00, 0x00, 0x00, 0x00,
689         0x00, 0x00, 0x00, 0x00,
690         0x00, 0x00, 0x00, 0x00,
691         0x00, 0x00, 0x00, 0x00,
692         0x00, 0x00, 0x00, 0x00,
693         0x00, 0x00, 0x00, 0x00,
694         0x00, 0x00, 0x00, 0x00,
695         0x00, 0x00, 0x00, 0x00,
696
697         0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
698         0x00, 0x00, 0x00, 0x00,
699
700         0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
701         0x00, 0x00, 0x00, 0x00,
702         0x00, 0x00, 0x00, 0x85,
703
704         0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
705         0x00, 0x00, 0x00, 0x00,
706
707         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 82 */
708         0x00, 0x00, 0x3b, 0x00,
709         0x00, 0x00, 0x00, 0x00,
710         0x00, 0x00, 0x00, 0x00,
711         0x00, 0x00, 0x00, 0x00,
712         0x00, 0x00, 0x00, 0x00,
713         0x00, 0x00, 0x00, 0x00,
714         0x00, 0x00, 0x00, 0x00,
715         0x00, 0x00, 0x00, 0x00,
716         0x00, 0x00, 0x00, 0x00,
717
718         0x00, 0x00,
719 };
720
/* offsets for GTP-U packets that carry no inner payload header
 * ("no pay"): the list stops at the GTP header itself.
 */
721 static const
722 struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
723         { ICE_MAC_OFOS,         0 },
724         { ICE_IPV4_OFOS,        14 },
725         { ICE_UDP_OF,           34 },
726         { ICE_GTP_NO_PAY,       42 },
727         { ICE_PROTOCOL_LAST,    0 },
728 };
729
/* Same as above but with an IPv6 outer header (GTP header at 62). */
730 static const
731 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
732         { ICE_MAC_OFOS,         0 },
733         { ICE_IPV6_OFOS,        14 },
734         { ICE_UDP_OF,           54 },
735         { ICE_GTP_NO_PAY,       62 },
736         { ICE_PROTOCOL_LAST,    0 },
737 };
738
/* offsets for MAC + VLAN + PPPoE dummy packet (no L3 header matched) */
739 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
740         { ICE_MAC_OFOS,         0 },
741         { ICE_ETYPE_OL,         12 },
742         { ICE_VLAN_OFOS,        14},
743         { ICE_PPPOE,            18 },
744         { ICE_PROTOCOL_LAST,    0 },
745 };
746
/* offsets for MAC + VLAN + PPPoE + IPv4 dummy packet */
747 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
748         { ICE_MAC_OFOS,         0 },
749         { ICE_ETYPE_OL,         12 },
750         { ICE_VLAN_OFOS,        14},
751         { ICE_PPPOE,            18 },
752         { ICE_IPV4_OFOS,        26 },
753         { ICE_PROTOCOL_LAST,    0 },
754 };
755
/* Dummy PPPoE session packet carrying IPv4: Ethertype 0x8864 (PPPoE
 * session), PPPoE payload length 0x16 = 22 (2-byte PPP protocol 0x0021 =
 * IPv4 + 20-byte IPv4 header).
 */
756 static const u8 dummy_pppoe_ipv4_packet[] = {
757         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
758         0x00, 0x00, 0x00, 0x00,
759         0x00, 0x00, 0x00, 0x00,
760
761         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
762
763         0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
764
765         0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
766         0x00, 0x16,
767
768         0x00, 0x21,             /* PPP Link Layer 24 */
769
770         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 26 */
771         0x00, 0x00, 0x00, 0x00,
772         0x00, 0x00, 0x00, 0x00,
773         0x00, 0x00, 0x00, 0x00,
774         0x00, 0x00, 0x00, 0x00,
775
776         0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
777 };
778
/* offsets for MAC + VLAN + PPPoE + IPv4 + TCP dummy packet */
779 static const
780 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
781         { ICE_MAC_OFOS,         0 },
782         { ICE_ETYPE_OL,         12 },
783         { ICE_VLAN_OFOS,        14},
784         { ICE_PPPOE,            18 },
785         { ICE_IPV4_OFOS,        26 },
786         { ICE_TCP_IL,           46 },
787         { ICE_PROTOCOL_LAST,    0 },
788 };
789
/* Dummy PPPoE session packet carrying IPv4 + TCP (PPP protocol 0x0021). */
790 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
791         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
792         0x00, 0x00, 0x00, 0x00,
793         0x00, 0x00, 0x00, 0x00,
794
795         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
796
797         0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
798
799         0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
800         0x00, 0x16,
801
802         0x00, 0x21,             /* PPP Link Layer 24 */
803
804         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
805         0x00, 0x01, 0x00, 0x00,
806         0x00, 0x06, 0x00, 0x00,
807         0x00, 0x00, 0x00, 0x00,
808         0x00, 0x00, 0x00, 0x00,
809
810         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
811         0x00, 0x00, 0x00, 0x00,
812         0x00, 0x00, 0x00, 0x00,
813         0x50, 0x00, 0x00, 0x00,
814         0x00, 0x00, 0x00, 0x00,
815
816         0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
817 };
818
/* offsets for MAC + VLAN + PPPoE + IPv4 + UDP dummy packet */
819 static const
820 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
821         { ICE_MAC_OFOS,         0 },
822         { ICE_ETYPE_OL,         12 },
823         { ICE_VLAN_OFOS,        14},
824         { ICE_PPPOE,            18 },
825         { ICE_IPV4_OFOS,        26 },
826         { ICE_UDP_ILOS,         46 },
827         { ICE_PROTOCOL_LAST,    0 },
828 };
829
/* Dummy PPPoE session packet carrying IPv4 + UDP (PPP protocol 0x0021). */
830 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
831         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
832         0x00, 0x00, 0x00, 0x00,
833         0x00, 0x00, 0x00, 0x00,
834
835         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
836
837         0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
838
839         0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
840         0x00, 0x16,
841
842         0x00, 0x21,             /* PPP Link Layer 24 */
843
844         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
845         0x00, 0x01, 0x00, 0x00,
846         0x00, 0x11, 0x00, 0x00,
847         0x00, 0x00, 0x00, 0x00,
848         0x00, 0x00, 0x00, 0x00,
849
850         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
851         0x00, 0x08, 0x00, 0x00,
852
853         0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
854 };
855
/* offsets for MAC + VLAN + PPPoE + IPv6 dummy packet */
856 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
857         { ICE_MAC_OFOS,         0 },
858         { ICE_ETYPE_OL,         12 },
859         { ICE_VLAN_OFOS,        14},
860         { ICE_PPPOE,            18 },
861         { ICE_IPV6_OFOS,        26 },
862         { ICE_PROTOCOL_LAST,    0 },
863 };
864
/* Dummy PPPoE session packet carrying IPv6: PPPoE payload length 0x2a = 42
 * (2-byte PPP protocol 0x0057 = IPv6 + 40-byte IPv6 header); IPv6 next
 * header 0x3b = No Next Header.
 */
865 static const u8 dummy_pppoe_ipv6_packet[] = {
866         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
867         0x00, 0x00, 0x00, 0x00,
868         0x00, 0x00, 0x00, 0x00,
869
870         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
871
872         0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
873
874         0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
875         0x00, 0x2a,
876
877         0x00, 0x57,             /* PPP Link Layer 24 */
878
879         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
880         0x00, 0x00, 0x3b, 0x00,
881         0x00, 0x00, 0x00, 0x00,
882         0x00, 0x00, 0x00, 0x00,
883         0x00, 0x00, 0x00, 0x00,
884         0x00, 0x00, 0x00, 0x00,
885         0x00, 0x00, 0x00, 0x00,
886         0x00, 0x00, 0x00, 0x00,
887         0x00, 0x00, 0x00, 0x00,
888         0x00, 0x00, 0x00, 0x00,
889
890         0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
891 };
892
/* offsets for MAC + VLAN + PPPoE + IPv6 + TCP dummy packet */
893 static const
894 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
895         { ICE_MAC_OFOS,         0 },
896         { ICE_ETYPE_OL,         12 },
897         { ICE_VLAN_OFOS,        14},
898         { ICE_PPPOE,            18 },
899         { ICE_IPV6_OFOS,        26 },
900         { ICE_TCP_IL,           66 },
901         { ICE_PROTOCOL_LAST,    0 },
902 };
903
/* Dummy PPPoE session packet carrying IPv6 + TCP (PPP protocol 0x0057). */
904 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
905         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
906         0x00, 0x00, 0x00, 0x00,
907         0x00, 0x00, 0x00, 0x00,
908
909         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
910
911         0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
912
913         0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
914         0x00, 0x2a,
915
916         0x00, 0x57,             /* PPP Link Layer 24 */
917
918         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
919         0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
920         0x00, 0x00, 0x00, 0x00,
921         0x00, 0x00, 0x00, 0x00,
922         0x00, 0x00, 0x00, 0x00,
923         0x00, 0x00, 0x00, 0x00,
924         0x00, 0x00, 0x00, 0x00,
925         0x00, 0x00, 0x00, 0x00,
926         0x00, 0x00, 0x00, 0x00,
927         0x00, 0x00, 0x00, 0x00,
928
929         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
930         0x00, 0x00, 0x00, 0x00,
931         0x00, 0x00, 0x00, 0x00,
932         0x50, 0x00, 0x00, 0x00,
933         0x00, 0x00, 0x00, 0x00,
934
935         0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
936 };
937
/* Offsets of each matched header in dummy_pppoe_ipv6_udp_packet below */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_OFOS,        14},
        { ICE_PPPOE,            18 },
        { ICE_IPV6_OFOS,        26 },
        { ICE_UDP_ILOS,         66 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy PPPoE session packet: IPv6 + UDP */
static const u8 dummy_pppoe_ipv6_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00,             /* ICE_ETYPE_OL 12 */

        0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

        0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
        0x00, 0x2a,             /* PPPoE payload length 42 */

        0x00, 0x57,             /* PPP Link Layer 24 (protocol 0x0057 = IPv6) */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
        0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
        0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */

        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
979
static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_ESP,                      34 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for IPv4 + ESP (IP protocol 0x32) */
static const u8 dummy_ipv4_esp_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,             /* ethertype 0x0800 (IPv4) */

        0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x40, 0x00,
        0x40, 0x32, 0x00, 0x00, /* protocol 0x32 = ESP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};

static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_ESP,                      54 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for IPv6 + ESP */
static const u8 dummy_ipv6_esp_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xDD,             /* ethertype 0x86DD (IPv6) */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x08, 0x32, 0x00, /* Next header ESP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1032
static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_AH,                       34 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for IPv4 + AH (IP protocol 0x33) */
static const u8 dummy_ipv4_ah_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,             /* ethertype 0x0800 (IPv4) */

        0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x40, 0x00,
        0x40, 0x33, 0x00, 0x00, /* protocol 0x33 = AH */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};

static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_AH,                       54 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for IPv6 + AH */
static const u8 dummy_ipv6_ah_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xDD,             /* ethertype 0x86DD (IPv6) */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x0c, 0x33, 0x00, /* Next header AH */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1087
static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_ILOS,         34 },
        { ICE_NAT_T,            42 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for IPv4 + UDP-encapsulated ESP (NAT-T, UDP port 4500) */
static const u8 dummy_ipv4_nat_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,             /* ethertype 0x0800 (IPv4) */

        0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x40, 0x00,
        0x40, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x11, 0x94, /* ICE_UDP_ILOS 34, dst port 0x1194 (4500) */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_NAT_T 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};

static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_ILOS,         54 },
        { ICE_NAT_T,            62 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for IPv6 + UDP-encapsulated ESP (NAT-T, UDP port 4500) */
static const u8 dummy_ipv6_nat_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xDD,             /* ethertype 0x86DD (IPv6) */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x11, 0x94, /* ICE_UDP_ILOS 54, dst port 0x1194 (4500) */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_NAT_T 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */

};
1149
static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_L2TPV3,           34 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for IPv4 + L2TPv3 over IP (IP protocol 0x73 = 115) */
static const u8 dummy_ipv4_l2tpv3_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,             /* ethertype 0x0800 (IPv4) */

        0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x40, 0x00,
        0x40, 0x73, 0x00, 0x00, /* protocol 0x73 = L2TPv3 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};

static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_L2TPV3,           54 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for IPv6 + L2TPv3 over IP */
static const u8 dummy_ipv6_l2tpv3_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xDD,             /* ethertype 0x86DD (IPv6) */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x0c, 0x73, 0x40, /* payload len 12, next header 0x73 = L2TPv3 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1204
static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_VLAN_EX,          14 },
        { ICE_VLAN_OFOS,        18 },
        { ICE_IPV4_OFOS,        22 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy double-VLAN (QinQ) packet: outer tag 0x9100, inner tag 0x8100, IPv4 */
static const u8 dummy_qinq_ipv4_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x91, 0x00,             /* outer VLAN ethertype 0x9100 */

        0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
        0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 18 */

        0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
        0x00, 0x08, 0x00, 0x00,

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};

static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_VLAN_EX,          14 },
        { ICE_VLAN_OFOS,        18 },
        { ICE_IPV6_OFOS,        22 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy double-VLAN (QinQ) packet: IPv6 + UDP */
static const u8 dummy_qinq_ipv6_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x91, 0x00,             /* outer VLAN ethertype 0x9100 */

        0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
        0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 18 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
        0x00, 0x10, 0x11, 0x00, /* Next header UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
        0x00, 0x10, 0x00, 0x00, /* UDP length 16 (covers trailing ESP bytes) */

        0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};

/* Offsets for a QinQ + PPPoE match with no inner network header */
static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_VLAN_EX,          14 },
        { ICE_VLAN_OFOS,        18 },
        { ICE_PPPOE,            22 },
        { ICE_PROTOCOL_LAST,    0 },
};

static const
struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_VLAN_EX,          14 },
        { ICE_VLAN_OFOS,        18 },
        { ICE_PPPOE,            22 },
        { ICE_IPV4_OFOS,        30 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy QinQ + PPPoE session packet carrying an IPv4 header */
static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x91, 0x00,             /* outer VLAN ethertype 0x9100 */

        0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
        0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */

        0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
        0x00, 0x16,             /* PPPoE payload length 22 = PPP proto (2) + IPv4 hdr (20) */

        0x00, 0x21,             /* PPP Link Layer 28 (protocol 0x0021 = IPv4) */

        0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 30 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};

static const
struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_VLAN_EX,          14},
        { ICE_VLAN_OFOS,        18 },
        { ICE_PPPOE,            22 },
        { ICE_IPV6_OFOS,        30 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy QinQ + PPPoE session packet carrying an IPv6 header */
static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x91, 0x00,             /* ICE_ETYPE_OL 12 */

        0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
        0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */

        0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
        0x00, 0x2a,             /* PPPoE payload length 42 */

        0x00, 0x57,             /* PPP Link Layer 28*/

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
        0x00, 0x00, 0x3b, 0x00, /* payload len 0, next header 0x3b (no next header) */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1351
/* Recipe to profile association bitmap: recipe_to_profile[r] has bit p set
 * when recipe r is associated with package profile p.
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
                          ICE_MAX_NUM_PROFILES);

/* Profile to recipe association bitmap: the inverse mapping of the above */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
                          ICE_MAX_NUM_RECIPES);

/* Forward declaration; refreshes the cached maps above from firmware */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1361
1362 /**
1363  * ice_collect_result_idx - copy result index values
1364  * @buf: buffer that contains the result index
1365  * @recp: the recipe struct to copy data into
1366  */
1367 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1368                                    struct ice_sw_recipe *recp)
1369 {
1370         if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1371                 ice_set_bit(buf->content.result_indx &
1372                             ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1373 }
1374
1375 /**
1376  * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1377  * @rid: recipe ID that we are populating
1378  */
1379 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
1380 {
1381         u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1382         u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1383         u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1384         u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1385         enum ice_sw_tunnel_type tun_type;
1386         u16 i, j, profile_num = 0;
1387         bool non_tun_valid = false;
1388         bool pppoe_valid = false;
1389         bool vxlan_valid = false;
1390         bool gre_valid = false;
1391         bool gtp_valid = false;
1392         bool flag_valid = false;
1393
1394         for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1395                 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1396                         continue;
1397                 else
1398                         profile_num++;
1399
1400                 for (i = 0; i < 12; i++) {
1401                         if (gre_profile[i] == j)
1402                                 gre_valid = true;
1403                 }
1404
1405                 for (i = 0; i < 12; i++) {
1406                         if (vxlan_profile[i] == j)
1407                                 vxlan_valid = true;
1408                 }
1409
1410                 for (i = 0; i < 7; i++) {
1411                         if (pppoe_profile[i] == j)
1412                                 pppoe_valid = true;
1413                 }
1414
1415                 for (i = 0; i < 6; i++) {
1416                         if (non_tun_profile[i] == j)
1417                                 non_tun_valid = true;
1418                 }
1419
1420                 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1421                     j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
1422                         gtp_valid = true;
1423
1424                 if ((j >= ICE_PROFID_IPV4_ESP &&
1425                      j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1426                     (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1427                      j <= ICE_PROFID_IPV6_GTPU_TEID))
1428                         flag_valid = true;
1429         }
1430
1431         if (!non_tun_valid && vxlan_valid)
1432                 tun_type = ICE_SW_TUN_VXLAN;
1433         else if (!non_tun_valid && gre_valid)
1434                 tun_type = ICE_SW_TUN_NVGRE;
1435         else if (!non_tun_valid && pppoe_valid)
1436                 tun_type = ICE_SW_TUN_PPPOE;
1437         else if (!non_tun_valid && gtp_valid)
1438                 tun_type = ICE_SW_TUN_GTP;
1439         else if (non_tun_valid &&
1440                  (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1441                 tun_type = ICE_SW_TUN_AND_NON_TUN;
1442         else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1443                  !pppoe_valid)
1444                 tun_type = ICE_NON_TUN;
1445         else
1446                 tun_type = ICE_NON_TUN;
1447
1448         if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1449                 i = ice_is_bit_set(recipe_to_profile[rid],
1450                                    ICE_PROFID_PPPOE_IPV4_OTHER);
1451                 j = ice_is_bit_set(recipe_to_profile[rid],
1452                                    ICE_PROFID_PPPOE_IPV6_OTHER);
1453                 if (i && !j)
1454                         tun_type = ICE_SW_TUN_PPPOE_IPV4;
1455                 else if (!i && j)
1456                         tun_type = ICE_SW_TUN_PPPOE_IPV6;
1457         }
1458
1459         if (tun_type == ICE_SW_TUN_GTP) {
1460                 if (ice_is_bit_set(recipe_to_profile[rid],
1461                                    ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
1462                         tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1463                 else if (ice_is_bit_set(recipe_to_profile[rid],
1464                                         ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
1465                         tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1466                 else if (ice_is_bit_set(recipe_to_profile[rid],
1467                                         ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
1468                         tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1469                 else if (ice_is_bit_set(recipe_to_profile[rid],
1470                                         ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
1471                         tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1472         }
1473
1474         if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1475                 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1476                         if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1477                                 switch (j) {
1478                                 case ICE_PROFID_IPV4_TCP:
1479                                         tun_type = ICE_SW_IPV4_TCP;
1480                                         break;
1481                                 case ICE_PROFID_IPV4_UDP:
1482                                         tun_type = ICE_SW_IPV4_UDP;
1483                                         break;
1484                                 case ICE_PROFID_IPV6_TCP:
1485                                         tun_type = ICE_SW_IPV6_TCP;
1486                                         break;
1487                                 case ICE_PROFID_IPV6_UDP:
1488                                         tun_type = ICE_SW_IPV6_UDP;
1489                                         break;
1490                                 case ICE_PROFID_PPPOE_PAY:
1491                                         tun_type = ICE_SW_TUN_PPPOE_PAY;
1492                                         break;
1493                                 case ICE_PROFID_PPPOE_IPV4_TCP:
1494                                         tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1495                                         break;
1496                                 case ICE_PROFID_PPPOE_IPV4_UDP:
1497                                         tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1498                                         break;
1499                                 case ICE_PROFID_PPPOE_IPV4_OTHER:
1500                                         tun_type = ICE_SW_TUN_PPPOE_IPV4;
1501                                         break;
1502                                 case ICE_PROFID_PPPOE_IPV6_TCP:
1503                                         tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1504                                         break;
1505                                 case ICE_PROFID_PPPOE_IPV6_UDP:
1506                                         tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1507                                         break;
1508                                 case ICE_PROFID_PPPOE_IPV6_OTHER:
1509                                         tun_type = ICE_SW_TUN_PPPOE_IPV6;
1510                                         break;
1511                                 case ICE_PROFID_IPV4_ESP:
1512                                         tun_type = ICE_SW_TUN_IPV4_ESP;
1513                                         break;
1514                                 case ICE_PROFID_IPV6_ESP:
1515                                         tun_type = ICE_SW_TUN_IPV6_ESP;
1516                                         break;
1517                                 case ICE_PROFID_IPV4_AH:
1518                                         tun_type = ICE_SW_TUN_IPV4_AH;
1519                                         break;
1520                                 case ICE_PROFID_IPV6_AH:
1521                                         tun_type = ICE_SW_TUN_IPV6_AH;
1522                                         break;
1523                                 case ICE_PROFID_IPV4_NAT_T:
1524                                         tun_type = ICE_SW_TUN_IPV4_NAT_T;
1525                                         break;
1526                                 case ICE_PROFID_IPV6_NAT_T:
1527                                         tun_type = ICE_SW_TUN_IPV6_NAT_T;
1528                                         break;
1529                                 case ICE_PROFID_IPV4_PFCP_NODE:
1530                                         tun_type =
1531                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1532                                         break;
1533                                 case ICE_PROFID_IPV6_PFCP_NODE:
1534                                         tun_type =
1535                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1536                                         break;
1537                                 case ICE_PROFID_IPV4_PFCP_SESSION:
1538                                         tun_type =
1539                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1540                                         break;
1541                                 case ICE_PROFID_IPV6_PFCP_SESSION:
1542                                         tun_type =
1543                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1544                                         break;
1545                                 case ICE_PROFID_MAC_IPV4_L2TPV3:
1546                                         tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1547                                         break;
1548                                 case ICE_PROFID_MAC_IPV6_L2TPV3:
1549                                         tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1550                                         break;
1551                                 case ICE_PROFID_IPV4_GTPU_TEID:
1552                                         tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1553                                         break;
1554                                 case ICE_PROFID_IPV6_GTPU_TEID:
1555                                         tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1556                                         break;
1557                                 default:
1558                                         break;
1559                                 }
1560
1561                                 return tun_type;
1562                         }
1563                 }
1564         }
1565
1566         if (vlan && tun_type == ICE_SW_TUN_PPPOE)
1567                 tun_type = ICE_SW_TUN_PPPOE_QINQ;
1568         else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
1569                 tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1570         else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
1571                 tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1572         else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
1573                 tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1574         else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
1575                 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1576         else if (vlan && tun_type == ICE_NON_TUN)
1577                 tun_type = ICE_NON_TUN_QINQ;
1578
1579         return tun_type;
1580 }
1581
1582 /**
1583  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1584  * @hw: pointer to hardware structure
1585  * @recps: struct that we need to populate
1586  * @rid: recipe ID that we are populating
1587  * @refresh_required: true if we should get recipe to profile mapping from FW
1588  *
1589  * This function is used to populate all the necessary entries into our
1590  * bookkeeping so that we have a current list of all the recipes that are
1591  * programmed in the firmware.
1592  */
1593 static enum ice_status
1594 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1595                     bool *refresh_required)
1596 {
1597         ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
1598         struct ice_aqc_recipe_data_elem *tmp;
1599         u16 num_recps = ICE_MAX_NUM_RECIPES;
1600         struct ice_prot_lkup_ext *lkup_exts;
1601         enum ice_status status;
1602         u8 fv_word_idx = 0;
1603         bool vlan = false;
1604         u16 sub_recps;
1605
1606         ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
1607
1608         /* we need a buffer big enough to accommodate all the recipes */
1609         tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
1610                 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
1611         if (!tmp)
1612                 return ICE_ERR_NO_MEMORY;
1613
1614         tmp[0].recipe_indx = rid;
1615         status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1616         /* non-zero status meaning recipe doesn't exist */
1617         if (status)
1618                 goto err_unroll;
1619
1620         /* Get recipe to profile map so that we can get the fv from lkups that
1621          * we read for a recipe from FW. Since we want to minimize the number of
1622          * times we make this FW call, just make one call and cache the copy
1623          * until a new recipe is added. This operation is only required the
1624          * first time to get the changes from FW. Then to search existing
1625          * entries we don't need to update the cache again until another recipe
1626          * gets added.
1627          */
1628         if (*refresh_required) {
1629                 ice_get_recp_to_prof_map(hw);
1630                 *refresh_required = false;
1631         }
1632
1633         /* Start populating all the entries for recps[rid] based on lkups from
1634          * firmware. Note that we are only creating the root recipe in our
1635          * database.
1636          */
1637         lkup_exts = &recps[rid].lkup_exts;
1638
1639         for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1640                 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1641                 struct ice_recp_grp_entry *rg_entry;
1642                 u8 i, prof, idx, prot = 0;
1643                 bool is_root;
1644                 u16 off = 0;
1645
1646                 rg_entry = (struct ice_recp_grp_entry *)
1647                         ice_malloc(hw, sizeof(*rg_entry));
1648                 if (!rg_entry) {
1649                         status = ICE_ERR_NO_MEMORY;
1650                         goto err_unroll;
1651                 }
1652
1653                 idx = root_bufs.recipe_indx;
1654                 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1655
1656                 /* Mark all result indices in this chain */
1657                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1658                         ice_set_bit(root_bufs.content.result_indx &
1659                                     ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
1660
1661                 /* get the first profile that is associated with rid */
1662                 prof = ice_find_first_bit(recipe_to_profile[idx],
1663                                           ICE_MAX_NUM_PROFILES);
1664                 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1665                         u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1666
1667                         rg_entry->fv_idx[i] = lkup_indx;
1668                         rg_entry->fv_mask[i] =
1669                                 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
1670
1671                         /* If the recipe is a chained recipe then all its
1672                          * child recipe's result will have a result index.
1673                          * To fill fv_words we should not use those result
1674                          * index, we only need the protocol ids and offsets.
1675                          * We will skip all the fv_idx which stores result
1676                          * index in them. We also need to skip any fv_idx which
1677                          * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1678                          * valid offset value.
1679                          */
1680                         if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
1681                                            rg_entry->fv_idx[i]) ||
1682                             rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1683                             rg_entry->fv_idx[i] == 0)
1684                                 continue;
1685
1686                         ice_find_prot_off(hw, ICE_BLK_SW, prof,
1687                                           rg_entry->fv_idx[i], &prot, &off);
1688                         lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1689                         lkup_exts->fv_words[fv_word_idx].off = off;
1690                         lkup_exts->field_mask[fv_word_idx] =
1691                                 rg_entry->fv_mask[i];
1692                         if (prot == ICE_META_DATA_ID_HW &&
1693                             off == ICE_TUN_FLAG_MDID_OFF)
1694                                 vlan = true;
1695                         fv_word_idx++;
1696                 }
1697                 /* populate rg_list with the data from the child entry of this
1698                  * recipe
1699                  */
1700                 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
1701
1702                 /* Propagate some data to the recipe database */
1703                 recps[idx].is_root = !!is_root;
1704                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1705                 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1706                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1707                         recps[idx].chain_idx = root_bufs.content.result_indx &
1708                                 ~ICE_AQ_RECIPE_RESULT_EN;
1709                         ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1710                 } else {
1711                         recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1712                 }
1713
1714                 if (!is_root)
1715                         continue;
1716
1717                 /* Only do the following for root recipes entries */
1718                 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1719                            sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
1720                 recps[idx].root_rid = root_bufs.content.rid &
1721                         ~ICE_AQ_RECIPE_ID_IS_ROOT;
1722                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1723         }
1724
1725         /* Complete initialization of the root recipe entry */
1726         lkup_exts->n_val_words = fv_word_idx;
1727         recps[rid].big_recp = (num_recps > 1);
1728         recps[rid].n_grp_count = (u8)num_recps;
1729         recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
1730         recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
1731                 ice_memdup(hw, tmp, recps[rid].n_grp_count *
1732                            sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
1733         if (!recps[rid].root_buf)
1734                 goto err_unroll;
1735
1736         /* Copy result indexes */
1737         ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1738         recps[rid].recp_created = true;
1739
1740 err_unroll:
1741         ice_free(hw, tmp);
1742         return status;
1743 }
1744
1745 /**
1746  * ice_get_recp_to_prof_map - updates recipe to profile mapping
1747  * @hw: pointer to hardware structure
1748  *
1749  * This function is used to populate recipe_to_profile matrix where index to
1750  * this array is the recipe ID and the element is the mapping of which profiles
1751  * is this recipe mapped to.
1752  */
1753 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1754 {
1755         ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1756         u16 i;
1757
1758         for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
1759                 u16 j;
1760
1761                 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1762                 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1763                 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1764                         continue;
1765                 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1766                               ICE_MAX_NUM_RECIPES);
1767                 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
1768                         ice_set_bit(i, recipe_to_profile[j]);
1769         }
1770 }
1771
1772 /**
1773  * ice_init_def_sw_recp - initialize the recipe book keeping tables
1774  * @hw: pointer to the HW struct
1775  * @recp_list: pointer to sw recipe list
1776  *
1777  * Allocate memory for the entire recipe table and initialize the structures/
1778  * entries corresponding to basic recipes.
1779  */
1780 enum ice_status
1781 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1782 {
1783         struct ice_sw_recipe *recps;
1784         u8 i;
1785
1786         recps = (struct ice_sw_recipe *)
1787                 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1788         if (!recps)
1789                 return ICE_ERR_NO_MEMORY;
1790
1791         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1792                 recps[i].root_rid = i;
1793                 INIT_LIST_HEAD(&recps[i].filt_rules);
1794                 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1795                 INIT_LIST_HEAD(&recps[i].rg_list);
1796                 ice_init_lock(&recps[i].filt_rule_lock);
1797         }
1798
1799         *recp_list = recps;
1800
1801         return ICE_SUCCESS;
1802 }
1803
1804 /**
1805  * ice_aq_get_sw_cfg - get switch configuration
1806  * @hw: pointer to the hardware structure
1807  * @buf: pointer to the result buffer
1808  * @buf_size: length of the buffer available for response
1809  * @req_desc: pointer to requested descriptor
1810  * @num_elems: pointer to number of elements
1811  * @cd: pointer to command details structure or NULL
1812  *
1813  * Get switch configuration (0x0200) to be placed in buf.
1814  * This admin command returns information such as initial VSI/port number
1815  * and switch ID it belongs to.
1816  *
1817  * NOTE: *req_desc is both an input/output parameter.
1818  * The caller of this function first calls this function with *request_desc set
1819  * to 0. If the response from f/w has *req_desc set to 0, all the switch
1820  * configuration information has been returned; if non-zero (meaning not all
1821  * the information was returned), the caller should call this function again
1822  * with *req_desc set to the previous value returned by f/w to get the
1823  * next block of switch configuration information.
1824  *
1825  * *num_elems is output only parameter. This reflects the number of elements
1826  * in response buffer. The caller of this function to use *num_elems while
1827  * parsing the response buffer.
1828  */
1829 static enum ice_status
1830 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1831                   u16 buf_size, u16 *req_desc, u16 *num_elems,
1832                   struct ice_sq_cd *cd)
1833 {
1834         struct ice_aqc_get_sw_cfg *cmd;
1835         struct ice_aq_desc desc;
1836         enum ice_status status;
1837
1838         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1839         cmd = &desc.params.get_sw_conf;
1840         cmd->element = CPU_TO_LE16(*req_desc);
1841
1842         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1843         if (!status) {
1844                 *req_desc = LE16_TO_CPU(cmd->element);
1845                 *num_elems = LE16_TO_CPU(cmd->num_elems);
1846         }
1847
1848         return status;
1849 }
1850
1851 /**
1852  * ice_alloc_sw - allocate resources specific to switch
1853  * @hw: pointer to the HW struct
1854  * @ena_stats: true to turn on VEB stats
1855  * @shared_res: true for shared resource, false for dedicated resource
1856  * @sw_id: switch ID returned
1857  * @counter_id: VEB counter ID returned
1858  *
1859  * allocates switch resources (SWID and VEB counter) (0x0208)
1860  */
1861 enum ice_status
1862 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1863              u16 *counter_id)
1864 {
1865         struct ice_aqc_alloc_free_res_elem *sw_buf;
1866         struct ice_aqc_res_elem *sw_ele;
1867         enum ice_status status;
1868         u16 buf_len;
1869
1870         buf_len = ice_struct_size(sw_buf, elem, 1);
1871         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1872         if (!sw_buf)
1873                 return ICE_ERR_NO_MEMORY;
1874
1875         /* Prepare buffer for switch ID.
1876          * The number of resource entries in buffer is passed as 1 since only a
1877          * single switch/VEB instance is allocated, and hence a single sw_id
1878          * is requested.
1879          */
1880         sw_buf->num_elems = CPU_TO_LE16(1);
1881         sw_buf->res_type =
1882                 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1883                             (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1884                             ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1885
1886         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1887                                        ice_aqc_opc_alloc_res, NULL);
1888
1889         if (status)
1890                 goto ice_alloc_sw_exit;
1891
1892         sw_ele = &sw_buf->elem[0];
1893         *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1894
1895         if (ena_stats) {
1896                 /* Prepare buffer for VEB Counter */
1897                 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1898                 struct ice_aqc_alloc_free_res_elem *counter_buf;
1899                 struct ice_aqc_res_elem *counter_ele;
1900
1901                 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1902                                 ice_malloc(hw, buf_len);
1903                 if (!counter_buf) {
1904                         status = ICE_ERR_NO_MEMORY;
1905                         goto ice_alloc_sw_exit;
1906                 }
1907
1908                 /* The number of resource entries in buffer is passed as 1 since
1909                  * only a single switch/VEB instance is allocated, and hence a
1910                  * single VEB counter is requested.
1911                  */
1912                 counter_buf->num_elems = CPU_TO_LE16(1);
1913                 counter_buf->res_type =
1914                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1915                                     ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1916                 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1917                                                opc, NULL);
1918
1919                 if (status) {
1920                         ice_free(hw, counter_buf);
1921                         goto ice_alloc_sw_exit;
1922                 }
1923                 counter_ele = &counter_buf->elem[0];
1924                 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1925                 ice_free(hw, counter_buf);
1926         }
1927
1928 ice_alloc_sw_exit:
1929         ice_free(hw, sw_buf);
1930         return status;
1931 }
1932
1933 /**
1934  * ice_free_sw - free resources specific to switch
1935  * @hw: pointer to the HW struct
1936  * @sw_id: switch ID returned
1937  * @counter_id: VEB counter ID returned
1938  *
1939  * free switch resources (SWID and VEB counter) (0x0209)
1940  *
1941  * NOTE: This function frees multiple resources. It continues
1942  * releasing other resources even after it encounters error.
1943  * The error code returned is the last error it encountered.
1944  */
1945 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1946 {
1947         struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1948         enum ice_status status, ret_status;
1949         u16 buf_len;
1950
1951         buf_len = ice_struct_size(sw_buf, elem, 1);
1952         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1953         if (!sw_buf)
1954                 return ICE_ERR_NO_MEMORY;
1955
1956         /* Prepare buffer to free for switch ID res.
1957          * The number of resource entries in buffer is passed as 1 since only a
1958          * single switch/VEB instance is freed, and hence a single sw_id
1959          * is released.
1960          */
1961         sw_buf->num_elems = CPU_TO_LE16(1);
1962         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1963         sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
1964
1965         ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1966                                            ice_aqc_opc_free_res, NULL);
1967
1968         if (ret_status)
1969                 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1970
1971         /* Prepare buffer to free for VEB Counter resource */
1972         counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1973                         ice_malloc(hw, buf_len);
1974         if (!counter_buf) {
1975                 ice_free(hw, sw_buf);
1976                 return ICE_ERR_NO_MEMORY;
1977         }
1978
1979         /* The number of resource entries in buffer is passed as 1 since only a
1980          * single switch/VEB instance is freed, and hence a single VEB counter
1981          * is released
1982          */
1983         counter_buf->num_elems = CPU_TO_LE16(1);
1984         counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1985         counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1986
1987         status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1988                                        ice_aqc_opc_free_res, NULL);
1989         if (status) {
1990                 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
1991                 ret_status = status;
1992         }
1993
1994         ice_free(hw, counter_buf);
1995         ice_free(hw, sw_buf);
1996         return ret_status;
1997 }
1998
1999 /**
2000  * ice_aq_add_vsi
2001  * @hw: pointer to the HW struct
2002  * @vsi_ctx: pointer to a VSI context struct
2003  * @cd: pointer to command details structure or NULL
2004  *
2005  * Add a VSI context to the hardware (0x0210)
2006  */
2007 enum ice_status
2008 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2009                struct ice_sq_cd *cd)
2010 {
2011         struct ice_aqc_add_update_free_vsi_resp *res;
2012         struct ice_aqc_add_get_update_free_vsi *cmd;
2013         struct ice_aq_desc desc;
2014         enum ice_status status;
2015
2016         cmd = &desc.params.vsi_cmd;
2017         res = &desc.params.add_update_free_vsi_res;
2018
2019         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2020
2021         if (!vsi_ctx->alloc_from_pool)
2022                 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2023                                            ICE_AQ_VSI_IS_VALID);
2024
2025         cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2026
2027         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2028
2029         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2030                                  sizeof(vsi_ctx->info), cd);
2031
2032         if (!status) {
2033                 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2034                 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2035                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2036         }
2037
2038         return status;
2039 }
2040
2041 /**
2042  * ice_aq_free_vsi
2043  * @hw: pointer to the HW struct
2044  * @vsi_ctx: pointer to a VSI context struct
2045  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2046  * @cd: pointer to command details structure or NULL
2047  *
2048  * Free VSI context info from hardware (0x0213)
2049  */
2050 enum ice_status
2051 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2052                 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2053 {
2054         struct ice_aqc_add_update_free_vsi_resp *resp;
2055         struct ice_aqc_add_get_update_free_vsi *cmd;
2056         struct ice_aq_desc desc;
2057         enum ice_status status;
2058
2059         cmd = &desc.params.vsi_cmd;
2060         resp = &desc.params.add_update_free_vsi_res;
2061
2062         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2063
2064         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2065         if (keep_vsi_alloc)
2066                 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2067
2068         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2069         if (!status) {
2070                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2071                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2072         }
2073
2074         return status;
2075 }
2076
2077 /**
2078  * ice_aq_update_vsi
2079  * @hw: pointer to the HW struct
2080  * @vsi_ctx: pointer to a VSI context struct
2081  * @cd: pointer to command details structure or NULL
2082  *
2083  * Update VSI context in the hardware (0x0211)
2084  */
2085 enum ice_status
2086 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2087                   struct ice_sq_cd *cd)
2088 {
2089         struct ice_aqc_add_update_free_vsi_resp *resp;
2090         struct ice_aqc_add_get_update_free_vsi *cmd;
2091         struct ice_aq_desc desc;
2092         enum ice_status status;
2093
2094         cmd = &desc.params.vsi_cmd;
2095         resp = &desc.params.add_update_free_vsi_res;
2096
2097         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2098
2099         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2100
2101         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2102
2103         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2104                                  sizeof(vsi_ctx->info), cd);
2105
2106         if (!status) {
2107                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2108                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2109         }
2110
2111         return status;
2112 }
2113
2114 /**
2115  * ice_is_vsi_valid - check whether the VSI is valid or not
2116  * @hw: pointer to the HW struct
2117  * @vsi_handle: VSI handle
2118  *
2119  * check whether the VSI is valid or not
2120  */
2121 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2122 {
2123         return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2124 }
2125
2126 /**
2127  * ice_get_hw_vsi_num - return the HW VSI number
2128  * @hw: pointer to the HW struct
2129  * @vsi_handle: VSI handle
2130  *
2131  * return the HW VSI number
2132  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2133  */
2134 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2135 {
2136         return hw->vsi_ctx[vsi_handle]->vsi_num;
2137 }
2138
2139 /**
2140  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2141  * @hw: pointer to the HW struct
2142  * @vsi_handle: VSI handle
2143  *
2144  * return the VSI context entry for a given VSI handle
2145  */
2146 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2147 {
2148         return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2149 }
2150
2151 /**
2152  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2153  * @hw: pointer to the HW struct
2154  * @vsi_handle: VSI handle
2155  * @vsi: VSI context pointer
2156  *
2157  * save the VSI context entry for a given VSI handle
2158  */
2159 static void
2160 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2161 {
2162         hw->vsi_ctx[vsi_handle] = vsi;
2163 }
2164
2165 /**
2166  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2167  * @hw: pointer to the HW struct
2168  * @vsi_handle: VSI handle
2169  */
2170 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2171 {
2172         struct ice_vsi_ctx *vsi;
2173         u8 i;
2174
2175         vsi = ice_get_vsi_ctx(hw, vsi_handle);
2176         if (!vsi)
2177                 return;
2178         ice_for_each_traffic_class(i) {
2179                 if (vsi->lan_q_ctx[i]) {
2180                         ice_free(hw, vsi->lan_q_ctx[i]);
2181                         vsi->lan_q_ctx[i] = NULL;
2182                 }
2183         }
2184 }
2185
2186 /**
2187  * ice_clear_vsi_ctx - clear the VSI context entry
2188  * @hw: pointer to the HW struct
2189  * @vsi_handle: VSI handle
2190  *
2191  * clear the VSI context entry
2192  */
2193 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2194 {
2195         struct ice_vsi_ctx *vsi;
2196
2197         vsi = ice_get_vsi_ctx(hw, vsi_handle);
2198         if (vsi) {
2199                 ice_clear_vsi_q_ctx(hw, vsi_handle);
2200                 ice_free(hw, vsi);
2201                 hw->vsi_ctx[vsi_handle] = NULL;
2202         }
2203 }
2204
2205 /**
2206  * ice_clear_all_vsi_ctx - clear all the VSI context entries
2207  * @hw: pointer to the HW struct
2208  */
2209 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2210 {
2211         u16 i;
2212
2213         for (i = 0; i < ICE_MAX_VSI; i++)
2214                 ice_clear_vsi_ctx(hw, i);
2215 }
2216
2217 /**
2218  * ice_add_vsi - add VSI context to the hardware and VSI handle list
2219  * @hw: pointer to the HW struct
2220  * @vsi_handle: unique VSI handle provided by drivers
2221  * @vsi_ctx: pointer to a VSI context struct
2222  * @cd: pointer to command details structure or NULL
2223  *
2224  * Add a VSI context to the hardware also add it into the VSI handle list.
2225  * If this function gets called after reset for existing VSIs then update
2226  * with the new HW VSI number in the corresponding VSI handle list entry.
2227  */
2228 enum ice_status
2229 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2230             struct ice_sq_cd *cd)
2231 {
2232         struct ice_vsi_ctx *tmp_vsi_ctx;
2233         enum ice_status status;
2234
2235         if (vsi_handle >= ICE_MAX_VSI)
2236                 return ICE_ERR_PARAM;
2237         status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2238         if (status)
2239                 return status;
2240         tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2241         if (!tmp_vsi_ctx) {
2242                 /* Create a new VSI context */
2243                 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2244                         ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2245                 if (!tmp_vsi_ctx) {
2246                         ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2247                         return ICE_ERR_NO_MEMORY;
2248                 }
2249                 *tmp_vsi_ctx = *vsi_ctx;
2250
2251                 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2252         } else {
2253                 /* update with new HW VSI num */
2254                 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2255         }
2256
2257         return ICE_SUCCESS;
2258 }
2259
2260 /**
2261  * ice_free_vsi- free VSI context from hardware and VSI handle list
2262  * @hw: pointer to the HW struct
2263  * @vsi_handle: unique VSI handle
2264  * @vsi_ctx: pointer to a VSI context struct
2265  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2266  * @cd: pointer to command details structure or NULL
2267  *
2268  * Free VSI context info from hardware as well as from VSI handle list
2269  */
2270 enum ice_status
2271 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2272              bool keep_vsi_alloc, struct ice_sq_cd *cd)
2273 {
2274         enum ice_status status;
2275
2276         if (!ice_is_vsi_valid(hw, vsi_handle))
2277                 return ICE_ERR_PARAM;
2278         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2279         status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2280         if (!status)
2281                 ice_clear_vsi_ctx(hw, vsi_handle);
2282         return status;
2283 }
2284
2285 /**
2286  * ice_update_vsi
2287  * @hw: pointer to the HW struct
2288  * @vsi_handle: unique VSI handle
2289  * @vsi_ctx: pointer to a VSI context struct
2290  * @cd: pointer to command details structure or NULL
2291  *
2292  * Update VSI context in the hardware
2293  */
2294 enum ice_status
2295 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2296                struct ice_sq_cd *cd)
2297 {
2298         if (!ice_is_vsi_valid(hw, vsi_handle))
2299                 return ICE_ERR_PARAM;
2300         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2301         return ice_aq_update_vsi(hw, vsi_ctx, cd);
2302 }
2303
2304 /**
2305  * ice_aq_get_vsi_params
2306  * @hw: pointer to the HW struct
2307  * @vsi_ctx: pointer to a VSI context struct
2308  * @cd: pointer to command details structure or NULL
2309  *
2310  * Get VSI context info from hardware (0x0212)
2311  */
2312 enum ice_status
2313 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2314                       struct ice_sq_cd *cd)
2315 {
2316         struct ice_aqc_add_get_update_free_vsi *cmd;
2317         struct ice_aqc_get_vsi_resp *resp;
2318         struct ice_aq_desc desc;
2319         enum ice_status status;
2320
2321         cmd = &desc.params.vsi_cmd;
2322         resp = &desc.params.get_vsi_resp;
2323
2324         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2325
2326         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2327
2328         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2329                                  sizeof(vsi_ctx->info), cd);
2330         if (!status) {
2331                 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2332                                         ICE_AQ_VSI_NUM_M;
2333                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2334                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2335         }
2336
2337         return status;
2338 }
2339
/**
 * ice_aq_add_update_mir_rule - add/update a mirror rule
 * @hw: pointer to the HW struct
 * @rule_type: Rule Type
 * @dest_vsi: VSI number to which packets will be mirrored
 * @count: length of the list
 * @mr_buf: buffer for list of mirrored VSI numbers
 * @cd: pointer to command details structure or NULL
 * @rule_id: Rule ID (in/out: pass ICE_INVAL_MIRROR_RULE_ID to add a new
 *           rule, or an existing ID to update; on success receives the
 *           rule ID assigned/confirmed by firmware)
 *
 * Add/Update Mirror Rule (0x260).
 */
enum ice_status
ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
			   u16 count, struct ice_mir_rule_buf *mr_buf,
			   struct ice_sq_cd *cd, u16 *rule_id)
{
	struct ice_aqc_add_update_mir_rule *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	__le16 *mr_list = NULL;
	u16 buf_size = 0;

	/* Validate argument combinations per rule type; only the virtual
	 * port rule types carry an indirect buffer of VSI entries.
	 */
	switch (rule_type) {
	case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
	case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
		/* Make sure count and mr_buf are set for these rule_types */
		if (!(count && mr_buf))
			return ICE_ERR_PARAM;

		buf_size = count * sizeof(__le16);
		mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
		if (!mr_list)
			return ICE_ERR_NO_MEMORY;
		break;
	case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
	case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
		/* Make sure count and mr_buf are not set for these
		 * rule_types
		 */
		if (count || mr_buf)
			return ICE_ERR_PARAM;
		break;
	default:
		ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
		return ICE_ERR_OUT_OF_RANGE;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);

	/* Pre-process 'mr_buf' items for add/update of virtual port
	 * ingress/egress mirroring (but not physical port ingress/egress
	 * mirroring)
	 */
	if (mr_buf) {
		int i;

		for (i = 0; i < count; i++) {
			u16 id;

			id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;

			/* Validate specified VSI number, make sure it is less
			 * than ICE_MAX_VSI, if not return with error.
			 */
			if (id >= ICE_MAX_VSI) {
				ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
					  id);
				ice_free(hw, mr_list);
				return ICE_ERR_OUT_OF_RANGE;
			}

			/* add VSI to mirror rule */
			if (mr_buf[i].add)
				mr_list[i] =
					CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
			else /* remove VSI from mirror rule */
				mr_list[i] = CPU_TO_LE16(id);
		}
	}

	cmd = &desc.params.add_update_rule;
	/* An existing rule ID marks this as an update rather than an add */
	if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
		cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
					   ICE_AQC_RULE_ID_VALID_M);
	cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
	cmd->num_entries = CPU_TO_LE16(count);
	cmd->dest = CPU_TO_LE16(dest_vsi);

	/* mr_list/buf_size are NULL/0 for the physical port rule types */
	status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
	if (!status)
		*rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;

	/* freeing a NULL mr_list is a no-op */
	ice_free(hw, mr_list);

	return status;
}
2437
2438 /**
2439  * ice_aq_delete_mir_rule - delete a mirror rule
2440  * @hw: pointer to the HW struct
2441  * @rule_id: Mirror rule ID (to be deleted)
2442  * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2443  *               otherwise it is returned to the shared pool
2444  * @cd: pointer to command details structure or NULL
2445  *
2446  * Delete Mirror Rule (0x261).
2447  */
2448 enum ice_status
2449 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2450                        struct ice_sq_cd *cd)
2451 {
2452         struct ice_aqc_delete_mir_rule *cmd;
2453         struct ice_aq_desc desc;
2454
2455         /* rule_id should be in the range 0...63 */
2456         if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2457                 return ICE_ERR_OUT_OF_RANGE;
2458
2459         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2460
2461         cmd = &desc.params.del_rule;
2462         rule_id |= ICE_AQC_RULE_ID_VALID_M;
2463         cmd->rule_id = CPU_TO_LE16(rule_id);
2464
2465         if (keep_allocd)
2466                 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2467
2468         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2469 }
2470
2471 /**
2472  * ice_aq_alloc_free_vsi_list
2473  * @hw: pointer to the HW struct
2474  * @vsi_list_id: VSI list ID returned or used for lookup
2475  * @lkup_type: switch rule filter lookup type
2476  * @opc: switch rules population command type - pass in the command opcode
2477  *
2478  * allocates or free a VSI list resource
2479  */
2480 static enum ice_status
2481 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2482                            enum ice_sw_lkup_type lkup_type,
2483                            enum ice_adminq_opc opc)
2484 {
2485         struct ice_aqc_alloc_free_res_elem *sw_buf;
2486         struct ice_aqc_res_elem *vsi_ele;
2487         enum ice_status status;
2488         u16 buf_len;
2489
2490         buf_len = ice_struct_size(sw_buf, elem, 1);
2491         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2492         if (!sw_buf)
2493                 return ICE_ERR_NO_MEMORY;
2494         sw_buf->num_elems = CPU_TO_LE16(1);
2495
2496         if (lkup_type == ICE_SW_LKUP_MAC ||
2497             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2498             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2499             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2500             lkup_type == ICE_SW_LKUP_PROMISC ||
2501             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2502             lkup_type == ICE_SW_LKUP_LAST) {
2503                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2504         } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2505                 sw_buf->res_type =
2506                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2507         } else {
2508                 status = ICE_ERR_PARAM;
2509                 goto ice_aq_alloc_free_vsi_list_exit;
2510         }
2511
2512         if (opc == ice_aqc_opc_free_res)
2513                 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2514
2515         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2516         if (status)
2517                 goto ice_aq_alloc_free_vsi_list_exit;
2518
2519         if (opc == ice_aqc_opc_alloc_res) {
2520                 vsi_ele = &sw_buf->elem[0];
2521                 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2522         }
2523
2524 ice_aq_alloc_free_vsi_list_exit:
2525         ice_free(hw, sw_buf);
2526         return status;
2527 }
2528
2529 /**
2530  * ice_aq_set_storm_ctrl - Sets storm control configuration
2531  * @hw: pointer to the HW struct
2532  * @bcast_thresh: represents the upper threshold for broadcast storm control
2533  * @mcast_thresh: represents the upper threshold for multicast storm control
2534  * @ctl_bitmask: storm control knobs
2535  *
2536  * Sets the storm control configuration (0x0280)
2537  */
2538 enum ice_status
2539 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
2540                       u32 ctl_bitmask)
2541 {
2542         struct ice_aqc_storm_cfg *cmd;
2543         struct ice_aq_desc desc;
2544
2545         cmd = &desc.params.storm_conf;
2546
2547         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
2548
2549         cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
2550         cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
2551         cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
2552
2553         return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2554 }
2555
2556 /**
2557  * ice_aq_get_storm_ctrl - gets storm control configuration
2558  * @hw: pointer to the HW struct
2559  * @bcast_thresh: represents the upper threshold for broadcast storm control
2560  * @mcast_thresh: represents the upper threshold for multicast storm control
2561  * @ctl_bitmask: storm control knobs
2562  *
2563  * Gets the storm control configuration (0x0281)
2564  */
2565 enum ice_status
2566 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2567                       u32 *ctl_bitmask)
2568 {
2569         enum ice_status status;
2570         struct ice_aq_desc desc;
2571
2572         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
2573
2574         status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2575         if (!status) {
2576                 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
2577
2578                 if (bcast_thresh)
2579                         *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2580                                 ICE_AQ_THRESHOLD_M;
2581                 if (mcast_thresh)
2582                         *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2583                                 ICE_AQ_THRESHOLD_M;
2584                 if (ctl_bitmask)
2585                         *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2586         }
2587
2588         return status;
2589 }
2590
2591 /**
2592  * ice_aq_sw_rules - add/update/remove switch rules
2593  * @hw: pointer to the HW struct
2594  * @rule_list: pointer to switch rule population list
2595  * @rule_list_sz: total size of the rule list in bytes
2596  * @num_rules: number of switch rules in the rule_list
2597  * @opc: switch rules population command type - pass in the command opcode
2598  * @cd: pointer to command details structure or NULL
2599  *
2600  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
2601  */
2602 static enum ice_status
2603 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2604                 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2605 {
2606         struct ice_aq_desc desc;
2607         enum ice_status status;
2608
2609         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2610
2611         if (opc != ice_aqc_opc_add_sw_rules &&
2612             opc != ice_aqc_opc_update_sw_rules &&
2613             opc != ice_aqc_opc_remove_sw_rules)
2614                 return ICE_ERR_PARAM;
2615
2616         ice_fill_dflt_direct_cmd_desc(&desc, opc);
2617
2618         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2619         desc.params.sw_rules.num_rules_fltr_entry_index =
2620                 CPU_TO_LE16(num_rules);
2621         status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
2622         if (opc != ice_aqc_opc_add_sw_rules &&
2623             hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2624                 status = ICE_ERR_DOES_NOT_EXIST;
2625
2626         return status;
2627 }
2628
2629 /**
2630  * ice_aq_add_recipe - add switch recipe
2631  * @hw: pointer to the HW struct
2632  * @s_recipe_list: pointer to switch rule population list
2633  * @num_recipes: number of switch recipes in the list
2634  * @cd: pointer to command details structure or NULL
2635  *
2636  * Add(0x0290)
2637  */
2638 enum ice_status
2639 ice_aq_add_recipe(struct ice_hw *hw,
2640                   struct ice_aqc_recipe_data_elem *s_recipe_list,
2641                   u16 num_recipes, struct ice_sq_cd *cd)
2642 {
2643         struct ice_aqc_add_get_recipe *cmd;
2644         struct ice_aq_desc desc;
2645         u16 buf_size;
2646
2647         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2648         cmd = &desc.params.add_get_recipe;
2649         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2650
2651         cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
2652         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2653
2654         buf_size = num_recipes * sizeof(*s_recipe_list);
2655
2656         return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2657 }
2658
2659 /**
2660  * ice_aq_get_recipe - get switch recipe
2661  * @hw: pointer to the HW struct
2662  * @s_recipe_list: pointer to switch rule population list
2663  * @num_recipes: pointer to the number of recipes (input and output)
2664  * @recipe_root: root recipe number of recipe(s) to retrieve
2665  * @cd: pointer to command details structure or NULL
2666  *
2667  * Get(0x0292)
2668  *
2669  * On input, *num_recipes should equal the number of entries in s_recipe_list.
2670  * On output, *num_recipes will equal the number of entries returned in
2671  * s_recipe_list.
2672  *
2673  * The caller must supply enough space in s_recipe_list to hold all possible
2674  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2675  */
2676 enum ice_status
2677 ice_aq_get_recipe(struct ice_hw *hw,
2678                   struct ice_aqc_recipe_data_elem *s_recipe_list,
2679                   u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2680 {
2681         struct ice_aqc_add_get_recipe *cmd;
2682         struct ice_aq_desc desc;
2683         enum ice_status status;
2684         u16 buf_size;
2685
2686         if (*num_recipes != ICE_MAX_NUM_RECIPES)
2687                 return ICE_ERR_PARAM;
2688
2689         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2690         cmd = &desc.params.add_get_recipe;
2691         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2692
2693         cmd->return_index = CPU_TO_LE16(recipe_root);
2694         cmd->num_sub_recipes = 0;
2695
2696         buf_size = *num_recipes * sizeof(*s_recipe_list);
2697
2698         status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2699         /* cppcheck-suppress constArgument */
2700         *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2701
2702         return status;
2703 }
2704
2705 /**
2706  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2707  * @hw: pointer to the HW struct
2708  * @profile_id: package profile ID to associate the recipe with
2709  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2710  * @cd: pointer to command details structure or NULL
2711  * Recipe to profile association (0x0291)
2712  */
2713 enum ice_status
2714 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2715                              struct ice_sq_cd *cd)
2716 {
2717         struct ice_aqc_recipe_to_profile *cmd;
2718         struct ice_aq_desc desc;
2719
2720         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2721         cmd = &desc.params.recipe_to_profile;
2722         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2723         cmd->profile_id = CPU_TO_LE16(profile_id);
2724         /* Set the recipe ID bit in the bitmask to let the device know which
2725          * profile we are associating the recipe to
2726          */
2727         ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2728                    ICE_NONDMA_TO_NONDMA);
2729
2730         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2731 }
2732
2733 /**
2734  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2735  * @hw: pointer to the HW struct
2736  * @profile_id: package profile ID to associate the recipe with
2737  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2738  * @cd: pointer to command details structure or NULL
2739  * Associate profile ID with given recipe (0x0293)
2740  */
2741 enum ice_status
2742 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2743                              struct ice_sq_cd *cd)
2744 {
2745         struct ice_aqc_recipe_to_profile *cmd;
2746         struct ice_aq_desc desc;
2747         enum ice_status status;
2748
2749         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2750         cmd = &desc.params.recipe_to_profile;
2751         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2752         cmd->profile_id = CPU_TO_LE16(profile_id);
2753
2754         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2755         if (!status)
2756                 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2757                            sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2758
2759         return status;
2760 }
2761
2762 /**
2763  * ice_alloc_recipe - add recipe resource
2764  * @hw: pointer to the hardware structure
2765  * @rid: recipe ID returned as response to AQ call
2766  */
2767 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2768 {
2769         struct ice_aqc_alloc_free_res_elem *sw_buf;
2770         enum ice_status status;
2771         u16 buf_len;
2772
2773         buf_len = ice_struct_size(sw_buf, elem, 1);
2774         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2775         if (!sw_buf)
2776                 return ICE_ERR_NO_MEMORY;
2777
2778         sw_buf->num_elems = CPU_TO_LE16(1);
2779         sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2780                                         ICE_AQC_RES_TYPE_S) |
2781                                         ICE_AQC_RES_TYPE_FLAG_SHARED);
2782         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2783                                        ice_aqc_opc_alloc_res, NULL);
2784         if (!status)
2785                 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2786         ice_free(hw, sw_buf);
2787
2788         return status;
2789 }
2790
2791 /* ice_init_port_info - Initialize port_info with switch configuration data
2792  * @pi: pointer to port_info
2793  * @vsi_port_num: VSI number or port number
2794  * @type: Type of switch element (port or VSI)
2795  * @swid: switch ID of the switch the element is attached to
2796  * @pf_vf_num: PF or VF number
2797  * @is_vf: true if the element is a VF, false otherwise
2798  */
2799 static void
2800 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2801                    u16 swid, u16 pf_vf_num, bool is_vf)
2802 {
2803         switch (type) {
2804         case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2805                 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2806                 pi->sw_id = swid;
2807                 pi->pf_vf_num = pf_vf_num;
2808                 pi->is_vf = is_vf;
2809                 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2810                 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2811                 break;
2812         default:
2813                 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2814                 break;
2815         }
2816 }
2817
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 *
 * Queries the firmware switch configuration (possibly over several AQ
 * calls) and initializes hw->port_info from the first port element found.
 * Returns ICE_ERR_CFG if more port elements are reported than expected,
 * ICE_ERR_NO_MEMORY if the response buffer cannot be allocated, or the
 * AQ status of a failed ice_aq_get_sw_cfg call.
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
	enum ice_status status;
	u8 num_total_ports;
	u16 req_desc = 0;
	u16 num_elems;
	u8 j = 0;       /* count of port elements consumed so far */
	u16 i;

	/* this function currently supports a single PF port only */
	num_total_ports = 1;

	rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
		ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);

	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		struct ice_aqc_get_sw_cfg_resp_elem *ele;

		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 res_type;

			/* low bits: VSI/port number; high bits: element type */
			vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = LE16_TO_CPU(ele->swid);

			if (LE16_TO_CPU(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);

			switch (res_type) {
			case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
			case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
				/* bail out if FW reports more ports than we
				 * allocated port_info structures for
				 */
				if (j == num_total_ports) {
					ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
					status = ICE_ERR_CFG;
					goto out;
				}
				ice_init_port_info(hw->port_info,
						   vsi_port_num, res_type, swid,
						   pf_vf_num, is_vf);
				j++;
				break;
			default:
				/* VSI and other element types are ignored */
				break;
			}
		}
	} while (req_desc && !status);

out:
	ice_free(hw, rbuf);
	return status;
}
2896
2897 /**
2898  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2899  * @hw: pointer to the hardware structure
2900  * @fi: filter info structure to fill/update
2901  *
2902  * This helper function populates the lb_en and lan_en elements of the provided
2903  * ice_fltr_info struct using the switch's type and characteristics of the
2904  * switch rule being configured.
2905  */
2906 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2907 {
2908         if ((fi->flag & ICE_FLTR_RX) &&
2909             (fi->fltr_act == ICE_FWD_TO_VSI ||
2910              fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2911             fi->lkup_type == ICE_SW_LKUP_LAST)
2912                 fi->lan_en = true;
2913         fi->lb_en = false;
2914         fi->lan_en = false;
2915         if ((fi->flag & ICE_FLTR_TX) &&
2916             (fi->fltr_act == ICE_FWD_TO_VSI ||
2917              fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2918              fi->fltr_act == ICE_FWD_TO_Q ||
2919              fi->fltr_act == ICE_FWD_TO_QGRP)) {
2920                 /* Setting LB for prune actions will result in replicated
2921                  * packets to the internal switch that will be dropped.
2922                  */
2923                 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2924                         fi->lb_en = true;
2925
2926                 /* Set lan_en to TRUE if
2927                  * 1. The switch is a VEB AND
2928                  * 2
2929                  * 2.1 The lookup is a directional lookup like ethertype,
2930                  * promiscuous, ethertype-MAC, promiscuous-VLAN
2931                  * and default-port OR
2932                  * 2.2 The lookup is VLAN, OR
2933                  * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2934                  * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2935                  *
2936                  * OR
2937                  *
2938                  * The switch is a VEPA.
2939                  *
2940                  * In all other cases, the LAN enable has to be set to false.
2941                  */
2942                 if (hw->evb_veb) {
2943                         if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2944                             fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2945                             fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2946                             fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2947                             fi->lkup_type == ICE_SW_LKUP_DFLT ||
2948                             fi->lkup_type == ICE_SW_LKUP_VLAN ||
2949                             (fi->lkup_type == ICE_SW_LKUP_MAC &&
2950                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2951                             (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2952                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2953                                 fi->lan_en = true;
2954                 } else {
2955                         fi->lan_en = true;
2956                 }
2957         }
2958 }
2959
2960 /**
2961  * ice_fill_sw_rule - Helper function to fill switch rule structure
2962  * @hw: pointer to the hardware structure
2963  * @f_info: entry containing packet forwarding information
2964  * @s_rule: switch rule structure to be filled in based on mac_entry
2965  * @opc: switch rules population command type - pass in the command opcode
2966  */
2967 static void
2968 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2969                  struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
2970 {
2971         u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2972         void *daddr = NULL;
2973         u16 eth_hdr_sz;
2974         u8 *eth_hdr;
2975         u32 act = 0;
2976         __be16 *off;
2977         u8 q_rgn;
2978
2979         if (opc == ice_aqc_opc_remove_sw_rules) {
2980                 s_rule->pdata.lkup_tx_rx.act = 0;
2981                 s_rule->pdata.lkup_tx_rx.index =
2982                         CPU_TO_LE16(f_info->fltr_rule_id);
2983                 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2984                 return;
2985         }
2986
2987         eth_hdr_sz = sizeof(dummy_eth_header);
2988         eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2989
2990         /* initialize the ether header with a dummy header */
2991         ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2992         ice_fill_sw_info(hw, f_info);
2993
2994         switch (f_info->fltr_act) {
2995         case ICE_FWD_TO_VSI:
2996                 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2997                         ICE_SINGLE_ACT_VSI_ID_M;
2998                 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2999                         act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3000                                 ICE_SINGLE_ACT_VALID_BIT;
3001                 break;
3002         case ICE_FWD_TO_VSI_LIST:
3003                 act |= ICE_SINGLE_ACT_VSI_LIST;
3004                 act |= (f_info->fwd_id.vsi_list_id <<
3005                         ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3006                         ICE_SINGLE_ACT_VSI_LIST_ID_M;
3007                 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3008                         act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3009                                 ICE_SINGLE_ACT_VALID_BIT;
3010                 break;
3011         case ICE_FWD_TO_Q:
3012                 act |= ICE_SINGLE_ACT_TO_Q;
3013                 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3014                         ICE_SINGLE_ACT_Q_INDEX_M;
3015                 break;
3016         case ICE_DROP_PACKET:
3017                 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3018                         ICE_SINGLE_ACT_VALID_BIT;
3019                 break;
3020         case ICE_FWD_TO_QGRP:
3021                 q_rgn = f_info->qgrp_size > 0 ?
3022                         (u8)ice_ilog2(f_info->qgrp_size) : 0;
3023                 act |= ICE_SINGLE_ACT_TO_Q;
3024                 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3025                         ICE_SINGLE_ACT_Q_INDEX_M;
3026                 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3027                         ICE_SINGLE_ACT_Q_REGION_M;
3028                 break;
3029         default:
3030                 return;
3031         }
3032
3033         if (f_info->lb_en)
3034                 act |= ICE_SINGLE_ACT_LB_ENABLE;
3035         if (f_info->lan_en)
3036                 act |= ICE_SINGLE_ACT_LAN_ENABLE;
3037
3038         switch (f_info->lkup_type) {
3039         case ICE_SW_LKUP_MAC:
3040                 daddr = f_info->l_data.mac.mac_addr;
3041                 break;
3042         case ICE_SW_LKUP_VLAN:
3043                 vlan_id = f_info->l_data.vlan.vlan_id;
3044                 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3045                     f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3046                         act |= ICE_SINGLE_ACT_PRUNE;
3047                         act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3048                 }
3049                 break;
3050         case ICE_SW_LKUP_ETHERTYPE_MAC:
3051                 daddr = f_info->l_data.ethertype_mac.mac_addr;
3052                 /* fall-through */
3053         case ICE_SW_LKUP_ETHERTYPE:
3054                 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3055                 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3056                 break;
3057         case ICE_SW_LKUP_MAC_VLAN:
3058                 daddr = f_info->l_data.mac_vlan.mac_addr;
3059                 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3060                 break;
3061         case ICE_SW_LKUP_PROMISC_VLAN:
3062                 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3063                 /* fall-through */
3064         case ICE_SW_LKUP_PROMISC:
3065                 daddr = f_info->l_data.mac_vlan.mac_addr;
3066                 break;
3067         default:
3068                 break;
3069         }
3070
3071         s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3072                 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3073                 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3074
3075         /* Recipe set depending on lookup type */
3076         s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3077         s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3078         s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3079
3080         if (daddr)
3081                 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3082                            ICE_NONDMA_TO_NONDMA);
3083
3084         if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3085                 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3086                 *off = CPU_TO_BE16(vlan_id);
3087         }
3088
3089         /* Create the switch rule with the final dummy Ethernet header */
3090         if (opc != ice_aqc_opc_update_sw_rules)
3091                 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3092 }
3093
3094 /**
3095  * ice_add_marker_act
3096  * @hw: pointer to the hardware structure
3097  * @m_ent: the management entry for which sw marker needs to be added
3098  * @sw_marker: sw marker to tag the Rx descriptor with
3099  * @l_id: large action resource ID
3100  *
3101  * Create a large action to hold software marker and update the switch rule
3102  * entry pointed by m_ent with newly created large action
3103  */
3104 static enum ice_status
3105 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3106                    u16 sw_marker, u16 l_id)
3107 {
3108         struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3109         /* For software marker we need 3 large actions
3110          * 1. FWD action: FWD TO VSI or VSI LIST
3111          * 2. GENERIC VALUE action to hold the profile ID
3112          * 3. GENERIC VALUE action to hold the software marker ID
3113          */
3114         const u16 num_lg_acts = 3;
3115         enum ice_status status;
3116         u16 lg_act_size;
3117         u16 rules_size;
3118         u32 act;
3119         u16 id;
3120
3121         if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3122                 return ICE_ERR_PARAM;
3123
3124         /* Create two back-to-back switch rules and submit them to the HW using
3125          * one memory buffer:
3126          *    1. Large Action
3127          *    2. Look up Tx Rx
3128          */
3129         lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3130         rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3131         lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3132         if (!lg_act)
3133                 return ICE_ERR_NO_MEMORY;
3134
3135         rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3136
3137         /* Fill in the first switch rule i.e. large action */
3138         lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3139         lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3140         lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3141
3142         /* First action VSI forwarding or VSI list forwarding depending on how
3143          * many VSIs
3144          */
3145         id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3146                 m_ent->fltr_info.fwd_id.hw_vsi_id;
3147
3148         act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3149         act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3150         if (m_ent->vsi_count > 1)
3151                 act |= ICE_LG_ACT_VSI_LIST;
3152         lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3153
3154         /* Second action descriptor type */
3155         act = ICE_LG_ACT_GENERIC;
3156
3157         act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3158         lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3159
3160         act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3161                ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3162
3163         /* Third action Marker value */
3164         act |= ICE_LG_ACT_GENERIC;
3165         act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3166                 ICE_LG_ACT_GENERIC_VALUE_M;
3167
3168         lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3169
3170         /* call the fill switch rule to fill the lookup Tx Rx structure */
3171         ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3172                          ice_aqc_opc_update_sw_rules);
3173
3174         /* Update the action to point to the large action ID */
3175         rx_tx->pdata.lkup_tx_rx.act =
3176                 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3177                             ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3178                              ICE_SINGLE_ACT_PTR_VAL_M));
3179
3180         /* Use the filter rule ID of the previously created rule with single
3181          * act. Once the update happens, hardware will treat this as large
3182          * action
3183          */
3184         rx_tx->pdata.lkup_tx_rx.index =
3185                 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3186
3187         status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3188                                  ice_aqc_opc_update_sw_rules, NULL);
3189         if (!status) {
3190                 m_ent->lg_act_idx = l_id;
3191                 m_ent->sw_marker_id = sw_marker;
3192         }
3193
3194         ice_free(hw, lg_act);
3195         return status;
3196 }
3197
3198 /**
3199  * ice_add_counter_act - add/update filter rule with counter action
3200  * @hw: pointer to the hardware structure
3201  * @m_ent: the management entry for which counter needs to be added
3202  * @counter_id: VLAN counter ID returned as part of allocate resource
3203  * @l_id: large action resource ID
3204  */
3205 static enum ice_status
3206 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3207                     u16 counter_id, u16 l_id)
3208 {
3209         struct ice_aqc_sw_rules_elem *lg_act;
3210         struct ice_aqc_sw_rules_elem *rx_tx;
3211         enum ice_status status;
3212         /* 2 actions will be added while adding a large action counter */
3213         const int num_acts = 2;
3214         u16 lg_act_size;
3215         u16 rules_size;
3216         u16 f_rule_id;
3217         u32 act;
3218         u16 id;
3219
3220         if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3221                 return ICE_ERR_PARAM;
3222
3223         /* Create two back-to-back switch rules and submit them to the HW using
3224          * one memory buffer:
3225          * 1. Large Action
3226          * 2. Look up Tx Rx
3227          */
3228         lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3229         rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3230         lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3231         if (!lg_act)
3232                 return ICE_ERR_NO_MEMORY;
3233
3234         rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3235
3236         /* Fill in the first switch rule i.e. large action */
3237         lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3238         lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3239         lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3240
3241         /* First action VSI forwarding or VSI list forwarding depending on how
3242          * many VSIs
3243          */
3244         id = (m_ent->vsi_count > 1) ?  m_ent->fltr_info.fwd_id.vsi_list_id :
3245                 m_ent->fltr_info.fwd_id.hw_vsi_id;
3246
3247         act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3248         act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3249                 ICE_LG_ACT_VSI_LIST_ID_M;
3250         if (m_ent->vsi_count > 1)
3251                 act |= ICE_LG_ACT_VSI_LIST;
3252         lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3253
3254         /* Second action counter ID */
3255         act = ICE_LG_ACT_STAT_COUNT;
3256         act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3257                 ICE_LG_ACT_STAT_COUNT_M;
3258         lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3259
3260         /* call the fill switch rule to fill the lookup Tx Rx structure */
3261         ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3262                          ice_aqc_opc_update_sw_rules);
3263
3264         act = ICE_SINGLE_ACT_PTR;
3265         act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3266         rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3267
3268         /* Use the filter rule ID of the previously created rule with single
3269          * act. Once the update happens, hardware will treat this as large
3270          * action
3271          */
3272         f_rule_id = m_ent->fltr_info.fltr_rule_id;
3273         rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
3274
3275         status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3276                                  ice_aqc_opc_update_sw_rules, NULL);
3277         if (!status) {
3278                 m_ent->lg_act_idx = l_id;
3279                 m_ent->counter_index = counter_id;
3280         }
3281
3282         ice_free(hw, lg_act);
3283         return status;
3284 }
3285
3286 /**
3287  * ice_create_vsi_list_map
3288  * @hw: pointer to the hardware structure
3289  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3290  * @num_vsi: number of VSI handles in the array
3291  * @vsi_list_id: VSI list ID generated as part of allocate resource
3292  *
3293  * Helper function to create a new entry of VSI list ID to VSI mapping
3294  * using the given VSI list ID
3295  */
3296 static struct ice_vsi_list_map_info *
3297 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3298                         u16 vsi_list_id)
3299 {
3300         struct ice_switch_info *sw = hw->switch_info;
3301         struct ice_vsi_list_map_info *v_map;
3302         int i;
3303
3304         v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
3305                 sizeof(*v_map));
3306         if (!v_map)
3307                 return NULL;
3308
3309         v_map->vsi_list_id = vsi_list_id;
3310         v_map->ref_cnt = 1;
3311         for (i = 0; i < num_vsi; i++)
3312                 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3313
3314         LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3315         return v_map;
3316 }
3317
3318 /**
3319  * ice_update_vsi_list_rule
3320  * @hw: pointer to the hardware structure
3321  * @vsi_handle_arr: array of VSI handles to form a VSI list
3322  * @num_vsi: number of VSI handles in the array
3323  * @vsi_list_id: VSI list ID generated as part of allocate resource
3324  * @remove: Boolean value to indicate if this is a remove action
3325  * @opc: switch rules population command type - pass in the command opcode
3326  * @lkup_type: lookup type of the filter
3327  *
3328  * Call AQ command to add a new switch rule or update existing switch rule
3329  * using the given VSI list ID
3330  */
3331 static enum ice_status
3332 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3333                          u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3334                          enum ice_sw_lkup_type lkup_type)
3335 {
3336         struct ice_aqc_sw_rules_elem *s_rule;
3337         enum ice_status status;
3338         u16 s_rule_size;
3339         u16 rule_type;
3340         int i;
3341
3342         if (!num_vsi)
3343                 return ICE_ERR_PARAM;
3344
3345         if (lkup_type == ICE_SW_LKUP_MAC ||
3346             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3347             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3348             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3349             lkup_type == ICE_SW_LKUP_PROMISC ||
3350             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3351             lkup_type == ICE_SW_LKUP_LAST)
3352                 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3353                         ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3354         else if (lkup_type == ICE_SW_LKUP_VLAN)
3355                 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3356                         ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3357         else
3358                 return ICE_ERR_PARAM;
3359
3360         s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3361         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3362         if (!s_rule)
3363                 return ICE_ERR_NO_MEMORY;
3364         for (i = 0; i < num_vsi; i++) {
3365                 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3366                         status = ICE_ERR_PARAM;
3367                         goto exit;
3368                 }
3369                 /* AQ call requires hw_vsi_id(s) */
3370                 s_rule->pdata.vsi_list.vsi[i] =
3371                         CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3372         }
3373
3374         s_rule->type = CPU_TO_LE16(rule_type);
3375         s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3376         s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3377
3378         status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3379
3380 exit:
3381         ice_free(hw, s_rule);
3382         return status;
3383 }
3384
3385 /**
3386  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3387  * @hw: pointer to the HW struct
3388  * @vsi_handle_arr: array of VSI handles to form a VSI list
3389  * @num_vsi: number of VSI handles in the array
3390  * @vsi_list_id: stores the ID of the VSI list to be created
3391  * @lkup_type: switch rule filter's lookup type
3392  */
3393 static enum ice_status
3394 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3395                          u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3396 {
3397         enum ice_status status;
3398
3399         status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3400                                             ice_aqc_opc_alloc_res);
3401         if (status)
3402                 return status;
3403
3404         /* Update the newly created VSI list to include the specified VSIs */
3405         return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3406                                         *vsi_list_id, false,
3407                                         ice_aqc_opc_add_sw_rules, lkup_type);
3408 }
3409
3410 /**
3411  * ice_create_pkt_fwd_rule
3412  * @hw: pointer to the hardware structure
3413  * @recp_list: corresponding filter management list
3414  * @f_entry: entry containing packet forwarding information
3415  *
3416  * Create switch rule with given filter information and add an entry
3417  * to the corresponding filter management list to track this switch rule
3418  * and VSI mapping
3419  */
3420 static enum ice_status
3421 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3422                         struct ice_fltr_list_entry *f_entry)
3423 {
3424         struct ice_fltr_mgmt_list_entry *fm_entry;
3425         struct ice_aqc_sw_rules_elem *s_rule;
3426         enum ice_status status;
3427
3428         s_rule = (struct ice_aqc_sw_rules_elem *)
3429                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3430         if (!s_rule)
3431                 return ICE_ERR_NO_MEMORY;
3432         fm_entry = (struct ice_fltr_mgmt_list_entry *)
3433                    ice_malloc(hw, sizeof(*fm_entry));
3434         if (!fm_entry) {
3435                 status = ICE_ERR_NO_MEMORY;
3436                 goto ice_create_pkt_fwd_rule_exit;
3437         }
3438
3439         fm_entry->fltr_info = f_entry->fltr_info;
3440
3441         /* Initialize all the fields for the management entry */
3442         fm_entry->vsi_count = 1;
3443         fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3444         fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3445         fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3446
3447         ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3448                          ice_aqc_opc_add_sw_rules);
3449
3450         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3451                                  ice_aqc_opc_add_sw_rules, NULL);
3452         if (status) {
3453                 ice_free(hw, fm_entry);
3454                 goto ice_create_pkt_fwd_rule_exit;
3455         }
3456
3457         f_entry->fltr_info.fltr_rule_id =
3458                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3459         fm_entry->fltr_info.fltr_rule_id =
3460                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3461
3462         /* The book keeping entries will get removed when base driver
3463          * calls remove filter AQ command
3464          */
3465         LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
3466
3467 ice_create_pkt_fwd_rule_exit:
3468         ice_free(hw, s_rule);
3469         return status;
3470 }
3471
3472 /**
3473  * ice_update_pkt_fwd_rule
3474  * @hw: pointer to the hardware structure
3475  * @f_info: filter information for switch rule
3476  *
3477  * Call AQ command to update a previously created switch rule with a
3478  * VSI list ID
3479  */
3480 static enum ice_status
3481 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
3482 {
3483         struct ice_aqc_sw_rules_elem *s_rule;
3484         enum ice_status status;
3485
3486         s_rule = (struct ice_aqc_sw_rules_elem *)
3487                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3488         if (!s_rule)
3489                 return ICE_ERR_NO_MEMORY;
3490
3491         ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
3492
3493         s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3494
3495         /* Update switch rule with new rule set to forward VSI list */
3496         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3497                                  ice_aqc_opc_update_sw_rules, NULL);
3498
3499         ice_free(hw, s_rule);
3500         return status;
3501 }
3502
3503 /**
3504  * ice_update_sw_rule_bridge_mode
3505  * @hw: pointer to the HW struct
3506  *
3507  * Updates unicast switch filter rules based on VEB/VEPA mode
3508  */
3509 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
3510 {
3511         struct ice_switch_info *sw = hw->switch_info;
3512         struct ice_fltr_mgmt_list_entry *fm_entry;
3513         enum ice_status status = ICE_SUCCESS;
3514         struct LIST_HEAD_TYPE *rule_head;
3515         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3516
3517         rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3518         rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3519
3520         ice_acquire_lock(rule_lock);
3521         LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
3522                             list_entry) {
3523                 struct ice_fltr_info *fi = &fm_entry->fltr_info;
3524                 u8 *addr = fi->l_data.mac.mac_addr;
3525
3526                 /* Update unicast Tx rules to reflect the selected
3527                  * VEB/VEPA mode
3528                  */
3529                 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
3530                     (fi->fltr_act == ICE_FWD_TO_VSI ||
3531                      fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3532                      fi->fltr_act == ICE_FWD_TO_Q ||
3533                      fi->fltr_act == ICE_FWD_TO_QGRP)) {
3534                         status = ice_update_pkt_fwd_rule(hw, fi);
3535                         if (status)
3536                                 break;
3537                 }
3538         }
3539
3540         ice_release_lock(rule_lock);
3541
3542         return status;
3543 }
3544
3545 /**
3546  * ice_add_update_vsi_list
3547  * @hw: pointer to the hardware structure
3548  * @m_entry: pointer to current filter management list entry
3549  * @cur_fltr: filter information from the book keeping entry
3550  * @new_fltr: filter information with the new VSI to be added
3551  *
3552  * Call AQ command to add or update previously created VSI list with new VSI.
3553  *
3554  * Helper function to do book keeping associated with adding filter information
3555  * The algorithm to do the book keeping is described below :
3556  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
3557  *      if only one VSI has been added till now
3558  *              Allocate a new VSI list and add two VSIs
3559  *              to this list using switch rule command
3560  *              Update the previously created switch rule with the
3561  *              newly created VSI list ID
3562  *      if a VSI list was previously created
3563  *              Add the new VSI to the previously created VSI list set
3564  *              using the update switch rule command
3565  */
3566 static enum ice_status
3567 ice_add_update_vsi_list(struct ice_hw *hw,
3568                         struct ice_fltr_mgmt_list_entry *m_entry,
3569                         struct ice_fltr_info *cur_fltr,
3570                         struct ice_fltr_info *new_fltr)
3571 {
3572         enum ice_status status = ICE_SUCCESS;
3573         u16 vsi_list_id = 0;
3574
3575         if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
3576              cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3577                 return ICE_ERR_NOT_IMPL;
3578
3579         if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3580              new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3581             (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3582              cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3583                 return ICE_ERR_NOT_IMPL;
3584
3585         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3586                 /* Only one entry existed in the mapping and it was not already
3587                  * a part of a VSI list. So, create a VSI list with the old and
3588                  * new VSIs.
3589                  */
3590                 struct ice_fltr_info tmp_fltr;
3591                 u16 vsi_handle_arr[2];
3592
3593                 /* A rule already exists with the new VSI being added */
3594                 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3595                         return ICE_ERR_ALREADY_EXISTS;
3596
3597                 vsi_handle_arr[0] = cur_fltr->vsi_handle;
3598                 vsi_handle_arr[1] = new_fltr->vsi_handle;
3599                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3600                                                   &vsi_list_id,
3601                                                   new_fltr->lkup_type);
3602                 if (status)
3603                         return status;
3604
3605                 tmp_fltr = *new_fltr;
3606                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3607                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3608                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3609                 /* Update the previous switch rule of "MAC forward to VSI" to
3610                  * "MAC fwd to VSI list"
3611                  */
3612                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3613                 if (status)
3614                         return status;
3615
3616                 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3617                 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3618                 m_entry->vsi_list_info =
3619                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3620                                                 vsi_list_id);
3621
3622                 /* If this entry was large action then the large action needs
3623                  * to be updated to point to FWD to VSI list
3624                  */
3625                 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3626                         status =
3627                             ice_add_marker_act(hw, m_entry,
3628                                                m_entry->sw_marker_id,
3629                                                m_entry->lg_act_idx);
3630         } else {
3631                 u16 vsi_handle = new_fltr->vsi_handle;
3632                 enum ice_adminq_opc opcode;
3633
3634                 if (!m_entry->vsi_list_info)
3635                         return ICE_ERR_CFG;
3636
3637                 /* A rule already exists with the new VSI being added */
3638                 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
3639                         return ICE_SUCCESS;
3640
3641                 /* Update the previously created VSI list set with
3642                  * the new VSI ID passed in
3643                  */
3644                 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3645                 opcode = ice_aqc_opc_update_sw_rules;
3646
3647                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3648                                                   vsi_list_id, false, opcode,
3649                                                   new_fltr->lkup_type);
3650                 /* update VSI list mapping info with new VSI ID */
3651                 if (!status)
3652                         ice_set_bit(vsi_handle,
3653                                     m_entry->vsi_list_info->vsi_map);
3654         }
3655         if (!status)
3656                 m_entry->vsi_count++;
3657         return status;
3658 }
3659
3660 /**
3661  * ice_find_rule_entry - Search a rule entry
3662  * @list_head: head of rule list
3663  * @f_info: rule information
3664  *
3665  * Helper function to search for a given rule entry
3666  * Returns pointer to entry storing the rule if found
3667  */
3668 static struct ice_fltr_mgmt_list_entry *
3669 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3670                     struct ice_fltr_info *f_info)
3671 {
3672         struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3673
3674         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3675                             list_entry) {
3676                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3677                             sizeof(f_info->l_data)) &&
3678                     f_info->flag == list_itr->fltr_info.flag) {
3679                         ret = list_itr;
3680                         break;
3681                 }
3682         }
3683         return ret;
3684 }
3685
3686 /**
3687  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3688  * @recp_list: VSI lists needs to be searched
3689  * @vsi_handle: VSI handle to be found in VSI list
3690  * @vsi_list_id: VSI list ID found containing vsi_handle
3691  *
3692  * Helper function to search a VSI list with single entry containing given VSI
3693  * handle element. This can be extended further to search VSI list with more
3694  * than 1 vsi_count. Returns pointer to VSI list entry if found.
3695  */
3696 static struct ice_vsi_list_map_info *
3697 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
3698                         u16 *vsi_list_id)
3699 {
3700         struct ice_vsi_list_map_info *map_info = NULL;
3701         struct LIST_HEAD_TYPE *list_head;
3702
3703         list_head = &recp_list->filt_rules;
3704         if (recp_list->adv_rule) {
3705                 struct ice_adv_fltr_mgmt_list_entry *list_itr;
3706
3707                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3708                                     ice_adv_fltr_mgmt_list_entry,
3709                                     list_entry) {
3710                         if (list_itr->vsi_list_info) {
3711                                 map_info = list_itr->vsi_list_info;
3712                                 if (ice_is_bit_set(map_info->vsi_map,
3713                                                    vsi_handle)) {
3714                                         *vsi_list_id = map_info->vsi_list_id;
3715                                         return map_info;
3716                                 }
3717                         }
3718                 }
3719         } else {
3720                 struct ice_fltr_mgmt_list_entry *list_itr;
3721
3722                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3723                                     ice_fltr_mgmt_list_entry,
3724                                     list_entry) {
3725                         if (list_itr->vsi_count == 1 &&
3726                             list_itr->vsi_list_info) {
3727                                 map_info = list_itr->vsi_list_info;
3728                                 if (ice_is_bit_set(map_info->vsi_map,
3729                                                    vsi_handle)) {
3730                                         *vsi_list_id = map_info->vsi_list_id;
3731                                         return map_info;
3732                                 }
3733                         }
3734                 }
3735         }
3736         return NULL;
3737 }
3738
3739 /**
3740  * ice_add_rule_internal - add rule for a given lookup type
3741  * @hw: pointer to the hardware structure
3742  * @recp_list: recipe list for which rule has to be added
3743  * @lport: logic port number on which function add rule
3744  * @f_entry: structure containing MAC forwarding information
3745  *
3746  * Adds or updates the rule lists for a given recipe
3747  */
3748 static enum ice_status
3749 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3750                       u8 lport, struct ice_fltr_list_entry *f_entry)
3751 {
3752         struct ice_fltr_info *new_fltr, *cur_fltr;
3753         struct ice_fltr_mgmt_list_entry *m_entry;
3754         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3755         enum ice_status status = ICE_SUCCESS;
3756
3757         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3758                 return ICE_ERR_PARAM;
3759
3760         /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3761         if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3762                 f_entry->fltr_info.fwd_id.hw_vsi_id =
3763                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3764
3765         rule_lock = &recp_list->filt_rule_lock;
3766
3767         ice_acquire_lock(rule_lock);
3768         new_fltr = &f_entry->fltr_info;
3769         if (new_fltr->flag & ICE_FLTR_RX)
3770                 new_fltr->src = lport;
3771         else if (new_fltr->flag & ICE_FLTR_TX)
3772                 new_fltr->src =
3773                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3774
3775         m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3776         if (!m_entry) {
3777                 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3778                 goto exit_add_rule_internal;
3779         }
3780
3781         cur_fltr = &m_entry->fltr_info;
3782         status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3783
3784 exit_add_rule_internal:
3785         ice_release_lock(rule_lock);
3786         return status;
3787 }
3788
3789 /**
3790  * ice_remove_vsi_list_rule
3791  * @hw: pointer to the hardware structure
3792  * @vsi_list_id: VSI list ID generated as part of allocate resource
3793  * @lkup_type: switch rule filter lookup type
3794  *
3795  * The VSI list should be emptied before this function is called to remove the
3796  * VSI list.
3797  */
3798 static enum ice_status
3799 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3800                          enum ice_sw_lkup_type lkup_type)
3801 {
3802         /* Free the vsi_list resource that we allocated. It is assumed that the
3803          * list is empty at this point.
3804          */
3805         return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3806                                             ice_aqc_opc_free_res);
3807 }
3808
/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Removes @vsi_handle from the VSI list referenced by @fm_list. For non-VLAN
 * lookups, once only one VSI remains the rule is converted back to a direct
 * "forward to VSI" rule and the now-unused VSI list resource is freed. For
 * VLAN lookups the (prune) list is freed only when it becomes empty.
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = ICE_SUCCESS;
	u16 vsi_list_id;

	/* Only rules that fan out through a VSI list can be updated here */
	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	/* Clear the VSI from the HW VSI list before touching book keeping */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		/* The single remaining VSI is the only bit left in the map */
		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
						    ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		/* Demote the rule from "fwd to VSI list" back to a plain
		 * "fwd to VSI" rule targeting the last remaining VSI
		 */
		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	/* Covers both teardown cases: non-VLAN list just demoted above, and
	 * a VLAN prune list that has become empty
	 */
	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
3896
3897 /**
3898  * ice_remove_rule_internal - Remove a filter rule of a given type
3899  *
3900  * @hw: pointer to the hardware structure
3901  * @recp_list: recipe list for which the rule needs to removed
3902  * @f_entry: rule entry containing filter information
3903  */
static enum ice_status
ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	bool remove_rule = false;
	u16 vsi_handle;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	/* Translate the software VSI handle to the HW VSI number that the
	 * switch rule was programmed with, so the lookup below matches.
	 */
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &recp_list->filt_rule_lock;
	ice_acquire_lock(rule_lock);
	list_elem = ice_find_rule_entry(&recp_list->filt_rules,
					&f_entry->fltr_info);
	if (!list_elem) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* Rule forwards to a single VSI/queue; remove it outright */
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		/* Action says VSI list, but no list is tracked - the book
		 * keeping is inconsistent, so treat it as not present.
		 */
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule from HW via the admin queue */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = (struct ice_aqc_sw_rules_elem *)
			ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
		if (!s_rule) {
			status = ICE_ERR_NO_MEMORY;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);

		/* The AQ buffer is no longer needed regardless of status */
		ice_free(hw, s_rule);

		if (status)
			goto exit;

		/* Remove the book keeping entry only after the AQ removal
		 * succeeded, so the SW state stays in sync with HW.
		 */
		LIST_DEL(&list_elem->list_entry);
		ice_free(hw, list_elem);
	}
exit:
	ice_release_lock(rule_lock);
	return status;
}
3987
3988 /**
3989  * ice_aq_get_res_alloc - get allocated resources
3990  * @hw: pointer to the HW struct
3991  * @num_entries: pointer to u16 to store the number of resource entries returned
3992  * @buf: pointer to buffer
3993  * @buf_size: size of buf
3994  * @cd: pointer to command details structure or NULL
3995  *
3996  * The caller-supplied buffer must be large enough to store the resource
3997  * information for all resource types. Each resource type is an
3998  * ice_aqc_get_res_resp_elem structure.
3999  */
4000 enum ice_status
4001 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4002                      struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4003                      struct ice_sq_cd *cd)
4004 {
4005         struct ice_aqc_get_res_alloc *resp;
4006         enum ice_status status;
4007         struct ice_aq_desc desc;
4008
4009         if (!buf)
4010                 return ICE_ERR_BAD_PTR;
4011
4012         if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4013                 return ICE_ERR_INVAL_SIZE;
4014
4015         resp = &desc.params.get_res;
4016
4017         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4018         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4019
4020         if (!status && num_entries)
4021                 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4022
4023         return status;
4024 }
4025
4026 /**
4027  * ice_aq_get_res_descs - get allocated resource descriptors
4028  * @hw: pointer to the hardware structure
4029  * @num_entries: number of resource entries in buffer
4030  * @buf: structure to hold response data buffer
4031  * @buf_size: size of buffer
4032  * @res_type: resource type
4033  * @res_shared: is resource shared
4034  * @desc_id: input - first desc ID to start; output - next desc ID
4035  * @cd: pointer to command details structure or NULL
4036  */
4037 enum ice_status
4038 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4039                      struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4040                      bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4041 {
4042         struct ice_aqc_get_allocd_res_desc *cmd;
4043         struct ice_aq_desc desc;
4044         enum ice_status status;
4045
4046         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4047
4048         cmd = &desc.params.get_res_desc;
4049
4050         if (!buf)
4051                 return ICE_ERR_PARAM;
4052
4053         if (buf_size != (num_entries * sizeof(*buf)))
4054                 return ICE_ERR_PARAM;
4055
4056         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4057
4058         cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4059                                          ICE_AQC_RES_TYPE_M) | (res_shared ?
4060                                         ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4061         cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4062
4063         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4064         if (!status)
4065                 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4066
4067         return status;
4068 }
4069
/**
 * ice_add_mac_rule - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 * @sw: pointer to switch info struct for which function add rule
 * @lport: logic port number on which function add rule
 *
 * Multicast entries (and unicast entries when hw->ucast_shared is set) are
 * programmed one at a time through ice_add_rule_internal(). Unshared unicast
 * entries are batched into one buffer and sent via bulk ice_aq_sw_rules()
 * calls of at most ICE_AQ_MAX_BUF_LEN bytes each.
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
 */
static enum ice_status
ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
		 struct ice_switch_info *sw, u8 lport)
{
	struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct LIST_HEAD_TYPE *rule_head;
	u16 total_elem_left, s_rule_size;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	u16 num_unicast = 0;
	u8 elem_sent;

	s_rule = NULL;
	rule_lock = &recp_list->filt_rule_lock;
	rule_head = &recp_list->filt_rules;

	/* Pass 1: validate every entry, count the unshared unicast addresses
	 * destined for the bulk buffer, and add the multicast/shared-unicast
	 * entries immediately.
	 */
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    IS_ZERO_ETHER_ADDR(add))
			return ICE_ERR_PARAM;
		if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			ice_acquire_lock(rule_lock);
			if (ice_find_rule_entry(rule_head,
						&m_list_itr->fltr_info)) {
				ice_release_lock(rule_lock);
				return ICE_ERR_ALREADY_EXISTS;
			}
			ice_release_lock(rule_lock);
			num_unicast++;
		} else if (IS_MULTICAST_ETHER_ADDR(add) ||
			   (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, recp_list, lport,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	/* Hold the lock for the rest of the function: the bulk buffer is
	 * built, sent, and mirrored into the filter list as one unit.
	 */
	ice_acquire_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = ICE_SUCCESS;
		goto ice_add_mac_exit;
	}

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = (struct ice_aqc_sw_rules_elem *)
		ice_calloc(hw, num_unicast, s_rule_size);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	/* Pass 2: fill one fixed-size switch-rule element per unicast entry */
	r_iter = s_rule;
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		elem_sent = MIN_T(u8, total_elem_left,
				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Pass 3: fill up rule ID based on the value returned from FW and
	 * create a book keeping entry for each programmed unicast address.
	 */
	r_iter = s_rule;
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
			f_info->fltr_rule_id =
				LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = (struct ice_fltr_mgmt_list_entry *)
				ice_malloc(hw, sizeof(*fm_entry));
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */

			LIST_ADD(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	ice_release_lock(rule_lock);
	if (s_rule)
		ice_free(hw, s_rule);
	return status;
}
4225
4226 /**
4227  * ice_add_mac - Add a MAC address based filter rule
4228  * @hw: pointer to the hardware structure
4229  * @m_list: list of MAC addresses and forwarding information
4230  *
4231  * Function add MAC rule for logical port from HW struct
4232  */
4233 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4234 {
4235         if (!m_list || !hw)
4236                 return ICE_ERR_PARAM;
4237
4238         return ice_add_mac_rule(hw, m_list, hw->switch_info,
4239                                 hw->port_info->lport);
4240 }
4241
/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @recp_list: recipe list for which rule has to be added
 * @f_entry: filter entry containing one VLAN information
 *
 * Three cases are handled under the recipe's filter-rule lock:
 *  1) no rule exists for this VLAN yet - reuse an existing VSI list that
 *     already contains the target VSI, or create a new one, then add a
 *     forwarding rule pointing at it;
 *  2) a rule exists and its VSI list has ref_cnt == 1 - extend that list
 *     with the new VSI;
 *  3) a rule exists and its VSI list is shared (ref_cnt > 1) - build a new
 *     two-entry VSI list and repoint the rule at it.
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
		      struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &recp_list->filt_rule_lock;
	ice_acquire_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
	if (!v_list_itr) {
		/* Case 1: no rule exists for this VLAN yet */
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(recp_list,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
		if (!status) {
			/* Re-find the entry just added so its VSI list
			 * tracking can be attached.
			 */
			v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
							 new_fltr);
			if (!v_list_itr) {
				status = ICE_ERR_DOES_NOT_EXIST;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				/* NOTE(review): ice_create_vsi_list_map may
				 * return NULL on allocation failure and the
				 * result is not checked here - confirm callers
				 * tolerate a NULL vsi_list_info.
				 */
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Case 2: update existing VSI list to add new VSI ID only if
		 * it is used by one VLAN rule.
		 * NOTE(review): assumes vsi_list_info is non-NULL for every
		 * existing VLAN rule - confirm all creation paths set it.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* Case 3: if VLAN rule exists and VSI list being used by this
		 * rule is referenced by more than 1 VLAN rule. Then create a
		 * new VSI list appending previous VSI with new VSI and update
		 * existing VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;
			goto exit;
		}

		/* With vsi_count == 1 the map holds exactly one VSI - the
		 * first set bit is that VSI's handle.
		 */
		cur_handle =
			ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
					   ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* before overriding VSI list map info. decrement ref_cnt of
		 * previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	ice_release_lock(rule_lock);
	return status;
}
4395
4396 /**
4397  * ice_add_vlan_rule - Add VLAN based filter rule
4398  * @hw: pointer to the hardware structure
4399  * @v_list: list of VLAN entries and forwarding information
4400  * @sw: pointer to switch info struct for which function add rule
4401  */
4402 static enum ice_status
4403 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4404                   struct ice_switch_info *sw)
4405 {
4406         struct ice_fltr_list_entry *v_list_itr;
4407         struct ice_sw_recipe *recp_list;
4408
4409         recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4410         LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4411                             list_entry) {
4412                 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4413                         return ICE_ERR_PARAM;
4414                 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4415                 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4416                                                            v_list_itr);
4417                 if (v_list_itr->status)
4418                         return v_list_itr->status;
4419         }
4420         return ICE_SUCCESS;
4421 }
4422
4423 /**
4424  * ice_add_vlan - Add a VLAN based filter rule
4425  * @hw: pointer to the hardware structure
4426  * @v_list: list of VLAN and forwarding information
4427  *
4428  * Function add VLAN rule for logical port from HW struct
4429  */
4430 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4431 {
4432         if (!v_list || !hw)
4433                 return ICE_ERR_PARAM;
4434
4435         return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4436 }
4437
4438 /**
4439  * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4440  * @hw: pointer to the hardware structure
4441  * @mv_list: list of MAC and VLAN filters
4442  * @sw: pointer to switch info struct for which function add rule
4443  * @lport: logic port number on which function add rule
4444  *
4445  * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4446  * pruning bits enabled, then it is the responsibility of the caller to make
4447  * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4448  * VLAN won't be received on that VSI otherwise.
4449  */
4450 static enum ice_status
4451 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4452                       struct ice_switch_info *sw, u8 lport)
4453 {
4454         struct ice_fltr_list_entry *mv_list_itr;
4455         struct ice_sw_recipe *recp_list;
4456
4457         if (!mv_list || !hw)
4458                 return ICE_ERR_PARAM;
4459
4460         recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
4461         LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4462                             list_entry) {
4463                 enum ice_sw_lkup_type l_type =
4464                         mv_list_itr->fltr_info.lkup_type;
4465
4466                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4467                         return ICE_ERR_PARAM;
4468                 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
4469                 mv_list_itr->status =
4470                         ice_add_rule_internal(hw, recp_list, lport,
4471                                               mv_list_itr);
4472                 if (mv_list_itr->status)
4473                         return mv_list_itr->status;
4474         }
4475         return ICE_SUCCESS;
4476 }
4477
4478 /**
4479  * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4480  * @hw: pointer to the hardware structure
4481  * @mv_list: list of MAC VLAN addresses and forwarding information
4482  *
4483  * Function add MAC VLAN rule for logical port from HW struct
4484  */
4485 enum ice_status
4486 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4487 {
4488         if (!mv_list || !hw)
4489                 return ICE_ERR_PARAM;
4490
4491         return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4492                                      hw->port_info->lport);
4493 }
4494
4495 /**
4496  * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4497  * @hw: pointer to the hardware structure
4498  * @em_list: list of ether type MAC filter, MAC is optional
4499  * @sw: pointer to switch info struct for which function add rule
4500  * @lport: logic port number on which function add rule
4501  *
4502  * This function requires the caller to populate the entries in
4503  * the filter list with the necessary fields (including flags to
4504  * indicate Tx or Rx rules).
4505  */
4506 static enum ice_status
4507 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4508                      struct ice_switch_info *sw, u8 lport)
4509 {
4510         struct ice_fltr_list_entry *em_list_itr;
4511
4512         LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4513                             list_entry) {
4514                 struct ice_sw_recipe *recp_list;
4515                 enum ice_sw_lkup_type l_type;
4516
4517                 l_type = em_list_itr->fltr_info.lkup_type;
4518                 recp_list = &sw->recp_list[l_type];
4519
4520                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4521                     l_type != ICE_SW_LKUP_ETHERTYPE)
4522                         return ICE_ERR_PARAM;
4523
4524                 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
4525                                                             lport,
4526                                                             em_list_itr);
4527                 if (em_list_itr->status)
4528                         return em_list_itr->status;
4529         }
4530         return ICE_SUCCESS;
4531 }
4532
4533 /**
4534  * ice_add_eth_mac - Add a ethertype based filter rule
4535  * @hw: pointer to the hardware structure
4536  * @em_list: list of ethertype and forwarding information
4537  *
4538  * Function add ethertype rule for logical port from HW struct
4539  */
4540 enum ice_status
4541 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4542 {
4543         if (!em_list || !hw)
4544                 return ICE_ERR_PARAM;
4545
4546         return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4547                                     hw->port_info->lport);
4548 }
4549
4550 /**
4551  * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4552  * @hw: pointer to the hardware structure
4553  * @em_list: list of ethertype or ethertype MAC entries
4554  * @sw: pointer to switch info struct for which function add rule
4555  */
4556 static enum ice_status
4557 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4558                         struct ice_switch_info *sw)
4559 {
4560         struct ice_fltr_list_entry *em_list_itr, *tmp;
4561
4562         LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4563                                  list_entry) {
4564                 struct ice_sw_recipe *recp_list;
4565                 enum ice_sw_lkup_type l_type;
4566
4567                 l_type = em_list_itr->fltr_info.lkup_type;
4568
4569                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4570                     l_type != ICE_SW_LKUP_ETHERTYPE)
4571                         return ICE_ERR_PARAM;
4572
4573                 recp_list = &sw->recp_list[l_type];
4574                 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4575                                                                em_list_itr);
4576                 if (em_list_itr->status)
4577                         return em_list_itr->status;
4578         }
4579         return ICE_SUCCESS;
4580 }
4581
4582 /**
4583  * ice_remove_eth_mac - remove a ethertype based filter rule
4584  * @hw: pointer to the hardware structure
4585  * @em_list: list of ethertype and forwarding information
4586  *
4587  */
4588 enum ice_status
4589 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4590 {
4591         if (!em_list || !hw)
4592                 return ICE_ERR_PARAM;
4593
4594         return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4595 }
4596
4597 /**
4598  * ice_rem_sw_rule_info
4599  * @hw: pointer to the hardware structure
4600  * @rule_head: pointer to the switch list structure that we want to delete
4601  */
4602 static void
4603 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4604 {
4605         if (!LIST_EMPTY(rule_head)) {
4606                 struct ice_fltr_mgmt_list_entry *entry;
4607                 struct ice_fltr_mgmt_list_entry *tmp;
4608
4609                 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4610                                          ice_fltr_mgmt_list_entry, list_entry) {
4611                         LIST_DEL(&entry->list_entry);
4612                         ice_free(hw, entry);
4613                 }
4614         }
4615 }
4616
4617 /**
4618  * ice_rem_adv_rule_info
4619  * @hw: pointer to the hardware structure
4620  * @rule_head: pointer to the switch list structure that we want to delete
4621  */
4622 static void
4623 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4624 {
4625         struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4626         struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4627
4628         if (LIST_EMPTY(rule_head))
4629                 return;
4630
4631         LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4632                                  ice_adv_fltr_mgmt_list_entry, list_entry) {
4633                 LIST_DEL(&lst_itr->list_entry);
4634                 ice_free(hw, lst_itr->lkups);
4635                 ice_free(hw, lst_itr);
4636         }
4637 }
4638
4639 /**
4640  * ice_rem_all_sw_rules_info
4641  * @hw: pointer to the hardware structure
4642  */
4643 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4644 {
4645         struct ice_switch_info *sw = hw->switch_info;
4646         u8 i;
4647
4648         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4649                 struct LIST_HEAD_TYPE *rule_head;
4650
4651                 rule_head = &sw->recp_list[i].filt_rules;
4652                 if (!sw->recp_list[i].adv_rule)
4653                         ice_rem_sw_rule_info(hw, rule_head);
4654                 else
4655                         ice_rem_adv_rule_info(hw, rule_head);
4656                 if (sw->recp_list[i].adv_rule &&
4657                     LIST_EMPTY(&sw->recp_list[i].filt_rules))
4658                         sw->recp_list[i].adv_rule = false;
4659         }
4660 }
4661
/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @pi: pointer to the port_info structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add filter rule to set/unset given VSI as default VSI for the switch
 * (represented by swid)
 */
enum ice_status
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
                 u8 direction)
{
        struct ice_aqc_sw_rules_elem *s_rule;
        struct ice_fltr_info f_info;
        struct ice_hw *hw = pi->hw;
        enum ice_adminq_opc opcode;
        enum ice_status status;
        u16 s_rule_size;
        u16 hw_vsi_id;

        if (!ice_is_vsi_valid(hw, vsi_handle))
                return ICE_ERR_PARAM;
        hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

        /* An "add" rule element carries a packet header; a "remove"
         * element does not, hence the smaller buffer when clearing.
         */
        s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
                ICE_SW_RULE_RX_TX_NO_HDR_SIZE;

        s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
        if (!s_rule)
                return ICE_ERR_NO_MEMORY;

        ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);

        f_info.lkup_type = ICE_SW_LKUP_DFLT;
        f_info.flag = direction;
        f_info.fltr_act = ICE_FWD_TO_VSI;
        f_info.fwd_id.hw_vsi_id = hw_vsi_id;

        if (f_info.flag & ICE_FLTR_RX) {
                /* RX default: source is the logical port; on removal reuse
                 * the rule ID cached when the rule was installed.
                 */
                f_info.src = pi->lport;
                f_info.src_id = ICE_SRC_ID_LPORT;
                if (!set)
                        f_info.fltr_rule_id =
                                pi->dflt_rx_vsi_rule_id;
        } else if (f_info.flag & ICE_FLTR_TX) {
                /* TX default: source is the VSI itself */
                f_info.src_id = ICE_SRC_ID_VSI;
                f_info.src = hw_vsi_id;
                if (!set)
                        f_info.fltr_rule_id =
                                pi->dflt_tx_vsi_rule_id;
        }

        if (set)
                opcode = ice_aqc_opc_add_sw_rules;
        else
                opcode = ice_aqc_opc_remove_sw_rules;

        ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

        status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
        if (status || !(f_info.flag & ICE_FLTR_TX_RX))
                goto out;
        if (set) {
                /* cache the HW-assigned rule index so it can be removed later */
                u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);

                if (f_info.flag & ICE_FLTR_TX) {
                        pi->dflt_tx_vsi_num = hw_vsi_id;
                        pi->dflt_tx_vsi_rule_id = index;
                } else if (f_info.flag & ICE_FLTR_RX) {
                        pi->dflt_rx_vsi_num = hw_vsi_id;
                        pi->dflt_rx_vsi_rule_id = index;
                }
        } else {
                /* rule removed: invalidate the cached default-VSI bookkeeping */
                if (f_info.flag & ICE_FLTR_TX) {
                        pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
                        pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
                } else if (f_info.flag & ICE_FLTR_RX) {
                        pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
                        pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
                }
        }

out:
        ice_free(hw, s_rule);
        return status;
}
4750
4751 /**
4752  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4753  * @list_head: head of rule list
4754  * @f_info: rule information
4755  *
4756  * Helper function to search for a unicast rule entry - this is to be used
4757  * to remove unicast MAC filter that is not shared with other VSIs on the
4758  * PF switch.
4759  *
4760  * Returns pointer to entry storing the rule if found
4761  */
4762 static struct ice_fltr_mgmt_list_entry *
4763 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4764                           struct ice_fltr_info *f_info)
4765 {
4766         struct ice_fltr_mgmt_list_entry *list_itr;
4767
4768         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4769                             list_entry) {
4770                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4771                             sizeof(f_info->l_data)) &&
4772                     f_info->fwd_id.hw_vsi_id ==
4773                     list_itr->fltr_info.fwd_id.hw_vsi_id &&
4774                     f_info->flag == list_itr->fltr_info.flag)
4775                         return list_itr;
4776         }
4777         return NULL;
4778 }
4779
/**
 * ice_remove_mac_rule - remove a MAC based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 * @recp_list: list from which function remove MAC address
 *
 * This function removes either a MAC filter rule or a specific VSI from a
 * VSI list for a multicast MAC address.
 *
 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
 * ice_add_mac. Caller should be aware that this call will only work if all
 * the entries passed into m_list were added previously. It will not attempt to
 * do a partial remove of entries that were found.
 */
static enum ice_status
ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
                    struct ice_sw_recipe *recp_list)
{
        struct ice_fltr_list_entry *list_itr, *tmp;
        struct ice_lock *rule_lock; /* Lock to protect filter rule list */

        if (!m_list)
                return ICE_ERR_PARAM;

        rule_lock = &recp_list->filt_rule_lock;
        LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
                                 list_entry) {
                enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
                u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
                u16 vsi_handle;

                /* every entry passed in must be a MAC lookup */
                if (l_type != ICE_SW_LKUP_MAC)
                        return ICE_ERR_PARAM;

                vsi_handle = list_itr->fltr_info.vsi_handle;
                if (!ice_is_vsi_valid(hw, vsi_handle))
                        return ICE_ERR_PARAM;

                list_itr->fltr_info.fwd_id.hw_vsi_id =
                                        ice_get_hw_vsi_num(hw, vsi_handle);
                if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
                        /* Don't remove the unicast address that belongs to
                         * another VSI on the switch, since it is not being
                         * shared...
                         */
                        ice_acquire_lock(rule_lock);
                        if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
                                                       &list_itr->fltr_info)) {
                                ice_release_lock(rule_lock);
                                return ICE_ERR_DOES_NOT_EXIST;
                        }
                        ice_release_lock(rule_lock);
                }
                /* per-entry removal status is recorded on the entry itself */
                list_itr->status = ice_remove_rule_internal(hw, recp_list,
                                                            list_itr);
                if (list_itr->status)
                        return list_itr->status;
        }
        return ICE_SUCCESS;
}
4840
4841 /**
4842  * ice_remove_mac - remove a MAC address based filter rule
4843  * @hw: pointer to the hardware structure
4844  * @m_list: list of MAC addresses and forwarding information
4845  *
4846  */
4847 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4848 {
4849         struct ice_sw_recipe *recp_list;
4850
4851         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4852         return ice_remove_mac_rule(hw, m_list, recp_list);
4853 }
4854
4855 /**
4856  * ice_remove_vlan_rule - Remove VLAN based filter rule
4857  * @hw: pointer to the hardware structure
4858  * @v_list: list of VLAN entries and forwarding information
4859  * @recp_list: list from which function remove VLAN
4860  */
4861 static enum ice_status
4862 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4863                      struct ice_sw_recipe *recp_list)
4864 {
4865         struct ice_fltr_list_entry *v_list_itr, *tmp;
4866
4867         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4868                                  list_entry) {
4869                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4870
4871                 if (l_type != ICE_SW_LKUP_VLAN)
4872                         return ICE_ERR_PARAM;
4873                 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4874                                                               v_list_itr);
4875                 if (v_list_itr->status)
4876                         return v_list_itr->status;
4877         }
4878         return ICE_SUCCESS;
4879 }
4880
4881 /**
4882  * ice_remove_vlan - remove a VLAN address based filter rule
4883  * @hw: pointer to the hardware structure
4884  * @v_list: list of VLAN and forwarding information
4885  *
4886  */
4887 enum ice_status
4888 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4889 {
4890         struct ice_sw_recipe *recp_list;
4891
4892         if (!v_list || !hw)
4893                 return ICE_ERR_PARAM;
4894
4895         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4896         return ice_remove_vlan_rule(hw, v_list, recp_list);
4897 }
4898
4899 /**
4900  * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4901  * @hw: pointer to the hardware structure
4902  * @v_list: list of MAC VLAN entries and forwarding information
4903  * @recp_list: list from which function remove MAC VLAN
4904  */
4905 static enum ice_status
4906 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4907                          struct ice_sw_recipe *recp_list)
4908 {
4909         struct ice_fltr_list_entry *v_list_itr, *tmp;
4910
4911         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4912         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4913                                  list_entry) {
4914                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4915
4916                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4917                         return ICE_ERR_PARAM;
4918                 v_list_itr->status =
4919                         ice_remove_rule_internal(hw, recp_list,
4920                                                  v_list_itr);
4921                 if (v_list_itr->status)
4922                         return v_list_itr->status;
4923         }
4924         return ICE_SUCCESS;
4925 }
4926
4927 /**
4928  * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4929  * @hw: pointer to the hardware structure
4930  * @mv_list: list of MAC VLAN and forwarding information
4931  */
4932 enum ice_status
4933 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4934 {
4935         struct ice_sw_recipe *recp_list;
4936
4937         if (!mv_list || !hw)
4938                 return ICE_ERR_PARAM;
4939
4940         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4941         return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4942 }
4943
4944 /**
4945  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4946  * @fm_entry: filter entry to inspect
4947  * @vsi_handle: VSI handle to compare with filter info
4948  */
4949 static bool
4950 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4951 {
4952         return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4953                  fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4954                 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4955                  (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4956                                  vsi_handle))));
4957 }
4958
4959 /**
4960  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4961  * @hw: pointer to the hardware structure
4962  * @vsi_handle: VSI handle to remove filters from
4963  * @vsi_list_head: pointer to the list to add entry to
4964  * @fi: pointer to fltr_info of filter entry to copy & add
4965  *
4966  * Helper function, used when creating a list of filters to remove from
4967  * a specific VSI. The entry added to vsi_list_head is a COPY of the
4968  * original filter entry, with the exception of fltr_info.fltr_act and
4969  * fltr_info.fwd_id fields. These are set such that later logic can
4970  * extract which VSI to remove the fltr from, and pass on that information.
4971  */
4972 static enum ice_status
4973 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4974                                struct LIST_HEAD_TYPE *vsi_list_head,
4975                                struct ice_fltr_info *fi)
4976 {
4977         struct ice_fltr_list_entry *tmp;
4978
4979         /* this memory is freed up in the caller function
4980          * once filters for this VSI are removed
4981          */
4982         tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4983         if (!tmp)
4984                 return ICE_ERR_NO_MEMORY;
4985
4986         tmp->fltr_info = *fi;
4987
4988         /* Overwrite these fields to indicate which VSI to remove filter from,
4989          * so find and remove logic can extract the information from the
4990          * list entries. Note that original entries will still have proper
4991          * values.
4992          */
4993         tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4994         tmp->fltr_info.vsi_handle = vsi_handle;
4995         tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4996
4997         LIST_ADD(&tmp->list_entry, vsi_list_head);
4998
4999         return ICE_SUCCESS;
5000 }
5001
5002 /**
5003  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5004  * @hw: pointer to the hardware structure
5005  * @vsi_handle: VSI handle to remove filters from
5006  * @lkup_list_head: pointer to the list that has certain lookup type filters
5007  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5008  *
5009  * Locates all filters in lkup_list_head that are used by the given VSI,
5010  * and adds COPIES of those entries to vsi_list_head (intended to be used
5011  * to remove the listed filters).
5012  * Note that this means all entries in vsi_list_head must be explicitly
5013  * deallocated by the caller when done with list.
5014  */
5015 static enum ice_status
5016 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5017                          struct LIST_HEAD_TYPE *lkup_list_head,
5018                          struct LIST_HEAD_TYPE *vsi_list_head)
5019 {
5020         struct ice_fltr_mgmt_list_entry *fm_entry;
5021         enum ice_status status = ICE_SUCCESS;
5022
5023         /* check to make sure VSI ID is valid and within boundary */
5024         if (!ice_is_vsi_valid(hw, vsi_handle))
5025                 return ICE_ERR_PARAM;
5026
5027         LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5028                             ice_fltr_mgmt_list_entry, list_entry) {
5029                 struct ice_fltr_info *fi;
5030
5031                 fi = &fm_entry->fltr_info;
5032                 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
5033                         continue;
5034
5035                 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5036                                                         vsi_list_head, fi);
5037                 if (status)
5038                         return status;
5039         }
5040         return status;
5041 }
5042
5043 /**
5044  * ice_determine_promisc_mask
5045  * @fi: filter info to parse
5046  *
5047  * Helper function to determine which ICE_PROMISC_ mask corresponds
5048  * to given filter into.
5049  */
5050 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5051 {
5052         u16 vid = fi->l_data.mac_vlan.vlan_id;
5053         u8 *macaddr = fi->l_data.mac.mac_addr;
5054         bool is_tx_fltr = false;
5055         u8 promisc_mask = 0;
5056
5057         if (fi->flag == ICE_FLTR_TX)
5058                 is_tx_fltr = true;
5059
5060         if (IS_BROADCAST_ETHER_ADDR(macaddr))
5061                 promisc_mask |= is_tx_fltr ?
5062                         ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5063         else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5064                 promisc_mask |= is_tx_fltr ?
5065                         ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5066         else if (IS_UNICAST_ETHER_ADDR(macaddr))
5067                 promisc_mask |= is_tx_fltr ?
5068                         ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
5069         if (vid)
5070                 promisc_mask |= is_tx_fltr ?
5071                         ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5072
5073         return promisc_mask;
5074 }
5075
5076 /**
5077  * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5078  * @hw: pointer to the hardware structure
5079  * @vsi_handle: VSI handle to retrieve info from
5080  * @promisc_mask: pointer to mask to be filled in
5081  * @vid: VLAN ID of promisc VLAN VSI
5082  * @sw: pointer to switch info struct for which function add rule
5083  */
5084 static enum ice_status
5085 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5086                      u16 *vid, struct ice_switch_info *sw)
5087 {
5088         struct ice_fltr_mgmt_list_entry *itr;
5089         struct LIST_HEAD_TYPE *rule_head;
5090         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5091
5092         if (!ice_is_vsi_valid(hw, vsi_handle))
5093                 return ICE_ERR_PARAM;
5094
5095         *vid = 0;
5096         *promisc_mask = 0;
5097         rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5098         rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5099
5100         ice_acquire_lock(rule_lock);
5101         LIST_FOR_EACH_ENTRY(itr, rule_head,
5102                             ice_fltr_mgmt_list_entry, list_entry) {
5103                 /* Continue if this filter doesn't apply to this VSI or the
5104                  * VSI ID is not in the VSI map for this filter
5105                  */
5106                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5107                         continue;
5108
5109                 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5110         }
5111         ice_release_lock(rule_lock);
5112
5113         return ICE_SUCCESS;
5114 }
5115
5116 /**
5117  * ice_get_vsi_promisc - get promiscuous mode of given VSI
5118  * @hw: pointer to the hardware structure
5119  * @vsi_handle: VSI handle to retrieve info from
5120  * @promisc_mask: pointer to mask to be filled in
5121  * @vid: VLAN ID of promisc VLAN VSI
5122  */
5123 enum ice_status
5124 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5125                     u16 *vid)
5126 {
5127         return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5128                                     vid, hw->switch_info);
5129 }
5130
/**
 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to retrieve info from
 * @promisc_mask: pointer to mask to be filled in
 * @vid: VLAN ID of promisc VLAN VSI
 * @sw: pointer to switch info struct for which function add rule
 *
 * Accumulates the promisc bits of every ICE_SW_LKUP_PROMISC_VLAN filter
 * the VSI uses. NOTE(review): *vid is only zeroed here, never populated —
 * confirm callers do not rely on it being filled in.
 */
static enum ice_status
_ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
                          u16 *vid, struct ice_switch_info *sw)
{
        struct ice_fltr_mgmt_list_entry *itr;
        struct LIST_HEAD_TYPE *rule_head;
        struct ice_lock *rule_lock;     /* Lock to protect filter rule list */

        if (!ice_is_vsi_valid(hw, vsi_handle))
                return ICE_ERR_PARAM;

        *vid = 0;
        *promisc_mask = 0;
        rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
        rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;

        ice_acquire_lock(rule_lock);
        LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
                            list_entry) {
                /* Continue if this filter doesn't apply to this VSI or the
                 * VSI ID is not in the VSI map for this filter
                 */
                if (!ice_vsi_uses_fltr(itr, vsi_handle))
                        continue;

                *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
        }
        ice_release_lock(rule_lock);

        return ICE_SUCCESS;
}
5170
5171 /**
5172  * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5173  * @hw: pointer to the hardware structure
5174  * @vsi_handle: VSI handle to retrieve info from
5175  * @promisc_mask: pointer to mask to be filled in
5176  * @vid: VLAN ID of promisc VLAN VSI
5177  */
5178 enum ice_status
5179 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5180                          u16 *vid)
5181 {
5182         return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5183                                          vid, hw->switch_info);
5184 }
5185
5186 /**
5187  * ice_remove_promisc - Remove promisc based filter rules
5188  * @hw: pointer to the hardware structure
5189  * @recp_id: recipe ID for which the rule needs to removed
5190  * @v_list: list of promisc entries
5191  */
5192 static enum ice_status
5193 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5194                    struct LIST_HEAD_TYPE *v_list)
5195 {
5196         struct ice_fltr_list_entry *v_list_itr, *tmp;
5197         struct ice_sw_recipe *recp_list;
5198
5199         recp_list = &hw->switch_info->recp_list[recp_id];
5200         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5201                                  list_entry) {
5202                 v_list_itr->status =
5203                         ice_remove_rule_internal(hw, recp_list, v_list_itr);
5204                 if (v_list_itr->status)
5205                         return v_list_itr->status;
5206         }
5207         return ICE_SUCCESS;
5208 }
5209
/**
 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 * @sw: pointer to switch info struct for which function add rule
 *
 * Collects copies of the matching promisc filters under the rule lock,
 * then removes them after the lock is dropped. The temporary copies are
 * always freed before returning.
 */
static enum ice_status
_ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
                       u16 vid, struct ice_switch_info *sw)
{
        struct ice_fltr_list_entry *fm_entry, *tmp;
        struct LIST_HEAD_TYPE remove_list_head;
        struct ice_fltr_mgmt_list_entry *itr;
        struct LIST_HEAD_TYPE *rule_head;
        struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
        enum ice_status status = ICE_SUCCESS;
        u8 recipe_id;

        if (!ice_is_vsi_valid(hw, vsi_handle))
                return ICE_ERR_PARAM;

        /* VLAN promisc bits live under the PROMISC_VLAN recipe */
        if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
                recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
        else
                recipe_id = ICE_SW_LKUP_PROMISC;

        rule_head = &sw->recp_list[recipe_id].filt_rules;
        rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

        INIT_LIST_HEAD(&remove_list_head);

        ice_acquire_lock(rule_lock);
        LIST_FOR_EACH_ENTRY(itr, rule_head,
                            ice_fltr_mgmt_list_entry, list_entry) {
                struct ice_fltr_info *fltr_info;
                u8 fltr_promisc_mask = 0;

                if (!ice_vsi_uses_fltr(itr, vsi_handle))
                        continue;
                fltr_info = &itr->fltr_info;

                /* for VLAN promisc, only match the requested VLAN ID */
                if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
                    vid != fltr_info->l_data.mac_vlan.vlan_id)
                        continue;

                fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

                /* Skip if filter is not completely specified by given mask */
                if (fltr_promisc_mask & ~promisc_mask)
                        continue;

                status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
                                                        &remove_list_head,
                                                        fltr_info);
                if (status) {
                        ice_release_lock(rule_lock);
                        goto free_fltr_list;
                }
        }
        ice_release_lock(rule_lock);

        status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
        /* free the temporary copies regardless of removal outcome */
        LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
                                 ice_fltr_list_entry, list_entry) {
                LIST_DEL(&fm_entry->list_entry);
                ice_free(hw, fm_entry);
        }

        return status;
}
5284
5285 /**
5286  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5287  * @hw: pointer to the hardware structure
5288  * @vsi_handle: VSI handle to clear mode
5289  * @promisc_mask: mask of promiscuous config bits to clear
5290  * @vid: VLAN ID to clear VLAN promiscuous
5291  */
5292 enum ice_status
5293 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5294                       u8 promisc_mask, u16 vid)
5295 {
5296         return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5297                                       vid, hw->switch_info);
5298 }
5299
/**
 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 * @lport: logical port number to configure promisc mode
 * @sw: pointer to switch info struct for which function add rule
 *
 * Decodes @promisc_mask bit by bit and installs one filter rule per
 * direction/packet-type combination. Stops at the first rule that fails
 * to install.
 */
static enum ice_status
_ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
                     u16 vid, u8 lport, struct ice_switch_info *sw)
{
        enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
        struct ice_fltr_list_entry f_list_entry;
        struct ice_fltr_info new_fltr;
        enum ice_status status = ICE_SUCCESS;
        bool is_tx_fltr;
        u16 hw_vsi_id;
        int pkt_type;
        u8 recipe_id;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        if (!ice_is_vsi_valid(hw, vsi_handle))
                return ICE_ERR_PARAM;
        hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

        ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);

        /* VLAN promisc bits select the PROMISC_VLAN recipe and carry @vid */
        if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
                new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
                new_fltr.l_data.mac_vlan.vlan_id = vid;
                recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
        } else {
                new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
                recipe_id = ICE_SW_LKUP_PROMISC;
        }

        /* Separate filters must be set for each direction/packet type
         * combination, so we will loop over the mask value, store the
         * individual type, and clear it out in the input mask as it
         * is found.
         */
        while (promisc_mask) {
                struct ice_sw_recipe *recp_list;
                u8 *mac_addr;

                pkt_type = 0;
                is_tx_fltr = false;

                if (promisc_mask & ICE_PROMISC_UCAST_RX) {
                        promisc_mask &= ~ICE_PROMISC_UCAST_RX;
                        pkt_type = UCAST_FLTR;
                } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
                        promisc_mask &= ~ICE_PROMISC_UCAST_TX;
                        pkt_type = UCAST_FLTR;
                        is_tx_fltr = true;
                } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
                        promisc_mask &= ~ICE_PROMISC_MCAST_RX;
                        pkt_type = MCAST_FLTR;
                } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
                        promisc_mask &= ~ICE_PROMISC_MCAST_TX;
                        pkt_type = MCAST_FLTR;
                        is_tx_fltr = true;
                } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
                        promisc_mask &= ~ICE_PROMISC_BCAST_RX;
                        pkt_type = BCAST_FLTR;
                } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
                        promisc_mask &= ~ICE_PROMISC_BCAST_TX;
                        pkt_type = BCAST_FLTR;
                        is_tx_fltr = true;
                }

                /* Check for VLAN promiscuous flag */
                if (promisc_mask & ICE_PROMISC_VLAN_RX) {
                        promisc_mask &= ~ICE_PROMISC_VLAN_RX;
                } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
                        promisc_mask &= ~ICE_PROMISC_VLAN_TX;
                        is_tx_fltr = true;
                }

                /* Set filter DA based on packet type */
                mac_addr = new_fltr.l_data.mac.mac_addr;
                if (pkt_type == BCAST_FLTR) {
                        ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
                } else if (pkt_type == MCAST_FLTR ||
                           pkt_type == UCAST_FLTR) {
                        /* Use the dummy ether header DA */
                        ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
                                   ICE_NONDMA_TO_NONDMA);
                        if (pkt_type == MCAST_FLTR)
                                mac_addr[0] |= 0x1;     /* Set multicast bit */
                }

                /* Need to reset this to zero for all iterations */
                new_fltr.flag = 0;
                if (is_tx_fltr) {
                        /* TX rules source from the VSI itself */
                        new_fltr.flag |= ICE_FLTR_TX;
                        new_fltr.src = hw_vsi_id;
                } else {
                        /* RX rules source from the logical port */
                        new_fltr.flag |= ICE_FLTR_RX;
                        new_fltr.src = lport;
                }

                new_fltr.fltr_act = ICE_FWD_TO_VSI;
                new_fltr.vsi_handle = vsi_handle;
                new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
                f_list_entry.fltr_info = new_fltr;
                recp_list = &sw->recp_list[recipe_id];

                status = ice_add_rule_internal(hw, recp_list, lport,
                                               &f_list_entry);
                if (status != ICE_SUCCESS)
                        goto set_promisc_exit;
        }

set_promisc_exit:
        return status;
}
5420
5421 /**
5422  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5423  * @hw: pointer to the hardware structure
5424  * @vsi_handle: VSI handle to configure
5425  * @promisc_mask: mask of promiscuous config bits
5426  * @vid: VLAN ID to set VLAN promiscuous
5427  */
5428 enum ice_status
5429 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5430                     u16 vid)
5431 {
5432         return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5433                                     hw->port_info->lport,
5434                                     hw->switch_info);
5435 }
5436
5437 /**
5438  * _ice_set_vlan_vsi_promisc
5439  * @hw: pointer to the hardware structure
5440  * @vsi_handle: VSI handle to configure
5441  * @promisc_mask: mask of promiscuous config bits
5442  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5443  * @lport: logical port number to configure promisc mode
5444  * @sw: pointer to switch info struct for which function add rule
5445  *
5446  * Configure VSI with all associated VLANs to given promiscuous mode(s)
5447  */
5448 static enum ice_status
5449 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5450                           bool rm_vlan_promisc, u8 lport,
5451                           struct ice_switch_info *sw)
5452 {
5453         struct ice_fltr_list_entry *list_itr, *tmp;
5454         struct LIST_HEAD_TYPE vsi_list_head;
5455         struct LIST_HEAD_TYPE *vlan_head;
5456         struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5457         enum ice_status status;
5458         u16 vlan_id;
5459
5460         INIT_LIST_HEAD(&vsi_list_head);
5461         vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
5462         vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
5463         ice_acquire_lock(vlan_lock);
5464         status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
5465                                           &vsi_list_head);
5466         ice_release_lock(vlan_lock);
5467         if (status)
5468                 goto free_fltr_list;
5469
5470         LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
5471                             list_entry) {
5472                 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
5473                 if (rm_vlan_promisc)
5474                         status =  _ice_clear_vsi_promisc(hw, vsi_handle,
5475                                                          promisc_mask,
5476                                                          vlan_id, sw);
5477                 else
5478                         status =  _ice_set_vsi_promisc(hw, vsi_handle,
5479                                                        promisc_mask, vlan_id,
5480                                                        lport, sw);
5481                 if (status)
5482                         break;
5483         }
5484
5485 free_fltr_list:
5486         LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
5487                                  ice_fltr_list_entry, list_entry) {
5488                 LIST_DEL(&list_itr->list_entry);
5489                 ice_free(hw, list_itr);
5490         }
5491         return status;
5492 }
5493
5494 /**
5495  * ice_set_vlan_vsi_promisc
5496  * @hw: pointer to the hardware structure
5497  * @vsi_handle: VSI handle to configure
5498  * @promisc_mask: mask of promiscuous config bits
5499  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5500  *
5501  * Configure VSI with all associated VLANs to given promiscuous mode(s)
5502  */
5503 enum ice_status
5504 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5505                          bool rm_vlan_promisc)
5506 {
5507         return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5508                                          rm_vlan_promisc, hw->port_info->lport,
5509                                          hw->switch_info);
5510 }
5511
5512 /**
5513  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
5514  * @hw: pointer to the hardware structure
5515  * @vsi_handle: VSI handle to remove filters from
5516  * @recp_list: recipe list from which function remove fltr
5517  * @lkup: switch rule filter lookup type
5518  */
5519 static void
5520 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
5521                          struct ice_sw_recipe *recp_list,
5522                          enum ice_sw_lkup_type lkup)
5523 {
5524         struct ice_fltr_list_entry *fm_entry;
5525         struct LIST_HEAD_TYPE remove_list_head;
5526         struct LIST_HEAD_TYPE *rule_head;
5527         struct ice_fltr_list_entry *tmp;
5528         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5529         enum ice_status status;
5530
5531         INIT_LIST_HEAD(&remove_list_head);
5532         rule_lock = &recp_list[lkup].filt_rule_lock;
5533         rule_head = &recp_list[lkup].filt_rules;
5534         ice_acquire_lock(rule_lock);
5535         status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
5536                                           &remove_list_head);
5537         ice_release_lock(rule_lock);
5538         if (status)
5539                 return;
5540
5541         switch (lkup) {
5542         case ICE_SW_LKUP_MAC:
5543                 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
5544                 break;
5545         case ICE_SW_LKUP_VLAN:
5546                 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
5547                 break;
5548         case ICE_SW_LKUP_PROMISC:
5549         case ICE_SW_LKUP_PROMISC_VLAN:
5550                 ice_remove_promisc(hw, lkup, &remove_list_head);
5551                 break;
5552         case ICE_SW_LKUP_MAC_VLAN:
5553                 ice_remove_mac_vlan(hw, &remove_list_head);
5554                 break;
5555         case ICE_SW_LKUP_ETHERTYPE:
5556         case ICE_SW_LKUP_ETHERTYPE_MAC:
5557                 ice_remove_eth_mac(hw, &remove_list_head);
5558                 break;
5559         case ICE_SW_LKUP_DFLT:
5560                 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
5561                 break;
5562         case ICE_SW_LKUP_LAST:
5563                 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
5564                 break;
5565         }
5566
5567         LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5568                                  ice_fltr_list_entry, list_entry) {
5569                 LIST_DEL(&fm_entry->list_entry);
5570                 ice_free(hw, fm_entry);
5571         }
5572 }
5573
5574 /**
5575  * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5576  * @hw: pointer to the hardware structure
5577  * @vsi_handle: VSI handle to remove filters from
5578  * @sw: pointer to switch info struct
5579  */
5580 static void
5581 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5582                          struct ice_switch_info *sw)
5583 {
5584         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5585
5586         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5587                                  sw->recp_list, ICE_SW_LKUP_MAC);
5588         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5589                                  sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5590         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5591                                  sw->recp_list, ICE_SW_LKUP_PROMISC);
5592         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5593                                  sw->recp_list, ICE_SW_LKUP_VLAN);
5594         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5595                                  sw->recp_list, ICE_SW_LKUP_DFLT);
5596         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5597                                  sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5598         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5599                                  sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5600         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5601                                  sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5602 }
5603
5604 /**
5605  * ice_remove_vsi_fltr - Remove all filters for a VSI
5606  * @hw: pointer to the hardware structure
5607  * @vsi_handle: VSI handle to remove filters from
5608  */
5609 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
5610 {
5611         ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5612 }
5613
5614 /**
5615  * ice_alloc_res_cntr - allocating resource counter
5616  * @hw: pointer to the hardware structure
5617  * @type: type of resource
5618  * @alloc_shared: if set it is shared else dedicated
5619  * @num_items: number of entries requested for FD resource type
5620  * @counter_id: counter index returned by AQ call
5621  */
5622 enum ice_status
5623 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5624                    u16 *counter_id)
5625 {
5626         struct ice_aqc_alloc_free_res_elem *buf;
5627         enum ice_status status;
5628         u16 buf_len;
5629
5630         /* Allocate resource */
5631         buf_len = ice_struct_size(buf, elem, 1);
5632         buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5633         if (!buf)
5634                 return ICE_ERR_NO_MEMORY;
5635
5636         buf->num_elems = CPU_TO_LE16(num_items);
5637         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5638                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
5639
5640         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5641                                        ice_aqc_opc_alloc_res, NULL);
5642         if (status)
5643                 goto exit;
5644
5645         *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5646
5647 exit:
5648         ice_free(hw, buf);
5649         return status;
5650 }
5651
5652 /**
5653  * ice_free_res_cntr - free resource counter
5654  * @hw: pointer to the hardware structure
5655  * @type: type of resource
5656  * @alloc_shared: if set it is shared else dedicated
5657  * @num_items: number of entries to be freed for FD resource type
5658  * @counter_id: counter ID resource which needs to be freed
5659  */
5660 enum ice_status
5661 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5662                   u16 counter_id)
5663 {
5664         struct ice_aqc_alloc_free_res_elem *buf;
5665         enum ice_status status;
5666         u16 buf_len;
5667
5668         /* Free resource */
5669         buf_len = ice_struct_size(buf, elem, 1);
5670         buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5671         if (!buf)
5672                 return ICE_ERR_NO_MEMORY;
5673
5674         buf->num_elems = CPU_TO_LE16(num_items);
5675         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5676                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
5677         buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5678
5679         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5680                                        ice_aqc_opc_free_res, NULL);
5681         if (status)
5682                 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
5683
5684         ice_free(hw, buf);
5685         return status;
5686 }
5687
5688 /**
5689  * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5690  * @hw: pointer to the hardware structure
5691  * @counter_id: returns counter index
5692  */
5693 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5694 {
5695         return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5696                                   ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5697                                   counter_id);
5698 }
5699
5700 /**
5701  * ice_free_vlan_res_counter - Free counter resource for VLAN type
5702  * @hw: pointer to the hardware structure
5703  * @counter_id: counter index to be freed
5704  */
5705 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5706 {
5707         return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5708                                  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5709                                  counter_id);
5710 }
5711
/**
 * ice_alloc_res_lg_act - add large action resource
 * @hw: pointer to the hardware structure
 * @l_id: large action ID to fill it in
 * @num_acts: number of actions to hold with a large action entry
 *
 * Allocates a wide-table entry sized for @num_acts actions via the
 * allocate-resource admin queue command and returns its index in @l_id.
 */
static enum ice_status
ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	enum ice_status status;
	u16 buf_len;

	if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
		return ICE_ERR_PARAM;

	/* Allocate resource for large action */
	buf_len = ice_struct_size(sw_buf, elem, 1);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
	if (!sw_buf)
		return ICE_ERR_NO_MEMORY;

	sw_buf->num_elems = CPU_TO_LE16(1);

	/* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
	 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
	 * If num_acts is greater than 2, then use
	 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
	 * The num_acts cannot exceed 4. This was ensured at the
	 * beginning of the function.
	 */
	if (num_acts == 1)
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
	else if (num_acts == 2)
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
	else
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (!status)
		*l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);

	ice_free(hw, sw_buf);
	return status;
}
5758
5759 /**
5760  * ice_add_mac_with_sw_marker - add filter with sw marker
5761  * @hw: pointer to the hardware structure
5762  * @f_info: filter info structure containing the MAC filter information
5763  * @sw_marker: sw marker to tag the Rx descriptor with
5764  */
5765 enum ice_status
5766 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5767                            u16 sw_marker)
5768 {
5769         struct ice_fltr_mgmt_list_entry *m_entry;
5770         struct ice_fltr_list_entry fl_info;
5771         struct ice_sw_recipe *recp_list;
5772         struct LIST_HEAD_TYPE l_head;
5773         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5774         enum ice_status ret;
5775         bool entry_exists;
5776         u16 lg_act_id;
5777
5778         if (f_info->fltr_act != ICE_FWD_TO_VSI)
5779                 return ICE_ERR_PARAM;
5780
5781         if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5782                 return ICE_ERR_PARAM;
5783
5784         if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5785                 return ICE_ERR_PARAM;
5786
5787         if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5788                 return ICE_ERR_PARAM;
5789         f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5790
5791         /* Add filter if it doesn't exist so then the adding of large
5792          * action always results in update
5793          */
5794
5795         INIT_LIST_HEAD(&l_head);
5796         fl_info.fltr_info = *f_info;
5797         LIST_ADD(&fl_info.list_entry, &l_head);
5798
5799         entry_exists = false;
5800         ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5801                                hw->port_info->lport);
5802         if (ret == ICE_ERR_ALREADY_EXISTS)
5803                 entry_exists = true;
5804         else if (ret)
5805                 return ret;
5806
5807         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5808         rule_lock = &recp_list->filt_rule_lock;
5809         ice_acquire_lock(rule_lock);
5810         /* Get the book keeping entry for the filter */
5811         m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5812         if (!m_entry)
5813                 goto exit_error;
5814
5815         /* If counter action was enabled for this rule then don't enable
5816          * sw marker large action
5817          */
5818         if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5819                 ret = ICE_ERR_PARAM;
5820                 goto exit_error;
5821         }
5822
5823         /* if same marker was added before */
5824         if (m_entry->sw_marker_id == sw_marker) {
5825                 ret = ICE_ERR_ALREADY_EXISTS;
5826                 goto exit_error;
5827         }
5828
5829         /* Allocate a hardware table entry to hold large act. Three actions
5830          * for marker based large action
5831          */
5832         ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5833         if (ret)
5834                 goto exit_error;
5835
5836         if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5837                 goto exit_error;
5838
5839         /* Update the switch rule to add the marker action */
5840         ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5841         if (!ret) {
5842                 ice_release_lock(rule_lock);
5843                 return ret;
5844         }
5845
5846 exit_error:
5847         ice_release_lock(rule_lock);
5848         /* only remove entry if it did not exist previously */
5849         if (!entry_exists)
5850                 ret = ice_remove_mac(hw, &l_head);
5851
5852         return ret;
5853 }
5854
/**
 * ice_add_mac_with_counter - add filter with counter enabled
 * @hw: pointer to the hardware structure
 * @f_info: pointer to filter info structure containing the MAC filter
 *          information
 *
 * Adds (or reuses) a MAC forwarding rule and attaches a VLAN counter via a
 * large action. If the rule was created by this call and the counter could
 * not be attached, the rule is removed again.
 */
enum ice_status
ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct ice_sw_recipe *recp_list;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status ret;
	bool entry_exist;
	u16 counter_id;
	u16 lg_act_id;

	/* Only forward-to-VSI MAC rules are supported */
	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;

	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];

	entry_exist = false;

	rule_lock = &recp_list->filt_rule_lock;

	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	 */
	INIT_LIST_HEAD(&l_head);

	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);

	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
			       hw->port_info->lport);
	if (ret == ICE_ERR_ALREADY_EXISTS)
		entry_exist = true;
	else if (ret)
		return ret;

	ice_acquire_lock(rule_lock);
	/* Get the book keeping entry for the filter */
	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
	if (!m_entry) {
		ret = ICE_ERR_BAD_PTR;
		goto exit_error;
	}

	/* Don't enable counter for a filter for which sw marker was enabled */
	if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
		ret = ICE_ERR_PARAM;
		goto exit_error;
	}

	/* If a counter was already enabled then don't need to add again */
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_ALREADY_EXISTS;
		goto exit_error;
	}

	/* Allocate a hardware table entry to VLAN counter */
	ret = ice_alloc_vlan_res_counter(hw, &counter_id);
	if (ret)
		goto exit_error;

	/* Allocate a hardware table entry to hold large act. Two actions for
	 * counter based large action
	 */
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
	if (ret)
		goto exit_error;

	/* NOTE(review): on this path ret still holds ICE_SUCCESS, and neither
	 * the counter nor the large-action resource allocated above is
	 * released - confirm whether this is intentional.
	 */
	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
		goto exit_error;

	/* Update the switch rule to add the counter action */
	ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
	if (!ret) {
		ice_release_lock(rule_lock);
		return ret;
	}

exit_error:
	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
	if (!entry_exist)
		ret = ice_remove_mac(hw, &l_head);

	return ret;
}
5953
/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 4 in the actual packet header and src address is at 6, 8, 10
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 0, 2 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_SCTP_IL,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_VXLAN_GPE,	{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20 } },
	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
	{ ICE_PFCP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_L2TPV3,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_ESP,		{ 0, 2, 4, 6 } },
	{ ICE_AH,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_NAT_T,		{ 8, 10, 12, 14 } },
	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
	{ ICE_VLAN_EX,		{ 0, 2 } },
};
5992
/* The following table describes preferred grouping of recipes.
 * If a recipe that needs to be programmed is a superset or matches one of the
 * following combinations, then the recipe needs to be chained as per the
 * following policy.
 */

/* Maps each software protocol type to the hardware protocol ID used in
 * field vectors. Entries appear in enum ice_protocol_type order.
 */
static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_PFCP,		ICE_UDP_ILOS_HW },
	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
	{ ICE_ESP,		ICE_ESP_HW },
	{ ICE_AH,		ICE_AH_HW },
	{ ICE_NAT_T,		ICE_UDP_ILOS_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
	{ ICE_VLAN_EX,		ICE_VLAN_OF_HW },
};
6026
/**
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 * @tun_type: tunnel type the recipe must also match
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 * A recipe matches when its tunnel type equals @tun_type and every lookup
 * word in @lkup_exts (offset, protocol ID, and field mask) is present in
 * the recipe's word list, with equal word counts.
 */
static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
			 enum ice_sw_tunnel_type tun_type)
{
	/* passed to ice_get_recp_frm_fw(); presumably limits the FW recipe
	 * refresh to a single fetch across iterations - confirm in callee
	 */
	bool refresh_required = true;
	struct ice_sw_recipe *recp;
	u8 i;

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,
						&refresh_required))
				continue;

		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)
			continue;

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
			struct ice_fv_word *be = lkup_exts->fv_words;
			u16 *cr = recp[i].lkup_exts.field_mask;
			u16 *de = lkup_exts->field_mask;
			bool found = true;
			u8 pe, qr;

			/* ar, cr, and qr are related to the recipe words, while
			 * be, de, and pe are related to the lookup words
			 */
			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
				     qr++) {
					if (ar[qr].off == be[pe].off &&
					    ar[qr].prot_id == be[pe].prot_id &&
					    cr[qr] == de[pe])
						/* Found the "pe"th word in the
						 * given recipe
						 */
						break;
				}
				/* After walking through all the words in the
				 * "i"th recipe if "p"th word was not found then
				 * this recipe is not what we are looking for.
				 * So break out from this loop and try the next
				 * recipe
				 */
				if (qr >= recp[i].lkup_exts.n_val_words) {
					found = false;
					break;
				}
			}
			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 */
			if (tun_type == recp[i].tun_type && found)
				return i; /* Return the recipe ID */
		}
	}
	return ICE_MAX_NUM_RECIPES;
}
6103
6104 /**
6105  * ice_prot_type_to_id - get protocol ID from protocol type
6106  * @type: protocol type
6107  * @id: pointer to variable that will receive the ID
6108  *
6109  * Returns true if found, false otherwise
6110  */
6111 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6112 {
6113         u8 i;
6114
6115         for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6116                 if (ice_prot_id_tbl[i].type == type) {
6117                         *id = ice_prot_id_tbl[i].protocol_id;
6118                         return true;
6119                 }
6120         return false;
6121 }
6122
6123 /**
6124  * ice_find_valid_words - count valid words
6125  * @rule: advanced rule with lookup information
6126  * @lkup_exts: byte offset extractions of the words that are valid
6127  *
6128  * calculate valid words in a lookup rule using mask value
6129  */
6130 static u8
6131 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6132                      struct ice_prot_lkup_ext *lkup_exts)
6133 {
6134         u8 j, word, prot_id, ret_val;
6135
6136         if (!ice_prot_type_to_id(rule->type, &prot_id))
6137                 return 0;
6138
6139         word = lkup_exts->n_val_words;
6140
6141         for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6142                 if (((u16 *)&rule->m_u)[j] &&
6143                     rule->type < ARRAY_SIZE(ice_prot_ext)) {
6144                         /* No more space to accommodate */
6145                         if (word >= ICE_MAX_CHAIN_WORDS)
6146                                 return 0;
6147                         lkup_exts->fv_words[word].off =
6148                                 ice_prot_ext[rule->type].offs[j];
6149                         lkup_exts->fv_words[word].prot_id =
6150                                 ice_prot_id_tbl[rule->type].protocol_id;
6151                         lkup_exts->field_mask[word] =
6152                                 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6153                         word++;
6154                 }
6155
6156         ret_val = word - lkup_exts->n_val_words;
6157         lkup_exts->n_val_words = word;
6158
6159         return ret_val;
6160 }
6161
6162 /**
6163  * ice_create_first_fit_recp_def - Create a recipe grouping
6164  * @hw: pointer to the hardware structure
6165  * @lkup_exts: an array of protocol header extractions
6166  * @rg_list: pointer to a list that stores new recipe groups
6167  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6168  *
6169  * Using first fit algorithm, take all the words that are still not done
6170  * and start grouping them in 4-word groups. Each group makes up one
6171  * recipe.
6172  */
6173 static enum ice_status
6174 ice_create_first_fit_recp_def(struct ice_hw *hw,
6175                               struct ice_prot_lkup_ext *lkup_exts,
6176                               struct LIST_HEAD_TYPE *rg_list,
6177                               u8 *recp_cnt)
6178 {
6179         struct ice_pref_recipe_group *grp = NULL;
6180         u8 j;
6181
6182         *recp_cnt = 0;
6183
6184         if (!lkup_exts->n_val_words) {
6185                 struct ice_recp_grp_entry *entry;
6186
6187                 entry = (struct ice_recp_grp_entry *)
6188                         ice_malloc(hw, sizeof(*entry));
6189                 if (!entry)
6190                         return ICE_ERR_NO_MEMORY;
6191                 LIST_ADD(&entry->l_entry, rg_list);
6192                 grp = &entry->r_group;
6193                 (*recp_cnt)++;
6194                 grp->n_val_pairs = 0;
6195         }
6196
6197         /* Walk through every word in the rule to check if it is not done. If so
6198          * then this word needs to be part of a new recipe.
6199          */
6200         for (j = 0; j < lkup_exts->n_val_words; j++)
6201                 if (!ice_is_bit_set(lkup_exts->done, j)) {
6202                         if (!grp ||
6203                             grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6204                                 struct ice_recp_grp_entry *entry;
6205
6206                                 entry = (struct ice_recp_grp_entry *)
6207                                         ice_malloc(hw, sizeof(*entry));
6208                                 if (!entry)
6209                                         return ICE_ERR_NO_MEMORY;
6210                                 LIST_ADD(&entry->l_entry, rg_list);
6211                                 grp = &entry->r_group;
6212                                 (*recp_cnt)++;
6213                         }
6214
6215                         grp->pairs[grp->n_val_pairs].prot_id =
6216                                 lkup_exts->fv_words[j].prot_id;
6217                         grp->pairs[grp->n_val_pairs].off =
6218                                 lkup_exts->fv_words[j].off;
6219                         grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6220                         grp->n_val_pairs++;
6221                 }
6222
6223         return ICE_SUCCESS;
6224 }
6225
6226 /**
6227  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6228  * @hw: pointer to the hardware structure
6229  * @fv_list: field vector with the extraction sequence information
6230  * @rg_list: recipe groupings with protocol-offset pairs
6231  *
6232  * Helper function to fill in the field vector indices for protocol-offset
6233  * pairs. These indexes are then ultimately programmed into a recipe.
6234  */
6235 static enum ice_status
6236 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6237                        struct LIST_HEAD_TYPE *rg_list)
6238 {
6239         struct ice_sw_fv_list_entry *fv;
6240         struct ice_recp_grp_entry *rg;
6241         struct ice_fv_word *fv_ext;
6242
6243         if (LIST_EMPTY(fv_list))
6244                 return ICE_SUCCESS;
6245
6246         fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6247         fv_ext = fv->fv_ptr->ew;
6248
6249         LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6250                 u8 i;
6251
6252                 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6253                         struct ice_fv_word *pr;
6254                         bool found = false;
6255                         u16 mask;
6256                         u8 j;
6257
6258                         pr = &rg->r_group.pairs[i];
6259                         mask = rg->r_group.mask[i];
6260
6261                         for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6262                                 if (fv_ext[j].prot_id == pr->prot_id &&
6263                                     fv_ext[j].off == pr->off) {
6264                                         found = true;
6265
6266                                         /* Store index of field vector */
6267                                         rg->fv_idx[i] = j;
6268                                         rg->fv_mask[i] = mask;
6269                                         break;
6270                                 }
6271
6272                         /* Protocol/offset could not be found, caller gave an
6273                          * invalid pair
6274                          */
6275                         if (!found)
6276                                 return ICE_ERR_PARAM;
6277                 }
6278         }
6279
6280         return ICE_SUCCESS;
6281 }
6282
6283 /**
6284  * ice_find_free_recp_res_idx - find free result indexes for recipe
6285  * @hw: pointer to hardware structure
6286  * @profiles: bitmap of profiles that will be associated with the new recipe
6287  * @free_idx: pointer to variable to receive the free index bitmap
6288  *
6289  * The algorithm used here is:
6290  *      1. When creating a new recipe, create a set P which contains all
6291  *         Profiles that will be associated with our new recipe
6292  *
6293  *      2. For each Profile p in set P:
6294  *          a. Add all recipes associated with Profile p into set R
6295  *          b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6296  *              [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6297  *              i. Or just assume they all have the same possible indexes:
6298  *                      44, 45, 46, 47
6299  *                      i.e., PossibleIndexes = 0x0000F00000000000
6300  *
6301  *      3. For each Recipe r in set R:
6302  *          a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6303  *          b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6304  *
6305  *      FreeIndexes will contain the bits indicating the indexes free for use,
6306  *      then the code needs to update the recipe[r].used_result_idx_bits to
6307  *      indicate which indexes were selected for use by this recipe.
6308  */
6309 static u16
6310 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6311                            ice_bitmap_t *free_idx)
6312 {
6313         ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6314         ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6315         ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6316         u16 bit;
6317
6318         ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6319         ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6320         ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6321         ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
6322
6323         ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6324
6325         /* For each profile we are going to associate the recipe with, add the
6326          * recipes that are associated with that profile. This will give us
6327          * the set of recipes that our recipe may collide with. Also, determine
6328          * what possible result indexes are usable given this set of profiles.
6329          */
6330         ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6331                 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6332                               ICE_MAX_NUM_RECIPES);
6333                 ice_and_bitmap(possible_idx, possible_idx,
6334                                hw->switch_info->prof_res_bm[bit],
6335                                ICE_MAX_FV_WORDS);
6336         }
6337
6338         /* For each recipe that our new recipe may collide with, determine
6339          * which indexes have been used.
6340          */
6341         ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6342                 ice_or_bitmap(used_idx, used_idx,
6343                               hw->switch_info->recp_list[bit].res_idxs,
6344                               ICE_MAX_FV_WORDS);
6345
6346         ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6347
6348         /* return number of free indexes */
6349         return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6350 }
6351
6352 /**
6353  * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6354  * @hw: pointer to hardware structure
6355  * @rm: recipe management list entry
6356  * @profiles: bitmap of profiles that will be associated.
6357  */
6358 static enum ice_status
6359 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6360                   ice_bitmap_t *profiles)
6361 {
6362         ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6363         struct ice_aqc_recipe_data_elem *tmp;
6364         struct ice_aqc_recipe_data_elem *buf;
6365         struct ice_recp_grp_entry *entry;
6366         enum ice_status status;
6367         u16 free_res_idx;
6368         u16 recipe_count;
6369         u8 chain_idx;
6370         u8 recps = 0;
6371
6372         /* When more than one recipe are required, another recipe is needed to
6373          * chain them together. Matching a tunnel metadata ID takes up one of
6374          * the match fields in the chaining recipe reducing the number of
6375          * chained recipes by one.
6376          */
6377          /* check number of free result indices */
6378         ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6379         free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6380
6381         ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6382                   free_res_idx, rm->n_grp_count);
6383
6384         if (rm->n_grp_count > 1) {
6385                 if (rm->n_grp_count > free_res_idx)
6386                         return ICE_ERR_MAX_LIMIT;
6387
6388                 rm->n_grp_count++;
6389         }
6390
6391         if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6392                 return ICE_ERR_MAX_LIMIT;
6393
6394         tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6395                                                             ICE_MAX_NUM_RECIPES,
6396                                                             sizeof(*tmp));
6397         if (!tmp)
6398                 return ICE_ERR_NO_MEMORY;
6399
6400         buf = (struct ice_aqc_recipe_data_elem *)
6401                 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6402         if (!buf) {
6403                 status = ICE_ERR_NO_MEMORY;
6404                 goto err_mem;
6405         }
6406
6407         ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6408         recipe_count = ICE_MAX_NUM_RECIPES;
6409         status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6410                                    NULL);
6411         if (status || recipe_count == 0)
6412                 goto err_unroll;
6413
6414         /* Allocate the recipe resources, and configure them according to the
6415          * match fields from protocol headers and extracted field vectors.
6416          */
6417         chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6418         LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6419                 u8 i;
6420
6421                 status = ice_alloc_recipe(hw, &entry->rid);
6422                 if (status)
6423                         goto err_unroll;
6424
6425                 /* Clear the result index of the located recipe, as this will be
6426                  * updated, if needed, later in the recipe creation process.
6427                  */
6428                 tmp[0].content.result_indx = 0;
6429
6430                 buf[recps] = tmp[0];
6431                 buf[recps].recipe_indx = (u8)entry->rid;
6432                 /* if the recipe is a non-root recipe RID should be programmed
6433                  * as 0 for the rules to be applied correctly.
6434                  */
6435                 buf[recps].content.rid = 0;
6436                 ice_memset(&buf[recps].content.lkup_indx, 0,
6437                            sizeof(buf[recps].content.lkup_indx),
6438                            ICE_NONDMA_MEM);
6439
6440                 /* All recipes use look-up index 0 to match switch ID. */
6441                 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6442                 buf[recps].content.mask[0] =
6443                         CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6444                 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6445                  * to be 0
6446                  */
6447                 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6448                         buf[recps].content.lkup_indx[i] = 0x80;
6449                         buf[recps].content.mask[i] = 0;
6450                 }
6451
6452                 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6453                         buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6454                         buf[recps].content.mask[i + 1] =
6455                                 CPU_TO_LE16(entry->fv_mask[i]);
6456                 }
6457
6458                 if (rm->n_grp_count > 1) {
6459                         /* Checks to see if there really is a valid result index
6460                          * that can be used.
6461                          */
6462                         if (chain_idx >= ICE_MAX_FV_WORDS) {
6463                                 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
6464                                 status = ICE_ERR_MAX_LIMIT;
6465                                 goto err_unroll;
6466                         }
6467
6468                         entry->chain_idx = chain_idx;
6469                         buf[recps].content.result_indx =
6470                                 ICE_AQ_RECIPE_RESULT_EN |
6471                                 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
6472                                  ICE_AQ_RECIPE_RESULT_DATA_M);
6473                         ice_clear_bit(chain_idx, result_idx_bm);
6474                         chain_idx = ice_find_first_bit(result_idx_bm,
6475                                                        ICE_MAX_FV_WORDS);
6476                 }
6477
6478                 /* fill recipe dependencies */
6479                 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
6480                                 ICE_MAX_NUM_RECIPES);
6481                 ice_set_bit(buf[recps].recipe_indx,
6482                             (ice_bitmap_t *)buf[recps].recipe_bitmap);
6483                 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6484                 recps++;
6485         }
6486
6487         if (rm->n_grp_count == 1) {
6488                 rm->root_rid = buf[0].recipe_indx;
6489                 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
6490                 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
6491                 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
6492                         ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
6493                                    sizeof(buf[0].recipe_bitmap),
6494                                    ICE_NONDMA_TO_NONDMA);
6495                 } else {
6496                         status = ICE_ERR_BAD_PTR;
6497                         goto err_unroll;
6498                 }
6499                 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
6500                  * the recipe which is getting created if specified
6501                  * by user. Usually any advanced switch filter, which results
6502                  * into new extraction sequence, ended up creating a new recipe
6503                  * of type ROOT and usually recipes are associated with profiles
6504                  * Switch rule referreing newly created recipe, needs to have
6505                  * either/or 'fwd' or 'join' priority, otherwise switch rule
6506                  * evaluation will not happen correctly. In other words, if
6507                  * switch rule to be evaluated on priority basis, then recipe
6508                  * needs to have priority, otherwise it will be evaluated last.
6509                  */
6510                 buf[0].content.act_ctrl_fwd_priority = rm->priority;
6511         } else {
6512                 struct ice_recp_grp_entry *last_chain_entry;
6513                 u16 rid, i;
6514
6515                 /* Allocate the last recipe that will chain the outcomes of the
6516                  * other recipes together
6517                  */
6518                 status = ice_alloc_recipe(hw, &rid);
6519                 if (status)
6520                         goto err_unroll;
6521
6522                 buf[recps].recipe_indx = (u8)rid;
6523                 buf[recps].content.rid = (u8)rid;
6524                 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
6525                 /* the new entry created should also be part of rg_list to
6526                  * make sure we have complete recipe
6527                  */
6528                 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
6529                         sizeof(*last_chain_entry));
6530                 if (!last_chain_entry) {
6531                         status = ICE_ERR_NO_MEMORY;
6532                         goto err_unroll;
6533                 }
6534                 last_chain_entry->rid = rid;
6535                 ice_memset(&buf[recps].content.lkup_indx, 0,
6536                            sizeof(buf[recps].content.lkup_indx),
6537                            ICE_NONDMA_MEM);
6538                 /* All recipes use look-up index 0 to match switch ID. */
6539                 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6540                 buf[recps].content.mask[0] =
6541                         CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6542                 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6543                         buf[recps].content.lkup_indx[i] =
6544                                 ICE_AQ_RECIPE_LKUP_IGNORE;
6545                         buf[recps].content.mask[i] = 0;
6546                 }
6547
6548                 i = 1;
6549                 /* update r_bitmap with the recp that is used for chaining */
6550                 ice_set_bit(rid, rm->r_bitmap);
6551                 /* this is the recipe that chains all the other recipes so it
6552                  * should not have a chaining ID to indicate the same
6553                  */
6554                 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
6555                 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
6556                                     l_entry) {
6557                         last_chain_entry->fv_idx[i] = entry->chain_idx;
6558                         buf[recps].content.lkup_indx[i] = entry->chain_idx;
6559                         buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
6560                         ice_set_bit(entry->rid, rm->r_bitmap);
6561                 }
6562                 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
6563                 if (sizeof(buf[recps].recipe_bitmap) >=
6564                     sizeof(rm->r_bitmap)) {
6565                         ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
6566                                    sizeof(buf[recps].recipe_bitmap),
6567                                    ICE_NONDMA_TO_NONDMA);
6568                 } else {
6569                         status = ICE_ERR_BAD_PTR;
6570                         goto err_unroll;
6571                 }
6572                 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6573
6574                 recps++;
6575                 rm->root_rid = (u8)rid;
6576         }
6577         status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6578         if (status)
6579                 goto err_unroll;
6580
6581         status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
6582         ice_release_change_lock(hw);
6583         if (status)
6584                 goto err_unroll;
6585
6586         /* Every recipe that just got created add it to the recipe
6587          * book keeping list
6588          */
6589         LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6590                 struct ice_switch_info *sw = hw->switch_info;
6591                 bool is_root, idx_found = false;
6592                 struct ice_sw_recipe *recp;
6593                 u16 idx, buf_idx = 0;
6594
6595                 /* find buffer index for copying some data */
6596                 for (idx = 0; idx < rm->n_grp_count; idx++)
6597                         if (buf[idx].recipe_indx == entry->rid) {
6598                                 buf_idx = idx;
6599                                 idx_found = true;
6600                         }
6601
6602                 if (!idx_found) {
6603                         status = ICE_ERR_OUT_OF_RANGE;
6604                         goto err_unroll;
6605                 }
6606
6607                 recp = &sw->recp_list[entry->rid];
6608                 is_root = (rm->root_rid == entry->rid);
6609                 recp->is_root = is_root;
6610
6611                 recp->root_rid = entry->rid;
6612                 recp->big_recp = (is_root && rm->n_grp_count > 1);
6613
6614                 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
6615                            entry->r_group.n_val_pairs *
6616                            sizeof(struct ice_fv_word),
6617                            ICE_NONDMA_TO_NONDMA);
6618
6619                 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
6620                            sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
6621
6622                 /* Copy non-result fv index values and masks to recipe. This
6623                  * call will also update the result recipe bitmask.
6624                  */
6625                 ice_collect_result_idx(&buf[buf_idx], recp);
6626
6627                 /* for non-root recipes, also copy to the root, this allows
6628                  * easier matching of a complete chained recipe
6629                  */
6630                 if (!is_root)
6631                         ice_collect_result_idx(&buf[buf_idx],
6632                                                &sw->recp_list[rm->root_rid]);
6633
6634                 recp->n_ext_words = entry->r_group.n_val_pairs;
6635                 recp->chain_idx = entry->chain_idx;
6636                 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
6637                 recp->n_grp_count = rm->n_grp_count;
6638                 recp->tun_type = rm->tun_type;
6639                 recp->recp_created = true;
6640         }
6641         rm->root_buf = buf;
6642         ice_free(hw, tmp);
6643         return status;
6644
6645 err_unroll:
6646 err_mem:
6647         ice_free(hw, tmp);
6648         ice_free(hw, buf);
6649         return status;
6650 }
6651
6652 /**
6653  * ice_create_recipe_group - creates recipe group
6654  * @hw: pointer to hardware structure
6655  * @rm: recipe management list entry
6656  * @lkup_exts: lookup elements
6657  */
6658 static enum ice_status
6659 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6660                         struct ice_prot_lkup_ext *lkup_exts)
6661 {
6662         enum ice_status status;
6663         u8 recp_count = 0;
6664
6665         rm->n_grp_count = 0;
6666
6667         /* Create recipes for words that are marked not done by packing them
6668          * as best fit.
6669          */
6670         status = ice_create_first_fit_recp_def(hw, lkup_exts,
6671                                                &rm->rg_list, &recp_count);
6672         if (!status) {
6673                 rm->n_grp_count += recp_count;
6674                 rm->n_ext_words = lkup_exts->n_val_words;
6675                 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6676                            sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6677                 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6678                            sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6679         }
6680
6681         return status;
6682 }
6683
6684 /**
6685  * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6686  * @hw: pointer to hardware structure
6687  * @lkups: lookup elements or match criteria for the advanced recipe, one
6688  *         structure per protocol header
6689  * @lkups_cnt: number of protocols
6690  * @bm: bitmap of field vectors to consider
6691  * @fv_list: pointer to a list that holds the returned field vectors
6692  */
6693 static enum ice_status
6694 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6695            ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6696 {
6697         enum ice_status status;
6698         u8 *prot_ids;
6699         u16 i;
6700
6701         if (!lkups_cnt)
6702                 return ICE_SUCCESS;
6703
6704         prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6705         if (!prot_ids)
6706                 return ICE_ERR_NO_MEMORY;
6707
6708         for (i = 0; i < lkups_cnt; i++)
6709                 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6710                         status = ICE_ERR_CFG;
6711                         goto free_mem;
6712                 }
6713
6714         /* Find field vectors that include all specified protocol types */
6715         status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6716
6717 free_mem:
6718         ice_free(hw, prot_ids);
6719         return status;
6720 }
6721
6722 /**
6723  * ice_tun_type_match_mask - determine if tun type needs a match mask
6724  * @tun_type: tunnel type
6725  * @mask: mask to be used for the tunnel
6726  */
6727 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6728 {
6729         switch (tun_type) {
6730         case ICE_SW_TUN_VXLAN_GPE:
6731         case ICE_SW_TUN_GENEVE:
6732         case ICE_SW_TUN_VXLAN:
6733         case ICE_SW_TUN_NVGRE:
6734         case ICE_SW_TUN_UDP:
6735         case ICE_ALL_TUNNELS:
6736         case ICE_SW_TUN_AND_NON_TUN_QINQ:
6737         case ICE_NON_TUN_QINQ:
6738         case ICE_SW_TUN_PPPOE_QINQ:
6739         case ICE_SW_TUN_PPPOE_PAY_QINQ:
6740         case ICE_SW_TUN_PPPOE_IPV4_QINQ:
6741         case ICE_SW_TUN_PPPOE_IPV6_QINQ:
6742                 *mask = ICE_TUN_FLAG_MASK;
6743                 return true;
6744
6745         case ICE_SW_TUN_GENEVE_VLAN:
6746         case ICE_SW_TUN_VXLAN_VLAN:
6747                 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6748                 return true;
6749
6750         default:
6751                 *mask = 0;
6752                 return false;
6753         }
6754 }
6755
6756 /**
6757  * ice_add_special_words - Add words that are not protocols, such as metadata
6758  * @rinfo: other information regarding the rule e.g. priority and action info
6759  * @lkup_exts: lookup word structure
6760  */
6761 static enum ice_status
6762 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6763                       struct ice_prot_lkup_ext *lkup_exts)
6764 {
6765         u16 mask;
6766
6767         /* If this is a tunneled packet, then add recipe index to match the
6768          * tunnel bit in the packet metadata flags.
6769          */
6770         if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6771                 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
6772                         u8 word = lkup_exts->n_val_words++;
6773
6774                         lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6775                         lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6776                         lkup_exts->field_mask[word] = mask;
6777                 } else {
6778                         return ICE_ERR_MAX_LIMIT;
6779                 }
6780         }
6781
6782         return ICE_SUCCESS;
6783 }
6784
6785 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6786  * @hw: pointer to hardware structure
6787  * @rinfo: other information regarding the rule e.g. priority and action info
6788  * @bm: pointer to memory for returning the bitmap of field vectors
6789  */
6790 static void
6791 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6792                          ice_bitmap_t *bm)
6793 {
6794         enum ice_prof_type prof_type;
6795
6796         ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
6797
6798         switch (rinfo->tun_type) {
6799         case ICE_NON_TUN:
6800         case ICE_NON_TUN_QINQ:
6801                 prof_type = ICE_PROF_NON_TUN;
6802                 break;
6803         case ICE_ALL_TUNNELS:
6804                 prof_type = ICE_PROF_TUN_ALL;
6805                 break;
6806         case ICE_SW_TUN_VXLAN_GPE:
6807         case ICE_SW_TUN_GENEVE:
6808         case ICE_SW_TUN_GENEVE_VLAN:
6809         case ICE_SW_TUN_VXLAN:
6810         case ICE_SW_TUN_VXLAN_VLAN:
6811         case ICE_SW_TUN_UDP:
6812         case ICE_SW_TUN_GTP:
6813                 prof_type = ICE_PROF_TUN_UDP;
6814                 break;
6815         case ICE_SW_TUN_NVGRE:
6816                 prof_type = ICE_PROF_TUN_GRE;
6817                 break;
6818         case ICE_SW_TUN_PPPOE:
6819         case ICE_SW_TUN_PPPOE_QINQ:
6820                 prof_type = ICE_PROF_TUN_PPPOE;
6821                 break;
6822         case ICE_SW_TUN_PPPOE_PAY:
6823         case ICE_SW_TUN_PPPOE_PAY_QINQ:
6824                 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
6825                 return;
6826         case ICE_SW_TUN_PPPOE_IPV4:
6827         case ICE_SW_TUN_PPPOE_IPV4_QINQ:
6828                 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
6829                 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6830                 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6831                 return;
6832         case ICE_SW_TUN_PPPOE_IPV4_TCP:
6833                 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6834                 return;
6835         case ICE_SW_TUN_PPPOE_IPV4_UDP:
6836                 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6837                 return;
6838         case ICE_SW_TUN_PPPOE_IPV6:
6839         case ICE_SW_TUN_PPPOE_IPV6_QINQ:
6840                 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
6841                 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6842                 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6843                 return;
6844         case ICE_SW_TUN_PPPOE_IPV6_TCP:
6845                 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6846                 return;
6847         case ICE_SW_TUN_PPPOE_IPV6_UDP:
6848                 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6849                 return;
6850         case ICE_SW_TUN_PROFID_IPV6_ESP:
6851         case ICE_SW_TUN_IPV6_ESP:
6852                 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6853                 return;
6854         case ICE_SW_TUN_PROFID_IPV6_AH:
6855         case ICE_SW_TUN_IPV6_AH:
6856                 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6857                 return;
6858         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6859         case ICE_SW_TUN_IPV6_L2TPV3:
6860                 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6861                 return;
6862         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6863         case ICE_SW_TUN_IPV6_NAT_T:
6864                 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6865                 return;
6866         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6867                 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6868                 return;
6869         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6870                 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6871                 return;
6872         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6873                 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6874                 return;
6875         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6876                 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6877                 return;
6878         case ICE_SW_TUN_IPV4_NAT_T:
6879                 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6880                 return;
6881         case ICE_SW_TUN_IPV4_L2TPV3:
6882                 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6883                 return;
6884         case ICE_SW_TUN_IPV4_ESP:
6885                 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6886                 return;
6887         case ICE_SW_TUN_IPV4_AH:
6888                 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6889                 return;
6890         case ICE_SW_IPV4_TCP:
6891                 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
6892                 return;
6893         case ICE_SW_IPV4_UDP:
6894                 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
6895                 return;
6896         case ICE_SW_IPV6_TCP:
6897                 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
6898                 return;
6899         case ICE_SW_IPV6_UDP:
6900                 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
6901                 return;
6902         case ICE_SW_TUN_IPV4_GTPU_IPV4:
6903                 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
6904                 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
6905                 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
6906                 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
6907                 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
6908                 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
6909                 return;
6910         case ICE_SW_TUN_IPV6_GTPU_IPV4:
6911                 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
6912                 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
6913                 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
6914                 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
6915                 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
6916                 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
6917                 return;
6918         case ICE_SW_TUN_IPV4_GTPU_IPV6:
6919                 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
6920                 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
6921                 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
6922                 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
6923                 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
6924                 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
6925                 return;
6926         case ICE_SW_TUN_IPV6_GTPU_IPV6:
6927                 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
6928                 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
6929                 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
6930                 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
6931                 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
6932                 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
6933                 return;
6934         case ICE_SW_TUN_AND_NON_TUN:
6935         case ICE_SW_TUN_AND_NON_TUN_QINQ:
6936         default:
6937                 prof_type = ICE_PROF_ALL;
6938                 break;
6939         }
6940
6941         ice_get_sw_fv_bitmap(hw, prof_type, bm);
6942 }
6943
6944 /**
6945  * ice_is_prof_rule - determine if rule type is a profile rule
6946  * @type: the rule type
6947  *
6948  * if the rule type is a profile rule, that means that there no field value
6949  * match required, in this case just a profile hit is required.
6950  */
6951 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
6952 {
6953         switch (type) {
6954         case ICE_SW_TUN_PROFID_IPV6_ESP:
6955         case ICE_SW_TUN_PROFID_IPV6_AH:
6956         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6957         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6958         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6959         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6960         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6961         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6962                 return true;
6963         default:
6964                 break;
6965         }
6966
6967         return false;
6968 }
6969
6970 /**
6971  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6972  * @hw: pointer to hardware structure
6973  * @lkups: lookup elements or match criteria for the advanced recipe, one
6974  *  structure per protocol header
6975  * @lkups_cnt: number of protocols
6976  * @rinfo: other information regarding the rule e.g. priority and action info
6977  * @rid: return the recipe ID of the recipe created
6978  */
6979 static enum ice_status
6980 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6981                    u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6982 {
6983         ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6984         ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6985         struct ice_prot_lkup_ext *lkup_exts;
6986         struct ice_recp_grp_entry *r_entry;
6987         struct ice_sw_fv_list_entry *fvit;
6988         struct ice_recp_grp_entry *r_tmp;
6989         struct ice_sw_fv_list_entry *tmp;
6990         enum ice_status status = ICE_SUCCESS;
6991         struct ice_sw_recipe *rm;
6992         u8 i;
6993
6994         if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6995                 return ICE_ERR_PARAM;
6996
6997         lkup_exts = (struct ice_prot_lkup_ext *)
6998                 ice_malloc(hw, sizeof(*lkup_exts));
6999         if (!lkup_exts)
7000                 return ICE_ERR_NO_MEMORY;
7001
7002         /* Determine the number of words to be matched and if it exceeds a
7003          * recipe's restrictions
7004          */
7005         for (i = 0; i < lkups_cnt; i++) {
7006                 u16 count;
7007
7008                 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7009                         status = ICE_ERR_CFG;
7010                         goto err_free_lkup_exts;
7011                 }
7012
7013                 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7014                 if (!count) {
7015                         status = ICE_ERR_CFG;
7016                         goto err_free_lkup_exts;
7017                 }
7018         }
7019
7020         rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7021         if (!rm) {
7022                 status = ICE_ERR_NO_MEMORY;
7023                 goto err_free_lkup_exts;
7024         }
7025
7026         /* Get field vectors that contain fields extracted from all the protocol
7027          * headers being programmed.
7028          */
7029         INIT_LIST_HEAD(&rm->fv_list);
7030         INIT_LIST_HEAD(&rm->rg_list);
7031
7032         /* Get bitmap of field vectors (profiles) that are compatible with the
7033          * rule request; only these will be searched in the subsequent call to
7034          * ice_get_fv.
7035          */
7036         ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7037
7038         status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7039         if (status)
7040                 goto err_unroll;
7041
7042         /* Create any special protocol/offset pairs, such as looking at tunnel
7043          * bits by extracting metadata
7044          */
7045         status = ice_add_special_words(rinfo, lkup_exts);
7046         if (status)
7047                 goto err_free_lkup_exts;
7048
7049         /* Group match words into recipes using preferred recipe grouping
7050          * criteria.
7051          */
7052         status = ice_create_recipe_group(hw, rm, lkup_exts);
7053         if (status)
7054                 goto err_unroll;
7055
7056         /* set the recipe priority if specified */
7057         rm->priority = (u8)rinfo->priority;
7058
7059         /* Find offsets from the field vector. Pick the first one for all the
7060          * recipes.
7061          */
7062         status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7063         if (status)
7064                 goto err_unroll;
7065
7066         /* An empty FV list means to use all the profiles returned in the
7067          * profile bitmap
7068          */
7069         if (LIST_EMPTY(&rm->fv_list)) {
7070                 u16 j;
7071
7072                 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7073                         struct ice_sw_fv_list_entry *fvl;
7074
7075                         fvl = (struct ice_sw_fv_list_entry *)
7076                                 ice_malloc(hw, sizeof(*fvl));
7077                         if (!fvl)
7078                                 goto err_unroll;
7079                         fvl->fv_ptr = NULL;
7080                         fvl->profile_id = j;
7081                         LIST_ADD(&fvl->list_entry, &rm->fv_list);
7082                 }
7083         }
7084
7085         /* get bitmap of all profiles the recipe will be associated with */
7086         ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7087         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7088                             list_entry) {
7089                 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7090                 ice_set_bit((u16)fvit->profile_id, profiles);
7091         }
7092
7093         /* Look for a recipe which matches our requested fv / mask list */
7094         *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
7095         if (*rid < ICE_MAX_NUM_RECIPES)
7096                 /* Success if found a recipe that match the existing criteria */
7097                 goto err_unroll;
7098
7099         rm->tun_type = rinfo->tun_type;
7100         /* Recipe we need does not exist, add a recipe */
7101         status = ice_add_sw_recipe(hw, rm, profiles);
7102         if (status)
7103                 goto err_unroll;
7104
7105         /* Associate all the recipes created with all the profiles in the
7106          * common field vector.
7107          */
7108         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7109                             list_entry) {
7110                 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
7111                 u16 j;
7112
7113                 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7114                                                       (u8 *)r_bitmap, NULL);
7115                 if (status)
7116                         goto err_unroll;
7117
7118                 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7119                               ICE_MAX_NUM_RECIPES);
7120                 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7121                 if (status)
7122                         goto err_unroll;
7123
7124                 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7125                                                       (u8 *)r_bitmap,
7126                                                       NULL);
7127                 ice_release_change_lock(hw);
7128
7129                 if (status)
7130                         goto err_unroll;
7131
7132                 /* Update profile to recipe bitmap array */
7133                 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7134                               ICE_MAX_NUM_RECIPES);
7135
7136                 /* Update recipe to profile bitmap array */
7137                 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7138                         ice_set_bit((u16)fvit->profile_id,
7139                                     recipe_to_profile[j]);
7140         }
7141
7142         *rid = rm->root_rid;
7143         ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7144                    lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
7145 err_unroll:
7146         LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7147                                  ice_recp_grp_entry, l_entry) {
7148                 LIST_DEL(&r_entry->l_entry);
7149                 ice_free(hw, r_entry);
7150         }
7151
7152         LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7153                                  list_entry) {
7154                 LIST_DEL(&fvit->list_entry);
7155                 ice_free(hw, fvit);
7156         }
7157
7158         if (rm->root_buf)
7159                 ice_free(hw, rm->root_buf);
7160
7161         ice_free(hw, rm);
7162
7163 err_free_lkup_exts:
7164         ice_free(hw, lkup_exts);
7165
7166         return status;
7167 }
7168
7169 /**
7170  * ice_find_dummy_packet - find dummy packet by tunnel type
7171  *
7172  * @lkups: lookup elements or match criteria for the advanced recipe, one
7173  *         structure per protocol header
7174  * @lkups_cnt: number of protocols
7175  * @tun_type: tunnel type from the match criteria
7176  * @pkt: dummy packet to fill according to filter match criteria
7177  * @pkt_len: packet length of dummy packet
7178  * @offsets: pointer to receive the pointer to the offsets for the packet
7179  */
static void
ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
		      enum ice_sw_tunnel_type tun_type, const u8 **pkt,
		      u16 *pkt_len,
		      const struct ice_dummy_pkt_offsets **offsets)
{
	bool tcp = false, udp = false, ipv6 = false, vlan = false;
	bool gre = false;
	u16 i;

	/* Scan the lookup elements once to learn which L3/L4 flavors the
	 * rule matches on; these flags steer the dummy-packet selection
	 * below when the tunnel type alone is not decisive.
	 */
	for (i = 0; i < lkups_cnt; i++) {
		if (lkups[i].type == ICE_UDP_ILOS)
			udp = true;
		else if (lkups[i].type == ICE_TCP_IL)
			tcp = true;
		else if (lkups[i].type == ICE_IPV6_OFOS)
			ipv6 = true;
		else if (lkups[i].type == ICE_VLAN_OFOS)
			vlan = true;
		/* A fully-masked IPv4 protocol of 0x2F (GRE) implies NVGRE */
		else if (lkups[i].type == ICE_IPV4_OFOS &&
			 lkups[i].h_u.ipv4_hdr.protocol ==
				ICE_IPV4_NVGRE_PROTO_ID &&
			 lkups[i].m_u.ipv4_hdr.protocol ==
				0xFF)
			gre = true;
		/* PPPoE carrying PPP protocol 0x0057 means IPv6 payload */
		else if (lkups[i].type == ICE_PPPOE &&
			 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
				CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
			 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
				0xFFFF)
			ipv6 = true;
		/* Fully-masked ethertype 0x86DD also means IPv6 */
		else if (lkups[i].type == ICE_ETYPE_OL &&
			 lkups[i].h_u.ethertype.ethtype_id ==
				CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
			 lkups[i].m_u.ethertype.ethtype_id ==
					0xFFFF)
			ipv6 = true;
		/* Inner IPv4 protocol 0x06 (fully masked) means inner TCP */
		else if (lkups[i].type == ICE_IPV4_IL &&
			 lkups[i].h_u.ipv4_hdr.protocol ==
				ICE_TCP_PROTO_ID &&
			 lkups[i].m_u.ipv4_hdr.protocol ==
				0xFF)
			tcp = true;
	}

	/* The checks below are ordered by specificity: QinQ first, then
	 * GTP-U, IPsec/L2TPv3/NAT-T, PPPoE, plain L4, tunnels, and finally
	 * the flag-driven fallbacks. Do not reorder without auditing
	 * precedence.
	 */

	/* QinQ (double VLAN) selections */
	if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
	     tun_type == ICE_NON_TUN_QINQ) && ipv6) {
		*pkt = dummy_qinq_ipv6_pkt;
		*pkt_len = sizeof(dummy_qinq_ipv6_pkt);
		*offsets = dummy_qinq_ipv6_packet_offsets;
		return;
	} else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
			   tun_type == ICE_NON_TUN_QINQ) {
		*pkt = dummy_qinq_ipv4_pkt;
		*pkt_len = sizeof(dummy_qinq_ipv4_pkt);
		*offsets = dummy_qinq_ipv4_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
		*pkt = dummy_qinq_pppoe_ipv6_packet;
		*pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
		*offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
		return;
	} else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
		*pkt = dummy_qinq_pppoe_ipv4_pkt;
		*pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
		*offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
		return;
	} else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
			tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
		/* PPPoE pay/session over QinQ reuses the IPv4 packet bytes
		 * but with PPPoE-only offsets
		 */
		*pkt = dummy_qinq_pppoe_ipv4_pkt;
		*pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
		*offsets = dummy_qinq_pppoe_packet_offsets;
		return;
	}

	/* GTP-U selections; the no-payload variants reuse the full packet
	 * bytes but with offsets that stop at the GTP header
	 */
	if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
		*pkt = dummy_ipv4_gtpu_ipv4_packet;
		*pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
		*offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
		return;
	} else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
		*pkt = dummy_ipv6_gtpu_ipv6_packet;
		*pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
		*offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
		return;
	} else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
		*pkt = dummy_ipv4_gtpu_ipv4_packet;
		*pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
		*offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
		return;
	} else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
		*pkt = dummy_ipv4_gtpu_ipv6_packet;
		*pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
		*offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
		return;
	} else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
		*pkt = dummy_ipv6_gtpu_ipv4_packet;
		*pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
		*offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
		return;
	} else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
		*pkt = dummy_ipv6_gtpu_ipv6_packet;
		*pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
		*offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
		return;
	}

	/* IPsec ESP/AH, NAT-T and L2TPv3 selections */
	if (tun_type == ICE_SW_TUN_IPV4_ESP) {
		*pkt = dummy_ipv4_esp_pkt;
		*pkt_len = sizeof(dummy_ipv4_esp_pkt);
		*offsets = dummy_ipv4_esp_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_IPV6_ESP) {
		*pkt = dummy_ipv6_esp_pkt;
		*pkt_len = sizeof(dummy_ipv6_esp_pkt);
		*offsets = dummy_ipv6_esp_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_IPV4_AH) {
		*pkt = dummy_ipv4_ah_pkt;
		*pkt_len = sizeof(dummy_ipv4_ah_pkt);
		*offsets = dummy_ipv4_ah_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_IPV6_AH) {
		*pkt = dummy_ipv6_ah_pkt;
		*pkt_len = sizeof(dummy_ipv6_ah_pkt);
		*offsets = dummy_ipv6_ah_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
		*pkt = dummy_ipv4_nat_pkt;
		*pkt_len = sizeof(dummy_ipv4_nat_pkt);
		*offsets = dummy_ipv4_nat_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
		*pkt = dummy_ipv6_nat_pkt;
		*pkt_len = sizeof(dummy_ipv6_nat_pkt);
		*offsets = dummy_ipv6_nat_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
		*pkt = dummy_ipv4_l2tpv3_pkt;
		*pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
		*offsets = dummy_ipv4_l2tpv3_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
		*pkt = dummy_ipv6_l2tpv3_pkt;
		*pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
		*offsets = dummy_ipv6_l2tpv3_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_GTP) {
		*pkt = dummy_udp_gtp_packet;
		*pkt_len = sizeof(dummy_udp_gtp_packet);
		*offsets = dummy_udp_gtp_packet_offsets;
		return;
	}

	/* PPPoE selections; IPv6 payload detected via the lookup scan above */
	if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
		*pkt = dummy_pppoe_ipv6_packet;
		*pkt_len = sizeof(dummy_pppoe_ipv6_packet);
		*offsets = dummy_pppoe_packet_offsets;
		return;
	} else if (tun_type == ICE_SW_TUN_PPPOE ||
		tun_type == ICE_SW_TUN_PPPOE_PAY) {
		*pkt = dummy_pppoe_ipv4_packet;
		*pkt_len = sizeof(dummy_pppoe_ipv4_packet);
		*offsets = dummy_pppoe_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
		*pkt = dummy_pppoe_ipv4_packet;
		*pkt_len = sizeof(dummy_pppoe_ipv4_packet);
		*offsets = dummy_pppoe_packet_ipv4_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
		*pkt = dummy_pppoe_ipv4_tcp_packet;
		*pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
		*offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
		*pkt = dummy_pppoe_ipv4_udp_packet;
		*pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
		*offsets = dummy_pppoe_ipv4_udp_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
		*pkt = dummy_pppoe_ipv6_packet;
		*pkt_len = sizeof(dummy_pppoe_ipv6_packet);
		*offsets = dummy_pppoe_packet_ipv6_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
		*pkt = dummy_pppoe_ipv6_tcp_packet;
		*pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
		*offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
		*pkt = dummy_pppoe_ipv6_udp_packet;
		*pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
		*offsets = dummy_pppoe_packet_ipv6_udp_offsets;
		return;
	}

	/* Plain (non-tunneled) L4 selections */
	if (tun_type == ICE_SW_IPV4_TCP) {
		*pkt = dummy_tcp_packet;
		*pkt_len = sizeof(dummy_tcp_packet);
		*offsets = dummy_tcp_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_IPV4_UDP) {
		*pkt = dummy_udp_packet;
		*pkt_len = sizeof(dummy_udp_packet);
		*offsets = dummy_udp_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_IPV6_TCP) {
		*pkt = dummy_tcp_ipv6_packet;
		*pkt_len = sizeof(dummy_tcp_ipv6_packet);
		*offsets = dummy_tcp_ipv6_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_IPV6_UDP) {
		*pkt = dummy_udp_ipv6_packet;
		*pkt_len = sizeof(dummy_udp_ipv6_packet);
		*offsets = dummy_udp_ipv6_packet_offsets;
		return;
	}

	if (tun_type == ICE_ALL_TUNNELS) {
		*pkt = dummy_gre_udp_packet;
		*pkt_len = sizeof(dummy_gre_udp_packet);
		*offsets = dummy_gre_udp_packet_offsets;
		return;
	}

	/* NVGRE: explicit tunnel type, or inferred from the lookup scan */
	if (tun_type == ICE_SW_TUN_NVGRE || gre) {
		if (tcp) {
			*pkt = dummy_gre_tcp_packet;
			*pkt_len = sizeof(dummy_gre_tcp_packet);
			*offsets = dummy_gre_tcp_packet_offsets;
			return;
		}

		/* UDP inner payload is the default for GRE */
		*pkt = dummy_gre_udp_packet;
		*pkt_len = sizeof(dummy_gre_udp_packet);
		*offsets = dummy_gre_udp_packet_offsets;
		return;
	}

	/* UDP-based tunnels (VXLAN/GENEVE/GPE), inner TCP or UDP */
	if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
	    tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
	    tun_type == ICE_SW_TUN_GENEVE_VLAN ||
	    tun_type == ICE_SW_TUN_VXLAN_VLAN) {
		if (tcp) {
			*pkt = dummy_udp_tun_tcp_packet;
			*pkt_len = sizeof(dummy_udp_tun_tcp_packet);
			*offsets = dummy_udp_tun_tcp_packet_offsets;
			return;
		}

		*pkt = dummy_udp_tun_udp_packet;
		*pkt_len = sizeof(dummy_udp_tun_udp_packet);
		*offsets = dummy_udp_tun_udp_packet_offsets;
		return;
	}

	/* Flag-driven fallbacks when no tunnel type matched above */
	if (udp && !ipv6) {
		if (vlan) {
			*pkt = dummy_vlan_udp_packet;
			*pkt_len = sizeof(dummy_vlan_udp_packet);
			*offsets = dummy_vlan_udp_packet_offsets;
			return;
		}
		*pkt = dummy_udp_packet;
		*pkt_len = sizeof(dummy_udp_packet);
		*offsets = dummy_udp_packet_offsets;
		return;
	} else if (udp && ipv6) {
		if (vlan) {
			*pkt = dummy_vlan_udp_ipv6_packet;
			*pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
			*offsets = dummy_vlan_udp_ipv6_packet_offsets;
			return;
		}
		*pkt = dummy_udp_ipv6_packet;
		*pkt_len = sizeof(dummy_udp_ipv6_packet);
		*offsets = dummy_udp_ipv6_packet_offsets;
		return;
	/* NOTE(review): "(tcp && ipv6) || ipv6" reduces to "ipv6"; any IPv6
	 * rule without UDP falls through to the TCP/IPv6 dummy packet
	 */
	} else if ((tcp && ipv6) || ipv6) {
		if (vlan) {
			*pkt = dummy_vlan_tcp_ipv6_packet;
			*pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
			*offsets = dummy_vlan_tcp_ipv6_packet_offsets;
			return;
		}
		*pkt = dummy_tcp_ipv6_packet;
		*pkt_len = sizeof(dummy_tcp_ipv6_packet);
		*offsets = dummy_tcp_ipv6_packet_offsets;
		return;
	}

	/* Final default: IPv4 TCP, with or without a single VLAN tag */
	if (vlan) {
		*pkt = dummy_vlan_tcp_packet;
		*pkt_len = sizeof(dummy_vlan_tcp_packet);
		*offsets = dummy_vlan_tcp_packet_offsets;
	} else {
		*pkt = dummy_tcp_packet;
		*pkt_len = sizeof(dummy_tcp_packet);
		*offsets = dummy_tcp_packet_offsets;
	}
}
7518
7519 /**
7520  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
7521  *
7522  * @lkups: lookup elements or match criteria for the advanced recipe, one
7523  *         structure per protocol header
7524  * @lkups_cnt: number of protocols
7525  * @s_rule: stores rule information from the match criteria
7526  * @dummy_pkt: dummy packet to fill according to filter match criteria
7527  * @pkt_len: packet length of dummy packet
7528  * @offsets: offset info for the dummy packet
7529  */
static enum ice_status
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
			  struct ice_aqc_sw_rules_elem *s_rule,
			  const u8 *dummy_pkt, u16 pkt_len,
			  const struct ice_dummy_pkt_offsets *offsets)
{
	u8 *pkt;
	u16 i;

	/* Start with a packet with a pre-defined/dummy content. Then, fill
	 * in the header values to be looked up or matched.
	 */
	pkt = s_rule->pdata.lkup_tx_rx.hdr;

	ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);

	for (i = 0; i < lkups_cnt; i++) {
		enum ice_protocol_type type;
		u16 offset = 0, len = 0, j;
		bool found = false;

		/* find the start of this layer; it should be found since this
		 * was already checked when search for the dummy packet
		 */
		type = lkups[i].type;
		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
			if (type == offsets[j].type) {
				offset = offsets[j].offset;
				found = true;
				break;
			}
		}
		/* this should never happen in a correct calling sequence */
		if (!found)
			return ICE_ERR_PARAM;

		/* Map the protocol type to the byte length of its header so
		 * the word-wise copy below knows how far this layer extends.
		 */
		switch (lkups[i].type) {
		case ICE_MAC_OFOS:
		case ICE_MAC_IL:
			len = sizeof(struct ice_ether_hdr);
			break;
		case ICE_ETYPE_OL:
			len = sizeof(struct ice_ethtype_hdr);
			break;
		case ICE_VLAN_OFOS:
		case ICE_VLAN_EX:
			len = sizeof(struct ice_vlan_hdr);
			break;
		case ICE_IPV4_OFOS:
		case ICE_IPV4_IL:
			len = sizeof(struct ice_ipv4_hdr);
			break;
		case ICE_IPV6_OFOS:
		case ICE_IPV6_IL:
			len = sizeof(struct ice_ipv6_hdr);
			break;
		case ICE_TCP_IL:
		case ICE_UDP_OF:
		case ICE_UDP_ILOS:
			len = sizeof(struct ice_l4_hdr);
			break;
		case ICE_SCTP_IL:
			len = sizeof(struct ice_sctp_hdr);
			break;
		case ICE_NVGRE:
			len = sizeof(struct ice_nvgre);
			break;
		case ICE_VXLAN:
		case ICE_GENEVE:
		case ICE_VXLAN_GPE:
			len = sizeof(struct ice_udp_tnl_hdr);
			break;

		case ICE_GTP:
		case ICE_GTP_NO_PAY:
			len = sizeof(struct ice_udp_gtp_hdr);
			break;
		case ICE_PPPOE:
			len = sizeof(struct ice_pppoe_hdr);
			break;
		case ICE_ESP:
			len = sizeof(struct ice_esp_hdr);
			break;
		case ICE_NAT_T:
			len = sizeof(struct ice_nat_t_hdr);
			break;
		case ICE_AH:
			len = sizeof(struct ice_ah_hdr);
			break;
		case ICE_L2TPV3:
			len = sizeof(struct ice_l2tpv3_sess_hdr);
			break;
		default:
			/* protocol not supported for dummy-packet filling */
			return ICE_ERR_PARAM;
		}

		/* the length should be a word multiple */
		if (len % ICE_BYTES_PER_WORD)
			return ICE_ERR_CFG;

		/* We have the offset to the header start, the length, the
		 * caller's header values and mask. Use this information to
		 * copy the data into the dummy packet appropriately based on
		 * the mask. Note that we need to only write the bits as
		 * indicated by the mask to make sure we don't improperly write
		 * over any significant packet data.
		 */
		for (j = 0; j < len / sizeof(u16); j++)
			if (((u16 *)&lkups[i].m_u)[j])
				/* keep unmasked dummy bits, take masked
				 * caller bits: (pkt & ~m) | (h & m)
				 */
				((u16 *)(pkt + offset))[j] =
					(((u16 *)(pkt + offset))[j] &
					 ~((u16 *)&lkups[i].m_u)[j]) |
					(((u16 *)&lkups[i].h_u)[j] &
					 ((u16 *)&lkups[i].m_u)[j]);
	}

	/* report the full dummy packet length to firmware */
	s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);

	return ICE_SUCCESS;
}
7650
7651 /**
7652  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7653  * @hw: pointer to the hardware structure
7654  * @tun_type: tunnel type
7655  * @pkt: dummy packet to fill in
7656  * @offsets: offset info for the dummy packet
7657  */
7658 static enum ice_status
7659 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7660                         u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
7661 {
7662         u16 open_port, i;
7663
7664         switch (tun_type) {
7665         case ICE_SW_TUN_AND_NON_TUN:
7666         case ICE_SW_TUN_VXLAN_GPE:
7667         case ICE_SW_TUN_VXLAN:
7668         case ICE_SW_TUN_VXLAN_VLAN:
7669         case ICE_SW_TUN_UDP:
7670                 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7671                         return ICE_ERR_CFG;
7672                 break;
7673
7674         case ICE_SW_TUN_GENEVE:
7675         case ICE_SW_TUN_GENEVE_VLAN:
7676                 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7677                         return ICE_ERR_CFG;
7678                 break;
7679
7680         default:
7681                 /* Nothing needs to be done for this tunnel type */
7682                 return ICE_SUCCESS;
7683         }
7684
7685         /* Find the outer UDP protocol header and insert the port number */
7686         for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7687                 if (offsets[i].type == ICE_UDP_OF) {
7688                         struct ice_l4_hdr *hdr;
7689                         u16 offset;
7690
7691                         offset = offsets[i].offset;
7692                         hdr = (struct ice_l4_hdr *)&pkt[offset];
7693                         hdr->dst_port = CPU_TO_BE16(open_port);
7694
7695                         return ICE_SUCCESS;
7696                 }
7697         }
7698
7699         return ICE_ERR_CFG;
7700 }
7701
7702 /**
7703  * ice_find_adv_rule_entry - Search a rule entry
7704  * @hw: pointer to the hardware structure
7705  * @lkups: lookup elements or match criteria for the advanced recipe, one
7706  *         structure per protocol header
7707  * @lkups_cnt: number of protocols
7708  * @recp_id: recipe ID for which we are finding the rule
7709  * @rinfo: other information regarding the rule e.g. priority and action info
7710  *
7711  * Helper function to search for a given advance rule entry
7712  * Returns pointer to entry storing the rule if found
7713  */
7714 static struct ice_adv_fltr_mgmt_list_entry *
7715 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7716                         u16 lkups_cnt, u16 recp_id,
7717                         struct ice_adv_rule_info *rinfo)
7718 {
7719         struct ice_adv_fltr_mgmt_list_entry *list_itr;
7720         struct ice_switch_info *sw = hw->switch_info;
7721         int i;
7722
7723         LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7724                             ice_adv_fltr_mgmt_list_entry, list_entry) {
7725                 bool lkups_matched = true;
7726
7727                 if (lkups_cnt != list_itr->lkups_cnt)
7728                         continue;
7729                 for (i = 0; i < list_itr->lkups_cnt; i++)
7730                         if (memcmp(&list_itr->lkups[i], &lkups[i],
7731                                    sizeof(*lkups))) {
7732                                 lkups_matched = false;
7733                                 break;
7734                         }
7735                 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7736                     rinfo->tun_type == list_itr->rule_info.tun_type &&
7737                     lkups_matched)
7738                         return list_itr;
7739         }
7740         return NULL;
7741 }
7742
/**
 * ice_adv_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current adv filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the booking keeping is described below :
 * When a VSI needs to subscribe to a given advanced filter
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static enum ice_status
ice_adv_add_update_vsi_list(struct ice_hw *hw,
			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
			    struct ice_adv_rule_info *cur_fltr,
			    struct ice_adv_rule_info *new_fltr)
{
	enum ice_status status;
	u16 vsi_list_id = 0;

	/* Queue/queue-group/drop actions cannot be shared across VSIs via a
	 * VSI list, so refuse to merge such filters.
	 */
	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
		return ICE_ERR_NOT_IMPL;

	/* Mixing a queue-directed action with an existing VSI-forward rule is
	 * likewise unsupported.
	 */
	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		 /* Only one entry existed in the mapping and it was not already
		  * a part of a VSI list. So, create a VSI list with the old and
		  * new VSIs.
		  */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
		    new_fltr->sw_act.fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  ICE_SW_LKUP_LAST);
		if (status)
			return status;

		ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;

		/* Update the previous switch rule of "forward to VSI" to
		 * "fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		/* keep the software bookkeeping in sync with the new rule */
		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
	} else {
		u16 vsi_handle = new_fltr->sw_act.vsi_handle;

		if (!m_entry->vsi_list_info)
			return ICE_ERR_CFG;

		/* A rule already exists with the new VSI being added */
		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
			return ICE_SUCCESS;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false,
						  ice_aqc_opc_update_sw_rules,
						  ICE_SW_LKUP_LAST);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			ice_set_bit(vsi_handle,
				    m_entry->vsi_list_info->vsi_map);
	}
	/* count the VSI only once the hardware update succeeded */
	if (!status)
		m_entry->vsi_count++;
	return status;
}
7852
/**
 * ice_add_adv_rule - helper function to create an advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 * together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: other information related to the rule that needs to be programmed
 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
 *               ignored in case of error.
 *
 * This function can program only 1 rule at a time. The lkups is used to
 * describe the all the words that forms the "lookup" portion of the recipe.
 * These words can span multiple protocols. Callers to this function need to
 * pass in a list of protocol headers with lookup information along and mask
 * that determines which words are valid from the given protocol header.
 * rinfo describes other information related to this rule such as forwarding
 * IDs, priority of this rule, etc.
 */
enum ice_status
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
	const struct ice_dummy_pkt_offsets *pkt_offsets;
	struct ice_aqc_sw_rules_elem *s_rule = NULL;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_switch_info *sw;
	enum ice_status status;
	const u8 *pkt = NULL;
	bool prof_rule;
	u16 word_cnt;
	u32 act = 0;
	u8 q_rgn;

	/* Initialize profile to result index bitmap (done lazily on the
	 * first advanced rule added after init)
	 */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);
	}

	prof_rule = ice_is_prof_rule(rinfo->tun_type);
	/* non-profile rules must match on at least one lookup element */
	if (!prof_rule && !lkups_cnt)
		return ICE_ERR_PARAM;

	/* get # of words we need to match */
	word_cnt = 0;
	for (i = 0; i < lkups_cnt; i++) {
		u16 j, *ptr;

		/* count every non-zero 16-bit word in the mask */
		ptr = (u16 *)&lkups[i].m_u;
		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
			if (ptr[j] != 0)
				word_cnt++;
	}

	/* the recipe chain can hold at most ICE_MAX_CHAIN_WORDS words;
	 * profile rules may legitimately have zero match words
	 */
	if (prof_rule) {
		if (word_cnt > ICE_MAX_CHAIN_WORDS)
			return ICE_ERR_PARAM;
	} else {
		if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
			return ICE_ERR_PARAM;
	}

	/* make sure that we can locate a dummy packet */
	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
			      &pkt_offsets);
	if (!pkt) {
		status = ICE_ERR_PARAM;
		goto err_ice_add_adv_rule;
	}

	/* only these four actions are supported for advanced rules */
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
		return ICE_ERR_CFG;

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* resolve software VSI handle to the hardware VSI number */
	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
	if (rinfo->sw_act.flag & ICE_FLTR_TX)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	/* create (or reuse) the recipe that matches these lookups */
	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	if (status)
		return status;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	if (m_entry) {
		/* we have to add VSI to VSI_LIST and increment vsi_count.
		 * Also Update VSI list so that we can change forwarding rule
		 * if the rule already exists, we will check if it exists with
		 * same vsi_id, if not then add it to the VSI list if it already
		 * exists if not then create a VSI list and add the existing VSI
		 * ID and the new VSI ID to the list
		 * We will add that VSI to the list
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
						     rinfo);
		if (added_entry) {
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
		}
		return status;
	}
	/* buffer must hold the fixed rule header plus the dummy packet */
	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	act |= ICE_SINGLE_ACT_LAN_ENABLE;
	/* encode the caller's action into the switch rule action word */
	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is encoded as log2 of the group size */
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
	default:
		status = ICE_ERR_CFG;
		goto err_ice_add_adv_rule;
	}

	/* set the rule LOOKUP type based on caller specified 'RX'
	 * instead of hardcoding it to be either LOOKUP_TX/RX
	 *
	 * for 'RX' set the source to be the port number
	 * for 'TX' set the source to be the source HW VSI number (determined
	 * by caller)
	 */
	if (rinfo->rx) {
		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->pdata.lkup_tx_rx.src =
			CPU_TO_LE16(hw->port_info->lport);
	} else {
		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
	}

	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	/* build the dummy packet portion of the rule from the lookups */
	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
					   pkt_len, pkt_offsets);
	if (status)
		goto err_ice_add_adv_rule;

	/* for tunnel rules, patch in the currently open UDP tunnel port */
	if (rinfo->tun_type != ICE_NON_TUN &&
	    rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
						 s_rule->pdata.lkup_tx_rx.hdr,
						 pkt_offsets);
		if (status)
			goto err_ice_add_adv_rule;
	}

	/* program the rule into hardware via the admin queue */
	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
				 NULL);
	if (status)
		goto err_ice_add_adv_rule;
	adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
		ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
	if (!adv_fltr) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	}

	/* keep a private copy of the lookups for later matching/removal;
	 * profile rules may legitimately have no lookups (lkups_cnt == 0)
	 */
	adv_fltr->lkups = (struct ice_adv_lkup_elem *)
		ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
			   ICE_NONDMA_TO_NONDMA);
	if (!adv_fltr->lkups && !prof_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	/* firmware returns the assigned rule index in the response buffer */
	adv_fltr->rule_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	LIST_ADD(&adv_fltr->list_entry, rule_head);
	if (added_entry) {
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	}
err_ice_add_adv_rule:
	/* on failure release the partially built management entry */
	if (status && adv_fltr) {
		ice_free(hw, adv_fltr->lkups);
		ice_free(hw, adv_fltr);
	}

	/* the AQ buffer is only needed for programming; always release it */
	ice_free(hw, s_rule);

	return status;
}
8080
/**
 * ice_adv_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Removes @vsi_handle from the VSI list of an advanced filter. When only one
 * VSI remains afterwards, the rule is converted back to a plain
 * "forward to VSI" rule and the now-unused VSI list is freed.
 */
static enum ice_status
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status;
	u16 vsi_list_id;

	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist
	 * NOTE(review): fm_list->vsi_list_info is dereferenced here without a
	 * NULL check; callers must guarantee it is set for FWD_TO_VSI_LIST
	 * rules -- confirm.
	 */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	/* detach the VSI from the list in hardware (remove = true) */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	/* mirror the hardware change in the software bookkeeping */
	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		struct ice_fltr_info tmp_fltr;
		u16 rem_vsi_handle;

		/* locate the single VSI left on the list */
		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
						    ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		/* rewrite the rule to forward directly to the remaining VSI */
		ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;

		/* Update the previous switch rule of "fwd to VSI list" to
		 * "forward to VSI"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);
			return status;
		}
		fm_list->vsi_list_info->ref_cnt--;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
8170
/**
 * ice_rem_adv_rule - removes existing advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *         together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: Its the pointer to the rule information for the rule
 *
 * This function can be used to remove 1 rule at a time. The lkups is
 * used to describe all the words that forms the "lookup" portion of the
 * rule. These words can span multiple protocols. Callers to this function
 * need to pass in a list of protocol headers with lookup information along
 * and mask that determines which words are valid from the given protocol
 * header. rinfo describes other information related to this rule such as
 * forwarding IDs, priority of this rule, etc.
 */
enum ice_status
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	bool remove_rule = false;
	u16 i, rid, vsi_handle;

	/* Rebuild the protocol/offset word list so the matching recipe can be
	 * located the same way it was when the rule was added.
	 */
	ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
			return ICE_ERR_CFG;

		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
		if (!count)
			return ICE_ERR_CFG;
	}

	/* Create any special protocol/offset pairs, such as looking at tunnel
	 * bits by extracting metadata
	 */
	status = ice_add_special_words(rinfo, &lkup_exts);
	if (status)
		return status;

	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return ICE_ERR_PARAM;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	/* NOTE(review): the rule entry is looked up before rule_lock is
	 * taken; presumably removal is serialized by the caller -- confirm.
	 */
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	if (!list_elem)
		return ICE_SUCCESS;
	ice_acquire_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* rule forwards to a single target; remove it outright */
		remove_rule = true;
	} else if (list_elem->vsi_count > 1) {
		/* other VSIs still subscribe; only drop this VSI from the
		 * shared list and keep the rule
		 */
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	} else {
		/* last subscriber: unwind the VSI list, then remove the rule
		 * if no VSI remains
		 */
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status) {
			ice_release_lock(rule_lock);
			return status;
		}
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}
	ice_release_lock(rule_lock);
	if (remove_rule) {
		struct ice_aqc_sw_rules_elem *s_rule;
		u16 rule_buf_sz;

		/* removal needs only the fixed header, no dummy packet */
		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
		s_rule = (struct ice_aqc_sw_rules_elem *)
			ice_malloc(hw, rule_buf_sz);
		if (!s_rule)
			return ICE_ERR_NO_MEMORY;
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 rule_buf_sz, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		/* DOES_NOT_EXIST means firmware already dropped the rule;
		 * treat it like success and clean up the bookkeeping
		 */
		if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
			struct ice_switch_info *sw = hw->switch_info;

			ice_acquire_lock(rule_lock);
			LIST_DEL(&list_elem->list_entry);
			ice_free(hw, list_elem->lkups);
			ice_free(hw, list_elem);
			ice_release_lock(rule_lock);
			if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
				sw->recp_list[rid].adv_rule = false;
		}
		ice_free(hw, s_rule);
	}
	return status;
}
8276
8277 /**
8278  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8279  * @hw: pointer to the hardware structure
8280  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8281  *
8282  * This function is used to remove 1 rule at a time. The removal is based on
8283  * the remove_entry parameter. This function will remove rule for a given
8284  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8285  */
8286 enum ice_status
8287 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8288                        struct ice_rule_query_data *remove_entry)
8289 {
8290         struct ice_adv_fltr_mgmt_list_entry *list_itr;
8291         struct LIST_HEAD_TYPE *list_head;
8292         struct ice_adv_rule_info rinfo;
8293         struct ice_switch_info *sw;
8294
8295         sw = hw->switch_info;
8296         if (!sw->recp_list[remove_entry->rid].recp_created)
8297                 return ICE_ERR_PARAM;
8298         list_head = &sw->recp_list[remove_entry->rid].filt_rules;
8299         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8300                             list_entry) {
8301                 if (list_itr->rule_info.fltr_rule_id ==
8302                     remove_entry->rule_id) {
8303                         rinfo = list_itr->rule_info;
8304                         rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8305                         return ice_rem_adv_rule(hw, list_itr->lkups,
8306                                                 list_itr->lkups_cnt, &rinfo);
8307                 }
8308         }
8309         /* either list is empty or unable to find rule */
8310         return ICE_ERR_DOES_NOT_EXIST;
8311 }
8312
8313 /**
8314  * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
8315  *                       given VSI handle
8316  * @hw: pointer to the hardware structure
8317  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8318  *
8319  * This function is used to remove all the rules for a given VSI and as soon
8320  * as removing a rule fails, it will return immediately with the error code,
8321  * else it will return ICE_SUCCESS
8322  */
8323 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8324 {
8325         struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
8326         struct ice_vsi_list_map_info *map_info;
8327         struct LIST_HEAD_TYPE *list_head;
8328         struct ice_adv_rule_info rinfo;
8329         struct ice_switch_info *sw;
8330         enum ice_status status;
8331         u8 rid;
8332
8333         sw = hw->switch_info;
8334         for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
8335                 if (!sw->recp_list[rid].recp_created)
8336                         continue;
8337                 if (!sw->recp_list[rid].adv_rule)
8338                         continue;
8339
8340                 list_head = &sw->recp_list[rid].filt_rules;
8341                 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
8342                                          ice_adv_fltr_mgmt_list_entry,
8343                                          list_entry) {
8344                         rinfo = list_itr->rule_info;
8345
8346                         if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
8347                                 map_info = list_itr->vsi_list_info;
8348                                 if (!map_info)
8349                                         continue;
8350
8351                                 if (!ice_is_bit_set(map_info->vsi_map,
8352                                                     vsi_handle))
8353                                         continue;
8354                         } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
8355                                 continue;
8356                         }
8357
8358                         rinfo.sw_act.vsi_handle = vsi_handle;
8359                         status = ice_rem_adv_rule(hw, list_itr->lkups,
8360                                                   list_itr->lkups_cnt, &rinfo);
8361
8362                         if (status)
8363                                 return status;
8364                 }
8365         }
8366         return ICE_SUCCESS;
8367 }
8368
8369 /**
8370  * ice_replay_fltr - Replay all the filters stored by a specific list head
8371  * @hw: pointer to the hardware structure
8372  * @list_head: list for which filters needs to be replayed
8373  * @recp_id: Recipe ID for which rules need to be replayed
8374  */
8375 static enum ice_status
8376 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
8377 {
8378         struct ice_fltr_mgmt_list_entry *itr;
8379         enum ice_status status = ICE_SUCCESS;
8380         struct ice_sw_recipe *recp_list;
8381         u8 lport = hw->port_info->lport;
8382         struct LIST_HEAD_TYPE l_head;
8383
8384         if (LIST_EMPTY(list_head))
8385                 return status;
8386
8387         recp_list = &hw->switch_info->recp_list[recp_id];
8388         /* Move entries from the given list_head to a temporary l_head so that
8389          * they can be replayed. Otherwise when trying to re-add the same
8390          * filter, the function will return already exists
8391          */
8392         LIST_REPLACE_INIT(list_head, &l_head);
8393
8394         /* Mark the given list_head empty by reinitializing it so filters
8395          * could be added again by *handler
8396          */
8397         LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
8398                             list_entry) {
8399                 struct ice_fltr_list_entry f_entry;
8400                 u16 vsi_handle;
8401
8402                 f_entry.fltr_info = itr->fltr_info;
8403                 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
8404                         status = ice_add_rule_internal(hw, recp_list, lport,
8405                                                        &f_entry);
8406                         if (status != ICE_SUCCESS)
8407                                 goto end;
8408                         continue;
8409                 }
8410
8411                 /* Add a filter per VSI separately */
8412                 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
8413                                      ICE_MAX_VSI) {
8414                         if (!ice_is_vsi_valid(hw, vsi_handle))
8415                                 break;
8416
8417                         ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8418                         f_entry.fltr_info.vsi_handle = vsi_handle;
8419                         f_entry.fltr_info.fwd_id.hw_vsi_id =
8420                                 ice_get_hw_vsi_num(hw, vsi_handle);
8421                         f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8422                         if (recp_id == ICE_SW_LKUP_VLAN)
8423                                 status = ice_add_vlan_internal(hw, recp_list,
8424                                                                &f_entry);
8425                         else
8426                                 status = ice_add_rule_internal(hw, recp_list,
8427                                                                lport,
8428                                                                &f_entry);
8429                         if (status != ICE_SUCCESS)
8430                                 goto end;
8431                 }
8432         }
8433 end:
8434         /* Clear the filter management list */
8435         ice_rem_sw_rule_info(hw, &l_head);
8436         return status;
8437 }
8438
8439 /**
8440  * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8441  * @hw: pointer to the hardware structure
8442  *
8443  * NOTE: This function does not clean up partially added filters on error.
8444  * It is up to caller of the function to issue a reset or fail early.
8445  */
8446 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8447 {
8448         struct ice_switch_info *sw = hw->switch_info;
8449         enum ice_status status = ICE_SUCCESS;
8450         u8 i;
8451
8452         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8453                 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
8454
8455                 status = ice_replay_fltr(hw, i, head);
8456                 if (status != ICE_SUCCESS)
8457                         return status;
8458         }
8459         return status;
8460 }
8461
8462 /**
8463  * ice_replay_vsi_fltr - Replay filters for requested VSI
8464  * @hw: pointer to the hardware structure
8465  * @pi: pointer to port information structure
8466  * @sw: pointer to switch info struct for which function replays filters
8467  * @vsi_handle: driver VSI handle
8468  * @recp_id: Recipe ID for which rules need to be replayed
8469  * @list_head: list for which filters need to be replayed
8470  *
8471  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
8472  * It is required to pass valid VSI handle.
8473  */
8474 static enum ice_status
8475 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8476                     struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
8477                     struct LIST_HEAD_TYPE *list_head)
8478 {
8479         struct ice_fltr_mgmt_list_entry *itr;
8480         enum ice_status status = ICE_SUCCESS;
8481         struct ice_sw_recipe *recp_list;
8482         u16 hw_vsi_id;
8483
8484         if (LIST_EMPTY(list_head))
8485                 return status;
8486         recp_list = &sw->recp_list[recp_id];
8487         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
8488
8489         LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
8490                             list_entry) {
8491                 struct ice_fltr_list_entry f_entry;
8492
8493                 f_entry.fltr_info = itr->fltr_info;
8494                 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
8495                     itr->fltr_info.vsi_handle == vsi_handle) {
8496                         /* update the src in case it is VSI num */
8497                         if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8498                                 f_entry.fltr_info.src = hw_vsi_id;
8499                         status = ice_add_rule_internal(hw, recp_list,
8500                                                        pi->lport,
8501                                                        &f_entry);
8502                         if (status != ICE_SUCCESS)
8503                                 goto end;
8504                         continue;
8505                 }
8506                 if (!itr->vsi_list_info ||
8507                     !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
8508                         continue;
8509                 /* Clearing it so that the logic can add it back */
8510                 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8511                 f_entry.fltr_info.vsi_handle = vsi_handle;
8512                 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8513                 /* update the src in case it is VSI num */
8514                 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8515                         f_entry.fltr_info.src = hw_vsi_id;
8516                 if (recp_id == ICE_SW_LKUP_VLAN)
8517                         status = ice_add_vlan_internal(hw, recp_list, &f_entry);
8518                 else
8519                         status = ice_add_rule_internal(hw, recp_list,
8520                                                        pi->lport,
8521                                                        &f_entry);
8522                 if (status != ICE_SUCCESS)
8523                         goto end;
8524         }
8525 end:
8526         return status;
8527 }
8528
8529 /**
8530  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
8531  * @hw: pointer to the hardware structure
8532  * @vsi_handle: driver VSI handle
8533  * @list_head: list for which filters need to be replayed
8534  *
8535  * Replay the advanced rule for the given VSI.
8536  */
8537 static enum ice_status
8538 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
8539                         struct LIST_HEAD_TYPE *list_head)
8540 {
8541         struct ice_rule_query_data added_entry = { 0 };
8542         struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
8543         enum ice_status status = ICE_SUCCESS;
8544
8545         if (LIST_EMPTY(list_head))
8546                 return status;
8547         LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
8548                             list_entry) {
8549                 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
8550                 u16 lk_cnt = adv_fltr->lkups_cnt;
8551
8552                 if (vsi_handle != rinfo->sw_act.vsi_handle)
8553                         continue;
8554                 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
8555                                           &added_entry);
8556                 if (status)
8557                         break;
8558         }
8559         return status;
8560 }
8561
8562 /**
8563  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
8564  * @hw: pointer to the hardware structure
8565  * @pi: pointer to port information structure
8566  * @vsi_handle: driver VSI handle
8567  *
8568  * Replays filters for requested VSI via vsi_handle.
8569  */
8570 enum ice_status
8571 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8572                         u16 vsi_handle)
8573 {
8574         struct ice_switch_info *sw = hw->switch_info;
8575         enum ice_status status;
8576         u8 i;
8577
8578         /* Update the recipes that were created */
8579         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8580                 struct LIST_HEAD_TYPE *head;
8581
8582                 head = &sw->recp_list[i].filt_replay_rules;
8583                 if (!sw->recp_list[i].adv_rule)
8584                         status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
8585                                                      head);
8586                 else
8587                         status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
8588                 if (status != ICE_SUCCESS)
8589                         return status;
8590         }
8591
8592         return ICE_SUCCESS;
8593 }
8594
8595 /**
8596  * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
8597  * @hw: pointer to the HW struct
8598  * @sw: pointer to switch info struct for which function removes filters
8599  *
8600  * Deletes the filter replay rules for given switch
8601  */
8602 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
8603 {
8604         u8 i;
8605
8606         if (!sw)
8607                 return;
8608
8609         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8610                 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
8611                         struct LIST_HEAD_TYPE *l_head;
8612
8613                         l_head = &sw->recp_list[i].filt_replay_rules;
8614                         if (!sw->recp_list[i].adv_rule)
8615                                 ice_rem_sw_rule_info(hw, l_head);
8616                         else
8617                                 ice_rem_adv_rule_info(hw, l_head);
8618                 }
8619         }
8620 }
8621
8622 /**
8623  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
8624  * @hw: pointer to the HW struct
8625  *
8626  * Deletes the filter replay rules.
8627  */
8628 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
8629 {
8630         ice_rm_sw_replay_rule_info(hw, hw->switch_info);
8631 }