net/ice/base: support flow director for GTPoGRE
drivers/net/ice/base/ice_flow.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2021 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IPV4_ID         2
17 #define ICE_FLOW_FLD_SZ_IPV6_ID         4
18 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
19 #define ICE_FLOW_FLD_SZ_IP_TTL          1
20 #define ICE_FLOW_FLD_SZ_IP_PROT         1
21 #define ICE_FLOW_FLD_SZ_PORT            2
22 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
23 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
24 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
25 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
26 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
27 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
28 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
29 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
30 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
31 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
32 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
33 #define ICE_FLOW_FLD_SZ_AH_SPI  4
34 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
35 #define ICE_FLOW_FLD_SZ_VXLAN_VNI       4
36 #define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
37
38 /* Describe properties of a protocol header field */
39 struct ice_flow_field_info {
40         enum ice_flow_seg_hdr hdr;
41         s16 off;        /* Offset from start of a protocol header, in bits */
42         u16 size;       /* Size of the field in bits */
43         u16 mask;       /* 16-bit mask for field */
44 };
45
46 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
47         .hdr = _hdr, \
48         .off = (_offset_bytes) * BITS_PER_BYTE, \
49         .size = (_size_bytes) * BITS_PER_BYTE, \
50         .mask = 0, \
51 }
52
53 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
54         .hdr = _hdr, \
55         .off = (_offset_bytes) * BITS_PER_BYTE, \
56         .size = (_size_bytes) * BITS_PER_BYTE, \
57         .mask = _mask, \
58 }
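/* Illustrative example (not from the original source, assuming BITS_PER_BYTE
 * is 8): an entry such as
 *   ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR)
 * expands to { .hdr = ICE_FLOW_SEG_HDR_IPV4, .off = 96, .size = 32, .mask = 0 },
 * i.e. the IPv4 source address that sits 12 bytes into the IPv4 header.
 */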
59
60 /* Table containing properties of supported protocol header fields */
61 static const
62 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
63         /* Ether */
64         /* ICE_FLOW_FIELD_IDX_ETH_DA */
65         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
66         /* ICE_FLOW_FIELD_IDX_ETH_SA */
67         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
68         /* ICE_FLOW_FIELD_IDX_S_VLAN */
69         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
70         /* ICE_FLOW_FIELD_IDX_C_VLAN */
71         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
72         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
73         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
74         /* IPv4 / IPv6 */
75         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
76         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
77                               0x00fc),
78         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
79         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
80                               0x0ff0),
81         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
82         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
83                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
84         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
85         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
86                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
87         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
88         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
89                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
90         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
91         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
92                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
93         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
94         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
95         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
96         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
97         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
99         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
100         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
101         /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
102         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
103                           ICE_FLOW_FLD_SZ_IPV4_ID),
104         /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
105         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
106                           ICE_FLOW_FLD_SZ_IPV6_ID),
107         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
108         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
109                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
110         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
111         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
112                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
113         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
114         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
115                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
116         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
117         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
118                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
119         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
120         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
121                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
122         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
123         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
124                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
125         /* Transport */
126         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
127         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
128         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
129         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
130         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
131         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
132         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
133         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
134         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
135         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
136         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
137         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
138         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
139         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
140         /* ARP */
141         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
142         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
143         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
144         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
145         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
146         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
147         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
148         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
149         /* ICE_FLOW_FIELD_IDX_ARP_OP */
150         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
151         /* ICMP */
152         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
153         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
154         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
155         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
156         /* GRE */
157         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
158         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
159         /* GTP */
160         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
161         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
162                           ICE_FLOW_FLD_SZ_GTP_TEID),
163         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
164         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
165                           ICE_FLOW_FLD_SZ_GTP_TEID),
166         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
167         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
168                           ICE_FLOW_FLD_SZ_GTP_TEID),
169         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
170         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
171                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
172         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
173         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
174                           ICE_FLOW_FLD_SZ_GTP_TEID),
175         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
176         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
177                           ICE_FLOW_FLD_SZ_GTP_TEID),
178         /* PPPOE */
179         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
180         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
181                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
182         /* PFCP */
183         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
184         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
185                           ICE_FLOW_FLD_SZ_PFCP_SEID),
186         /* L2TPV3 */
187         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
188         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
189                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
190         /* ESP */
191         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
192         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
193                           ICE_FLOW_FLD_SZ_ESP_SPI),
194         /* AH */
195         /* ICE_FLOW_FIELD_IDX_AH_SPI */
196         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
197                           ICE_FLOW_FLD_SZ_AH_SPI),
198         /* NAT_T_ESP */
199         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
200         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
201                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
202         /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
203         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
204                           ICE_FLOW_FLD_SZ_VXLAN_VNI),
205         /* ECPRI_TP0 */
206         /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
207         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
208                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
209         /* UDP_ECPRI_TP0 */
210         /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
211         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
212                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
213 };
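/* Note on the masked entries above (illustrative, not from the original
 * source): a non-zero mask from ICE_FLOW_FLD_INFO_MSK limits the match to
 * part of the extracted word; e.g. the GTPU EH QFI entry extracts two bytes
 * at offset 22 and its mask 0x3f00 keeps only the 6-bit QFI.
 */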
214
215 /* Bitmaps indicating relevant packet types for a particular protocol header
216  *
217  * Packet types for packets with an Outer/First/Single MAC header
218  */
219 static const u32 ice_ptypes_mac_ofos[] = {
220         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
221         0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
222         0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
223         0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
224         0x00000000, 0x00000000, 0x00000000, 0x00000000,
225         0x00000000, 0x00000000, 0x00000000, 0x00000000,
226         0x00000000, 0x00000000, 0x00000000, 0x00000000,
227         0x00000000, 0x00000000, 0x00000000, 0x00000000,
228 };
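/* Note (an assumption based on how these arrays are cast to ice_bitmap_t and
 * ANDed over ICE_FLOW_PTYPE_MAX bits below): each u32 word w covers packet
 * types 32 * w .. 32 * w + 31, least-significant bit first, so ptype N is bit
 * (N % 32) of word (N / 32), and a set bit marks that ptype as relevant for
 * the header the table is named after.
 */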
229
230 /* Packet types for packets with an Innermost/Last MAC VLAN header */
231 static const u32 ice_ptypes_macvlan_il[] = {
232         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
233         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
234         0x00000000, 0x00000000, 0x00000000, 0x00000000,
235         0x00000000, 0x00000000, 0x00000000, 0x00000000,
236         0x00000000, 0x00000000, 0x00000000, 0x00000000,
237         0x00000000, 0x00000000, 0x00000000, 0x00000000,
238         0x00000000, 0x00000000, 0x00000000, 0x00000000,
239         0x00000000, 0x00000000, 0x00000000, 0x00000000,
240 };
241
242 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
243  * does NOT include IPV4 other PTYPEs
244  */
245 static const u32 ice_ptypes_ipv4_ofos[] = {
246         0x1D800000, 0x24000800, 0x00000000, 0x00000000,
247         0x00000000, 0x00000155, 0x00000000, 0x00000000,
248         0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
249         0x00001500, 0x00000000, 0x00000000, 0x00000000,
250         0x00000000, 0x00000000, 0x00000000, 0x00000000,
251         0x00000000, 0x00000000, 0x00000000, 0x00000000,
252         0x00000000, 0x00000000, 0x00000000, 0x00000000,
253         0x00000000, 0x00000000, 0x00000000, 0x00000000,
254 };
255
256 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
257  * includes IPV4 other PTYPEs
258  */
259 static const u32 ice_ptypes_ipv4_ofos_all[] = {
260         0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
261         0x00000000, 0x00000155, 0x00000000, 0x00000000,
262         0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
263         0x03FFD000, 0x00000000, 0x02FBEFBC, 0x00000000,
264         0x00000000, 0x00000000, 0x00000000, 0x00000000,
265         0x00000000, 0x00000000, 0x00000000, 0x00000000,
266         0x00000000, 0x00000000, 0x00000000, 0x00000000,
267         0x00000000, 0x00000000, 0x00000000, 0x00000000,
268 };
269
270 /* Packet types for packets with an Innermost/Last IPv4 header */
271 static const u32 ice_ptypes_ipv4_il[] = {
272         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
273         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
274         0x00000000, 0x00000000, 0x001FF800, 0x00100000,
275         0xFC0FC000, 0x00000000, 0xBC0BC0BC, 0x00000BC0,
276         0x00000000, 0x00000000, 0x00000000, 0x00000000,
277         0x00000000, 0x00000000, 0x00000000, 0x00000000,
278         0x00000000, 0x00000000, 0x00000000, 0x00000000,
279         0x00000000, 0x00000000, 0x00000000, 0x00000000,
280 };
281
282 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
283  * does NOT include IVP6 other PTYPEs
284  */
285 static const u32 ice_ptypes_ipv6_ofos[] = {
286         0x00000000, 0x00000000, 0x76000000, 0x10002000,
287         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
288         0x00000000, 0x03F00000, 0x00000540, 0x00000000,
289         0x00002A00, 0x00000000, 0x00000000, 0x00000000,
290         0x00000000, 0x00000000, 0x00000000, 0x00000000,
291         0x00000000, 0x00000000, 0x00000000, 0x00000000,
292         0x00000000, 0x00000000, 0x00000000, 0x00000000,
293         0x00000000, 0x00000000, 0x00000000, 0x00000000,
294 };
295
296 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
297  * includes IPV6 other PTYPEs
298  */
299 static const u32 ice_ptypes_ipv6_ofos_all[] = {
300         0x00000000, 0x00000000, 0x76000000, 0x1EFDE000,
301         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
302         0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
303         0xFC002000, 0x0000003F, 0xBC000000, 0x0002FBEF,
304         0x00000000, 0x00000000, 0x00000000, 0x00000000,
305         0x00000000, 0x00000000, 0x00000000, 0x00000000,
306         0x00000000, 0x00000000, 0x00000000, 0x00000000,
307         0x00000000, 0x00000000, 0x00000000, 0x00000000,
308 };
309
310 /* Packet types for packets with an Innermost/Last IPv6 header */
311 static const u32 ice_ptypes_ipv6_il[] = {
312         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
313         0x00000770, 0x00000000, 0x00000000, 0x00000000,
314         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
315         0x03F00000, 0x0000003F, 0x02F02F00, 0x0002F02F,
316         0x00000000, 0x00000000, 0x00000000, 0x00000000,
317         0x00000000, 0x00000000, 0x00000000, 0x00000000,
318         0x00000000, 0x00000000, 0x00000000, 0x00000000,
319         0x00000000, 0x00000000, 0x00000000, 0x00000000,
320 };
321
322 /* Packet types for packets with an Outer/First/Single
323  * non-frag IPv4 header - no L4
324  */
325 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
326         0x10800000, 0x04000800, 0x00000000, 0x00000000,
327         0x00000000, 0x00000000, 0x00000000, 0x00000000,
328         0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
329         0x00001500, 0x00000000, 0x00000000, 0x00000000,
330         0x00000000, 0x00000000, 0x00000000, 0x00000000,
331         0x00000000, 0x00000000, 0x00000000, 0x00000000,
332         0x00000000, 0x00000000, 0x00000000, 0x00000000,
333         0x00000000, 0x00000000, 0x00000000, 0x00000000,
334 };
335
336 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
337 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
338         0x60000000, 0x18043008, 0x80000002, 0x6010c021,
339         0x00000008, 0x00000000, 0x00000000, 0x00000000,
340         0x00000000, 0x00000000, 0x00139800, 0x00000000,
341         0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
342         0x00000000, 0x00000000, 0x00000000, 0x00000000,
343         0x00000000, 0x00000000, 0x00000000, 0x00000000,
344         0x00000000, 0x00000000, 0x00000000, 0x00000000,
345         0x00000000, 0x00000000, 0x00000000, 0x00000000,
346 };
347
348 /* Packet types for packets with an Outer/First/Single
349  * non-frag IPv6 header - no L4
350  */
351 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
352         0x00000000, 0x00000000, 0x42000000, 0x10002000,
353         0x00000000, 0x00000000, 0x00000000, 0x00000000,
354         0x00000000, 0x02300000, 0x00000540, 0x00000000,
355         0x00002A00, 0x00000000, 0x00000000, 0x00000000,
356         0x00000000, 0x00000000, 0x00000000, 0x00000000,
357         0x00000000, 0x00000000, 0x00000000, 0x00000000,
358         0x00000000, 0x00000000, 0x00000000, 0x00000000,
359         0x00000000, 0x00000000, 0x00000000, 0x00000000,
360 };
361
362 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
363 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
364         0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
365         0x00000430, 0x00000000, 0x00000000, 0x00000000,
366         0x00000000, 0x00000000, 0x4e600000, 0x00000000,
367         0x02300000, 0x00000023, 0x00000000, 0x00000000,
368         0x00000000, 0x00000000, 0x00000000, 0x00000000,
369         0x00000000, 0x00000000, 0x00000000, 0x00000000,
370         0x00000000, 0x00000000, 0x00000000, 0x00000000,
371         0x00000000, 0x00000000, 0x00000000, 0x00000000,
372 };
373
374 /* Packet types for packets with an Outermost/First ARP header */
375 static const u32 ice_ptypes_arp_of[] = {
376         0x00000800, 0x00000000, 0x00000000, 0x00000000,
377         0x00000000, 0x00000000, 0x00000000, 0x00000000,
378         0x00000000, 0x00000000, 0x00000000, 0x00000000,
379         0x00000000, 0x00000000, 0x00000000, 0x00000000,
380         0x00000000, 0x00000000, 0x00000000, 0x00000000,
381         0x00000000, 0x00000000, 0x00000000, 0x00000000,
382         0x00000000, 0x00000000, 0x00000000, 0x00000000,
383         0x00000000, 0x00000000, 0x00000000, 0x00000000,
384 };
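/* Example (illustrative): the only bit set above is bit 11 of word 0
 * (0x00000800), i.e. ptype 11 under the layout described earlier, which is
 * the ptype carrying an outermost ARP header.
 */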
385
386 /* UDP Packet types for non-tunneled packets or tunneled
387  * packets with inner UDP.
388  */
389 static const u32 ice_ptypes_udp_il[] = {
390         0x81000000, 0x20204040, 0x04000010, 0x80810102,
391         0x00000040, 0x00000000, 0x00000000, 0x00000000,
392         0x00000000, 0x00410000, 0x908427E0, 0x00100007,
393         0x10410000, 0x00000004, 0x10410410, 0x00004104,
394         0x00000000, 0x00000000, 0x00000000, 0x00000000,
395         0x00000000, 0x00000000, 0x00000000, 0x00000000,
396         0x00000000, 0x00000000, 0x00000000, 0x00000000,
397         0x00000000, 0x00000000, 0x00000000, 0x00000000,
398 };
399
400 /* Packet types for packets with an Innermost/Last TCP header */
401 static const u32 ice_ptypes_tcp_il[] = {
402         0x04000000, 0x80810102, 0x10000040, 0x02040408,
403         0x00000102, 0x00000000, 0x00000000, 0x00000000,
404         0x00000000, 0x00820000, 0x21084000, 0x00000000,
405         0x20820000, 0x00000008, 0x20820820, 0x00008208,
406         0x00000000, 0x00000000, 0x00000000, 0x00000000,
407         0x00000000, 0x00000000, 0x00000000, 0x00000000,
408         0x00000000, 0x00000000, 0x00000000, 0x00000000,
409         0x00000000, 0x00000000, 0x00000000, 0x00000000,
410 };
411
412 /* Packet types for packets with an Innermost/Last SCTP header */
413 static const u32 ice_ptypes_sctp_il[] = {
414         0x08000000, 0x01020204, 0x20000081, 0x04080810,
415         0x00000204, 0x00000000, 0x00000000, 0x00000000,
416         0x00000000, 0x01040000, 0x00000000, 0x00000000,
417         0x41040000, 0x00000010, 0x00000000, 0x00000000,
418         0x00000000, 0x00000000, 0x00000000, 0x00000000,
419         0x00000000, 0x00000000, 0x00000000, 0x00000000,
420         0x00000000, 0x00000000, 0x00000000, 0x00000000,
421         0x00000000, 0x00000000, 0x00000000, 0x00000000,
422 };
423
424 /* Packet types for packets with an Outermost/First ICMP header */
425 static const u32 ice_ptypes_icmp_of[] = {
426         0x10000000, 0x00000000, 0x00000000, 0x00000000,
427         0x00000000, 0x00000000, 0x00000000, 0x00000000,
428         0x00000000, 0x00000000, 0x00000000, 0x00000000,
429         0x00000000, 0x00000000, 0x00000000, 0x00000000,
430         0x00000000, 0x00000000, 0x00000000, 0x00000000,
431         0x00000000, 0x00000000, 0x00000000, 0x00000000,
432         0x00000000, 0x00000000, 0x00000000, 0x00000000,
433         0x00000000, 0x00000000, 0x00000000, 0x00000000,
434 };
435
436 /* Packet types for packets with an Innermost/Last ICMP header */
437 static const u32 ice_ptypes_icmp_il[] = {
438         0x00000000, 0x02040408, 0x40000102, 0x08101020,
439         0x00000408, 0x00000000, 0x00000000, 0x00000000,
440         0x00000000, 0x00000000, 0x42108000, 0x00000000,
441         0x82080000, 0x00000020, 0x00000000, 0x00000000,
442         0x00000000, 0x00000000, 0x00000000, 0x00000000,
443         0x00000000, 0x00000000, 0x00000000, 0x00000000,
444         0x00000000, 0x00000000, 0x00000000, 0x00000000,
445         0x00000000, 0x00000000, 0x00000000, 0x00000000,
446 };
447
448 /* Packet types for packets with an Outermost/First GRE header */
449 static const u32 ice_ptypes_gre_of[] = {
450         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
451         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
452         0x00000000, 0x00000000, 0x00000000, 0x00000000,
453         0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF,
454         0x00000000, 0x00000000, 0x00000000, 0x00000000,
455         0x00000000, 0x00000000, 0x00000000, 0x00000000,
456         0x00000000, 0x00000000, 0x00000000, 0x00000000,
457         0x00000000, 0x00000000, 0x00000000, 0x00000000,
458 };
459
460 /* Packet types for packets with an Innermost/Last MAC header */
461 static const u32 ice_ptypes_mac_il[] = {
462         0x00000000, 0x20000000, 0x00000000, 0x00000000,
463         0x00000000, 0x00000000, 0x00000000, 0x00000000,
464         0x00000000, 0x00000000, 0x00000000, 0x00000000,
465         0x00000000, 0x00000000, 0x00000000, 0x00000000,
466         0x00000000, 0x00000000, 0x00000000, 0x00000000,
467         0x00000000, 0x00000000, 0x00000000, 0x00000000,
468         0x00000000, 0x00000000, 0x00000000, 0x00000000,
469         0x00000000, 0x00000000, 0x00000000, 0x00000000,
470 };
471
472 /* Packet types for GTPC */
473 static const u32 ice_ptypes_gtpc[] = {
474         0x00000000, 0x00000000, 0x00000000, 0x00000000,
475         0x00000000, 0x00000000, 0x00000000, 0x00000000,
476         0x00000000, 0x00000000, 0x000001E0, 0x00000000,
477         0x00000000, 0x00000000, 0x00000000, 0x00000000,
478         0x00000000, 0x00000000, 0x00000000, 0x00000000,
479         0x00000000, 0x00000000, 0x00000000, 0x00000000,
480         0x00000000, 0x00000000, 0x00000000, 0x00000000,
481         0x00000000, 0x00000000, 0x00000000, 0x00000000,
482 };
483
484 /* Packet types for VXLAN with VNI */
485 static const u32 ice_ptypes_vxlan_vni[] = {
486         0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
487         0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
488         0x00000000, 0x00000000, 0x00000000, 0x00000000,
489         0x00000000, 0x00000000, 0x00000000, 0x00000000,
490         0x00000000, 0x00000000, 0x00000000, 0x00000000,
491         0x00000000, 0x00000000, 0x00000000, 0x00000000,
492         0x00000000, 0x00000000, 0x00000000, 0x00000000,
493         0x00000000, 0x00000000, 0x00000000, 0x00000000,
494 };
495
496 /* Packet types for GTPC with TEID */
497 static const u32 ice_ptypes_gtpc_tid[] = {
498         0x00000000, 0x00000000, 0x00000000, 0x00000000,
499         0x00000000, 0x00000000, 0x00000000, 0x00000000,
500         0x00000000, 0x00000000, 0x00000060, 0x00000000,
501         0x00000000, 0x00000000, 0x00000000, 0x00000000,
502         0x00000000, 0x00000000, 0x00000000, 0x00000000,
503         0x00000000, 0x00000000, 0x00000000, 0x00000000,
504         0x00000000, 0x00000000, 0x00000000, 0x00000000,
505         0x00000000, 0x00000000, 0x00000000, 0x00000000,
506 };
507
508 /* Packet types for GTPU */
509 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
510         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
511         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
512         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
513         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
514         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
515         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
516         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
517         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
518         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
519         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
520         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
521         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
522         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
523         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
524         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
525         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
526         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
527         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
528         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
529         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
530         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
531         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
532         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
533         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
534         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
535         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
536         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
537         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
538         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
539         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
540         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
541         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
542         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
543         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
544         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
545         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
546         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
547         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
548         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
549         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
550         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
551         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
552         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
553         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
554         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
555         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
556         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
557         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
558         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
559         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
560         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
561         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
562         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
563         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
564         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
565         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
566         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
567         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
568         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
569         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
570 };
571
572 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
573         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
574         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
575         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
576         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
577         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
578         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
579         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
580         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
581         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
582         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
583         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
584         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
585         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
586         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
587         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
588         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
589         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
590         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
591         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
592         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
593         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
594         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
595         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
596         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
597         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
598         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
599         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
600         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
601         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
602         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
603         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
604         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
605         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
606         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
607         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
608         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
609         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
610         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
611         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
612         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
613         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
614         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
615         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
616         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
617         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
618         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
619         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
620         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
621         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
622         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
623         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
624         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
625         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
626         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
627         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
628         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
629         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
630         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
631         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
632         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
633 };
634
635 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
636         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
637         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
638         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
639         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
640         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
641         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
642         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
643         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
644         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
645         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
646         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
647         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
648         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
649         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
650         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
651         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
652         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
653         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
654         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
655         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
656         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
657         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
658         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
659         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
660         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
661         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
662         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
663         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
664         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
665         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
666         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
667         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
668         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
669         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
670         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
671         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
672         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
673         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
674         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
675         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
676         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
677         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
678         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
679         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
680         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
681         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
682         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
683         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
684         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
685         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
686         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
687         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
688         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
689         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
690         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
691         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
692         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
693         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
694         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
695         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
696 };
697
698 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
699         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
700         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
701         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
702         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
703         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
704         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
705         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
706         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
707         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
708         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
709         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
710         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
711         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
712         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
713         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
714         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
715         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
716         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
717         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
718         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
719         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
720         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
721         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
722         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
723         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
724         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
725         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
726         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
727         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
728         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
729         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
730         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
731         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
732         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
733         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
734         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
735         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
736         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
737         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
738         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
739         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
740         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
741         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
742         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
743         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
744         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
745         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
746         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
747         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
748         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
749         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
750         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
751         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
752         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
753         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
754         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
755         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
756         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
757         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
758         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
759 };
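/* Note (illustrative summary, not from the original source): the four
 * ice_attr_gtpu_* tables list the same set of GTP-U packet types, including
 * the MAC_*_TUN_* entries for GTP-U behind an outer tunnel (the GTPoGRE case
 * named in the commit subject), and differ only in the attribute attached to
 * each entry, so one hardware ptype can be qualified as a plain session,
 * PDU-EH, downlink or uplink GTP-U packet via the attr/attr_cnt members of
 * struct ice_flow_prof_params.
 */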
760
761 static const u32 ice_ptypes_gtpu[] = {
762         0x00000000, 0x00000000, 0x00000000, 0x00000000,
763         0x00000000, 0x00000000, 0x00000000, 0x00000000,
764         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
765         0x00000000, 0x0000003F, 0xBEFBEFBC, 0x0002FBEF,
766         0x00000000, 0x00000000, 0x00000000, 0x00000000,
767         0x00000000, 0x00000000, 0x00000000, 0x00000000,
768         0x00000000, 0x00000000, 0x00000000, 0x00000000,
769         0x00000000, 0x00000000, 0x00000000, 0x00000000,
770 };
771
772 /* Packet types for pppoe */
773 static const u32 ice_ptypes_pppoe[] = {
774         0x00000000, 0x00000000, 0x00000000, 0x00000000,
775         0x00000000, 0x00000000, 0x00000000, 0x00000000,
776         0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
777         0x00000000, 0x00000000, 0x00000000, 0x00000000,
778         0x00000000, 0x00000000, 0x00000000, 0x00000000,
779         0x00000000, 0x00000000, 0x00000000, 0x00000000,
780         0x00000000, 0x00000000, 0x00000000, 0x00000000,
781         0x00000000, 0x00000000, 0x00000000, 0x00000000,
782 };
783
784 /* Packet types for packets with PFCP NODE header */
785 static const u32 ice_ptypes_pfcp_node[] = {
786         0x00000000, 0x00000000, 0x00000000, 0x00000000,
787         0x00000000, 0x00000000, 0x00000000, 0x00000000,
788         0x00000000, 0x00000000, 0x80000000, 0x00000002,
789         0x00000000, 0x00000000, 0x00000000, 0x00000000,
790         0x00000000, 0x00000000, 0x00000000, 0x00000000,
791         0x00000000, 0x00000000, 0x00000000, 0x00000000,
792         0x00000000, 0x00000000, 0x00000000, 0x00000000,
793         0x00000000, 0x00000000, 0x00000000, 0x00000000,
794 };
795
796 /* Packet types for packets with PFCP SESSION header */
797 static const u32 ice_ptypes_pfcp_session[] = {
798         0x00000000, 0x00000000, 0x00000000, 0x00000000,
799         0x00000000, 0x00000000, 0x00000000, 0x00000000,
800         0x00000000, 0x00000000, 0x00000000, 0x00000005,
801         0x00000000, 0x00000000, 0x00000000, 0x00000000,
802         0x00000000, 0x00000000, 0x00000000, 0x00000000,
803         0x00000000, 0x00000000, 0x00000000, 0x00000000,
804         0x00000000, 0x00000000, 0x00000000, 0x00000000,
805         0x00000000, 0x00000000, 0x00000000, 0x00000000,
806 };
807
808 /* Packet types for l2tpv3 */
809 static const u32 ice_ptypes_l2tpv3[] = {
810         0x00000000, 0x00000000, 0x00000000, 0x00000000,
811         0x00000000, 0x00000000, 0x00000000, 0x00000000,
812         0x00000000, 0x00000000, 0x00000000, 0x00000300,
813         0x00000000, 0x00000000, 0x00000000, 0x00000000,
814         0x00000000, 0x00000000, 0x00000000, 0x00000000,
815         0x00000000, 0x00000000, 0x00000000, 0x00000000,
816         0x00000000, 0x00000000, 0x00000000, 0x00000000,
817         0x00000000, 0x00000000, 0x00000000, 0x00000000,
818 };
819
820 /* Packet types for esp */
821 static const u32 ice_ptypes_esp[] = {
822         0x00000000, 0x00000000, 0x00000000, 0x00000000,
823         0x00000000, 0x00000003, 0x00000000, 0x00000000,
824         0x00000000, 0x00000000, 0x00000000, 0x00000000,
825         0x00000000, 0x00000000, 0x00000000, 0x00000000,
826         0x00000000, 0x00000000, 0x00000000, 0x00000000,
827         0x00000000, 0x00000000, 0x00000000, 0x00000000,
828         0x00000000, 0x00000000, 0x00000000, 0x00000000,
829         0x00000000, 0x00000000, 0x00000000, 0x00000000,
830 };
831
832 /* Packet types for ah */
833 static const u32 ice_ptypes_ah[] = {
834         0x00000000, 0x00000000, 0x00000000, 0x00000000,
835         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
836         0x00000000, 0x00000000, 0x00000000, 0x00000000,
837         0x00000000, 0x00000000, 0x00000000, 0x00000000,
838         0x00000000, 0x00000000, 0x00000000, 0x00000000,
839         0x00000000, 0x00000000, 0x00000000, 0x00000000,
840         0x00000000, 0x00000000, 0x00000000, 0x00000000,
841         0x00000000, 0x00000000, 0x00000000, 0x00000000,
842 };
843
844 /* Packet types for packets with NAT_T ESP header */
845 static const u32 ice_ptypes_nat_t_esp[] = {
846         0x00000000, 0x00000000, 0x00000000, 0x00000000,
847         0x00000000, 0x00000030, 0x00000000, 0x00000000,
848         0x00000000, 0x00000000, 0x00000000, 0x00000000,
849         0x00000000, 0x00000000, 0x00000000, 0x00000000,
850         0x00000000, 0x00000000, 0x00000000, 0x00000000,
851         0x00000000, 0x00000000, 0x00000000, 0x00000000,
852         0x00000000, 0x00000000, 0x00000000, 0x00000000,
853         0x00000000, 0x00000000, 0x00000000, 0x00000000,
854 };
855
856 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
857         0x00000846, 0x00000000, 0x00000000, 0x00000000,
858         0x00000000, 0x00000000, 0x00000000, 0x00000000,
859         0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
860         0x00000000, 0x00000000, 0x00000000, 0x00000000,
861         0x00000000, 0x00000000, 0x00000000, 0x00000000,
862         0x00000000, 0x00000000, 0x00000000, 0x00000000,
863         0x00000000, 0x00000000, 0x00000000, 0x00000000,
864         0x00000000, 0x00000000, 0x00000000, 0x00000000,
865 };
866
867 static const u32 ice_ptypes_gtpu_no_ip[] = {
868         0x00000000, 0x00000000, 0x00000000, 0x00000000,
869         0x00000000, 0x00000000, 0x00000000, 0x00000000,
870         0x00000000, 0x00000000, 0x00000600, 0x00000000,
871         0x00000000, 0x00000000, 0x00000000, 0x00000000,
872         0x00000000, 0x00000000, 0x00000000, 0x00000000,
873         0x00000000, 0x00000000, 0x00000000, 0x00000000,
874         0x00000000, 0x00000000, 0x00000000, 0x00000000,
875         0x00000000, 0x00000000, 0x00000000, 0x00000000,
876 };
877
878 static const u32 ice_ptypes_ecpri_tp0[] = {
879         0x00000000, 0x00000000, 0x00000000, 0x00000000,
880         0x00000000, 0x00000000, 0x00000000, 0x00000000,
881         0x00000000, 0x00000000, 0x00000000, 0x00000400,
882         0x00000000, 0x00000000, 0x00000000, 0x00000000,
883         0x00000000, 0x00000000, 0x00000000, 0x00000000,
884         0x00000000, 0x00000000, 0x00000000, 0x00000000,
885         0x00000000, 0x00000000, 0x00000000, 0x00000000,
886         0x00000000, 0x00000000, 0x00000000, 0x00000000,
887 };
888
889 static const u32 ice_ptypes_udp_ecpri_tp0[] = {
890         0x00000000, 0x00000000, 0x00000000, 0x00000000,
891         0x00000000, 0x00000000, 0x00000000, 0x00000000,
892         0x00000000, 0x00000000, 0x00000000, 0x00100000,
893         0x00000000, 0x00000000, 0x00000000, 0x00000000,
894         0x00000000, 0x00000000, 0x00000000, 0x00000000,
895         0x00000000, 0x00000000, 0x00000000, 0x00000000,
896         0x00000000, 0x00000000, 0x00000000, 0x00000000,
897         0x00000000, 0x00000000, 0x00000000, 0x00000000,
898 };
899
900 static const u32 ice_ptypes_l2tpv2[] = {
901         0x00000000, 0x00000000, 0x00000000, 0x00000000,
902         0x00000000, 0x00000000, 0x00000000, 0x00000000,
903         0x00000000, 0x00000000, 0x00000000, 0x00000000,
904         0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
905         0x00000000, 0x00000000, 0x00000000, 0x00000000,
906         0x00000000, 0x00000000, 0x00000000, 0x00000000,
907         0x00000000, 0x00000000, 0x00000000, 0x00000000,
908         0x00000000, 0x00000000, 0x00000000, 0x00000000,
909 };
910
911 static const u32 ice_ptypes_ppp[] = {
912         0x00000000, 0x00000000, 0x00000000, 0x00000000,
913         0x00000000, 0x00000000, 0x00000000, 0x00000000,
914         0x00000000, 0x00000000, 0x00000000, 0x00000000,
915         0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
916         0x00000000, 0x00000000, 0x00000000, 0x00000000,
917         0x00000000, 0x00000000, 0x00000000, 0x00000000,
918         0x00000000, 0x00000000, 0x00000000, 0x00000000,
919         0x00000000, 0x00000000, 0x00000000, 0x00000000,
920 };
921
922 static const u32 ice_ptypes_ipv4_frag[] = {
923         0x00400000, 0x00000000, 0x00000000, 0x00000000,
924         0x00000000, 0x00000000, 0x00000000, 0x00000000,
925         0x00000000, 0x00000000, 0x00000000, 0x00000000,
926         0x00000000, 0x00000000, 0x00000000, 0x00000000,
927         0x00000000, 0x00000000, 0x00000000, 0x00000000,
928         0x00000000, 0x00000000, 0x00000000, 0x00000000,
929         0x00000000, 0x00000000, 0x00000000, 0x00000000,
930         0x00000000, 0x00000000, 0x00000000, 0x00000000,
931 };
932
933 static const u32 ice_ptypes_ipv6_frag[] = {
934         0x00000000, 0x00000000, 0x01000000, 0x00000000,
935         0x00000000, 0x00000000, 0x00000000, 0x00000000,
936         0x00000000, 0x00000000, 0x00000000, 0x00000000,
937         0x00000000, 0x00000000, 0x00000000, 0x00000000,
938         0x00000000, 0x00000000, 0x00000000, 0x00000000,
939         0x00000000, 0x00000000, 0x00000000, 0x00000000,
940         0x00000000, 0x00000000, 0x00000000, 0x00000000,
941         0x00000000, 0x00000000, 0x00000000, 0x00000000,
942 };
943
944 /* Manage parameters and info used during the creation of a flow profile */
945 struct ice_flow_prof_params {
946         enum ice_block blk;
947         u16 entry_length; /* # of bytes a formatted entry will require */
948         u8 es_cnt;
949         struct ice_flow_prof *prof;
950
951         /* For ACL, es[0] will hold the data of ICE_RX_MDID_PKT_FLAGS_15_0,
952          * which gives us the direction flags.
953          */
954         struct ice_fv_word es[ICE_MAX_FV_WORDS];
955         /* attr can be used to attach attributes to a particular PTYPE */
956         const struct ice_ptype_attributes *attr;
957         u16 attr_cnt;
958
959         u16 mask[ICE_MAX_FV_WORDS];
960         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
961 };
962
963 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
964         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
965         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
966         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
967         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
968         ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
969         ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
970         ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP | ICE_FLOW_SEG_HDR_GRE)
971
972 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
973         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
974 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
975         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
976          ICE_FLOW_SEG_HDR_ARP)
977 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
978         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
979          ICE_FLOW_SEG_HDR_SCTP)
980 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
981 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
982         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
983
984 /**
985  * ice_flow_val_hdrs - validates the protocol headers of packet segments
986  * @segs: array of one or more packet segments that describe the flow
987  * @segs_cnt: number of packet segments provided
988  */
989 static enum ice_status
990 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
991 {
992         u8 i;
993
994         for (i = 0; i < segs_cnt; i++) {
995                 /* Multiple L3 headers */
996                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
997                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
998                         return ICE_ERR_PARAM;
999
1000                 /* Multiple L4 headers */
1001                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
1002                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
1003                         return ICE_ERR_PARAM;
1004         }
1005
1006         return ICE_SUCCESS;
1007 }
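/* Example (derived from the checks above): a segment whose hdrs value has
 * both ICE_FLOW_SEG_HDR_IPV4 and ICE_FLOW_SEG_HDR_IPV6 set leaves two bits in
 * ICE_FLOW_SEG_HDRS_L3_MASK, so ice_is_pow2() fails and ICE_ERR_PARAM is
 * returned; the same applies to multiple L4 headers in one segment.
 */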
1008
1009 /* Sizes of fixed known protocol headers without header options */
1010 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
1011 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
1012 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
1013 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
1014 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
1015 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
1016 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
1017 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
1018 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
1019
1020 /**
1021  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
1022  * @params: information about the flow to be processed
1023  * @seg: index of packet segment whose header size is to be determined
1024  */
1025 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
1026 {
1027         u16 sz;
1028
1029         /* L2 headers */
1030         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
1031                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
1032
1033         /* L3 headers */
1034         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
1035                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
1036         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
1037                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
1038         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
1039                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
1040         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
1041                 /* An L3 header is required if an L4 header is specified */
1042                 return 0;
1043
1044         /* L4 headers */
1045         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
1046                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
1047         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
1048                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
1049         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
1050                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
1051         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
1052                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
1053
1054         return sz;
1055 }
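
/* Worked example (editor's addition): a segment whose hdrs bitmap sets
 * ICE_FLOW_SEG_HDR_VLAN, ICE_FLOW_SEG_HDR_IPV6 and ICE_FLOW_SEG_HDR_UDP yields
 * 16 + 40 + 8 = 64 bytes, while a segment that sets an L4 header without any
 * L3 header yields 0 to flag the invalid layout.
 */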
1056
1057 /**
1058  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
1059  * @params: information about the flow to be processed
1060  *
1061  * This function identifies the packet types associated with the protocol
1062  * headers present in the packet segments of the specified flow profile.
1063  */
1064 static enum ice_status
1065 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
1066 {
1067         struct ice_flow_prof *prof;
1068         u8 i;
1069
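        /* Start with every PTYPE enabled; processing each segment's protocol
         * headers below narrows the set, mostly by ANDing it with the PTYPE
         * bitmap of each header present. Where outer and inner bitmaps both
         * exist, segment 0 selects the outermost (ofos) variant and later
         * segments the inner (il) variant.
         */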
1070         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
1071                    ICE_NONDMA_MEM);
1072
1073         prof = params->prof;
1074
1075         for (i = 0; i < params->prof->segs_cnt; i++) {
1076                 const ice_bitmap_t *src;
1077                 u32 hdrs;
1078
1079                 hdrs = prof->segs[i].hdrs;
1080
1081                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
1082                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
1083                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
1084                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1085                                        ICE_FLOW_PTYPE_MAX);
1086                 }
1087
1088                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
1089                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
1090                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1091                                        ICE_FLOW_PTYPE_MAX);
1092                 }
1093
1094                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
1095                         ice_and_bitmap(params->ptypes, params->ptypes,
1096                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
1097                                        ICE_FLOW_PTYPE_MAX);
1098                 }
1099
1100                 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
1101                         src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
1102                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1103                                        ICE_FLOW_PTYPE_MAX);
1104                 }
1105                 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1106                     (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1107                         src = i ?
1108                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
1109                                 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
1110                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1111                                        ICE_FLOW_PTYPE_MAX);
1112                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1113                            (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1114                         src = i ?
1115                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
1116                                 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
1117                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1118                                        ICE_FLOW_PTYPE_MAX);
1119                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1120                                 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1121                         src = (const ice_bitmap_t *)ice_ptypes_ipv4_frag;
1122                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1123                                        ICE_FLOW_PTYPE_MAX);
1124                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1125                                 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1126                         src = (const ice_bitmap_t *)ice_ptypes_ipv6_frag;
1127                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1128                                        ICE_FLOW_PTYPE_MAX);
1129                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1130                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1131                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
1132                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
1133                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1134                                        ICE_FLOW_PTYPE_MAX);
1135                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
1136                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
1137                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
1138                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1139                                        ICE_FLOW_PTYPE_MAX);
1140                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1141                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1142                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
1143                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
1144                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1145                                        ICE_FLOW_PTYPE_MAX);
1146                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
1147                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
1148                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
1149                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1150                                        ICE_FLOW_PTYPE_MAX);
1151                 }
1152
1153                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
1154                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
1155                         ice_and_bitmap(params->ptypes, params->ptypes,
1156                                        src, ICE_FLOW_PTYPE_MAX);
1157                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
1158                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1159                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1160                                        ICE_FLOW_PTYPE_MAX);
1161                 } else {
1162                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1163                         ice_andnot_bitmap(params->ptypes, params->ptypes, src,
1164                                           ICE_FLOW_PTYPE_MAX);
1165                 }
1166
1167                 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
1168                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
1169                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1170                                        ICE_FLOW_PTYPE_MAX);
1171                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
1172                         ice_and_bitmap(params->ptypes, params->ptypes,
1173                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
1174                                        ICE_FLOW_PTYPE_MAX);
1175                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
1176                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
1177                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1178                                        ICE_FLOW_PTYPE_MAX);
1179                 }
1180
1181                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
1182                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
1183                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
1184                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1185                                        ICE_FLOW_PTYPE_MAX);
1186                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
1187                         src = (const ice_bitmap_t *)ice_ptypes_gre_of;
1188                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1189                                        ICE_FLOW_PTYPE_MAX);
1190                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
1191                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
1192                         ice_and_bitmap(params->ptypes, params->ptypes,
1193                                        src, ICE_FLOW_PTYPE_MAX);
1194                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
1195                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
1196                         ice_and_bitmap(params->ptypes, params->ptypes,
1197                                        src, ICE_FLOW_PTYPE_MAX);
1198                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
1199                         src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
1200                         ice_and_bitmap(params->ptypes, params->ptypes,
1201                                        src, ICE_FLOW_PTYPE_MAX);
1202                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
1203                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1204                         ice_and_bitmap(params->ptypes, params->ptypes,
1205                                        src, ICE_FLOW_PTYPE_MAX);
1206
1207                         /* Attributes for GTP packet with downlink */
1208                         params->attr = ice_attr_gtpu_down;
1209                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1210                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
1211                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1212                         ice_and_bitmap(params->ptypes, params->ptypes,
1213                                        src, ICE_FLOW_PTYPE_MAX);
1214
1215                         /* Attributes for GTP packet with uplink */
1216                         params->attr = ice_attr_gtpu_up;
1217                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1218                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
1219                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1220                         ice_and_bitmap(params->ptypes, params->ptypes,
1221                                        src, ICE_FLOW_PTYPE_MAX);
1222
1223                         /* Attributes for GTP packet with Extension Header */
1224                         params->attr = ice_attr_gtpu_eh;
1225                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1226                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1227                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1228                         ice_and_bitmap(params->ptypes, params->ptypes,
1229                                        src, ICE_FLOW_PTYPE_MAX);
1230
1231                         /* Attributes for GTP packet without Extension Header */
1232                         params->attr = ice_attr_gtpu_session;
1233                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1234                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
1235                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
1236                         ice_and_bitmap(params->ptypes, params->ptypes,
1237                                        src, ICE_FLOW_PTYPE_MAX);
1238                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1239                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1240                         ice_and_bitmap(params->ptypes, params->ptypes,
1241                                        src, ICE_FLOW_PTYPE_MAX);
1242                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1243                         src = (const ice_bitmap_t *)ice_ptypes_esp;
1244                         ice_and_bitmap(params->ptypes, params->ptypes,
1245                                        src, ICE_FLOW_PTYPE_MAX);
1246                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1247                         src = (const ice_bitmap_t *)ice_ptypes_ah;
1248                         ice_and_bitmap(params->ptypes, params->ptypes,
1249                                        src, ICE_FLOW_PTYPE_MAX);
1250                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1251                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1252                         ice_and_bitmap(params->ptypes, params->ptypes,
1253                                        src, ICE_FLOW_PTYPE_MAX);
1254                 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1255                         src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1256                         ice_and_bitmap(params->ptypes, params->ptypes,
1257                                        src, ICE_FLOW_PTYPE_MAX);
1258                 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1259                         src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1260                         ice_and_bitmap(params->ptypes, params->ptypes,
1261                                        src, ICE_FLOW_PTYPE_MAX);
1262                 }
1263
1264                 if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
1265                         src = (const ice_bitmap_t *)ice_ptypes_ppp;
1266                         ice_and_bitmap(params->ptypes, params->ptypes,
1267                                        src, ICE_FLOW_PTYPE_MAX);
1268                 }
1269
1270                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1271                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1272                                 src =
1273                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1274                         else
1275                                 src =
1276                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1277
1278                         ice_and_bitmap(params->ptypes, params->ptypes,
1279                                        src, ICE_FLOW_PTYPE_MAX);
1280                 } else {
1281                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1282                         ice_andnot_bitmap(params->ptypes, params->ptypes,
1283                                           src, ICE_FLOW_PTYPE_MAX);
1284
1285                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1286                         ice_andnot_bitmap(params->ptypes, params->ptypes,
1287                                           src, ICE_FLOW_PTYPE_MAX);
1288                 }
1289         }
1290
1291         return ICE_SUCCESS;
1292 }
1293
1294 /**
1295  * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
1296  * @hw: pointer to the HW struct
1297  * @params: information about the flow to be processed
1298  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1299  *
1300  * This function allocates an extraction sequence entry for a DWORD-sized
1301  * chunk of the packet flags.
1302  */
1303 static enum ice_status
1304 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1305                           struct ice_flow_prof_params *params,
1306                           enum ice_flex_mdid_pkt_flags flags)
1307 {
1308         u8 fv_words = hw->blk[params->blk].es.fvw;
1309         u8 idx;
1310
1311         /* Make sure the number of extraction sequence entries required does not
1312          * exceed the block's capacity.
1313          */
1314         if (params->es_cnt >= fv_words)
1315                 return ICE_ERR_MAX_LIMIT;
1316
1317         /* some blocks require a reversed field vector layout */
1318         if (hw->blk[params->blk].es.reverse)
1319                 idx = fv_words - params->es_cnt - 1;
1320         else
1321                 idx = params->es_cnt;
1322
1323         params->es[idx].prot_id = ICE_PROT_META_ID;
1324         params->es[idx].off = flags;
1325         params->es_cnt++;
1326
1327         return ICE_SUCCESS;
1328 }
1329
1330 /**
1331  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1332  * @hw: pointer to the HW struct
1333  * @params: information about the flow to be processed
1334  * @seg: packet segment index of the field to be extracted
1335  * @fld: ID of field to be extracted
1336  * @match: bit field of all fields to be matched in this segment
1337  *
1338  * This function determines the protocol ID, offset, and size of the given
1339  * field. It then allocates one or more extraction sequence entries for the
1340  * given field, and fills the entries with protocol ID and offset information.
1341  */
1342 static enum ice_status
1343 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1344                     u8 seg, enum ice_flow_field fld, u64 match)
1345 {
1346         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1347         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1348         u8 fv_words = hw->blk[params->blk].es.fvw;
1349         struct ice_flow_fld_info *flds;
1350         u16 cnt, ese_bits, i;
1351         u16 sib_mask = 0;
1352         u16 mask;
1353         u16 off;
1354
1355         flds = params->prof->segs[seg].fields;
1356
1357         switch (fld) {
1358         case ICE_FLOW_FIELD_IDX_ETH_DA:
1359         case ICE_FLOW_FIELD_IDX_ETH_SA:
1360         case ICE_FLOW_FIELD_IDX_S_VLAN:
1361         case ICE_FLOW_FIELD_IDX_C_VLAN:
1362                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1363                 break;
1364         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1365                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1366                 break;
1367         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1368                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1369                 break;
1370         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1371                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1372                 break;
1373         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1374         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1375                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1376
1377                 /* TTL and PROT share the same extraction sequence
1378                  * entry; each is considered the sibling of the other
1379                  * for the purpose of sharing that entry.
1380                  */
1381                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1382                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1383                 else
1384                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1385
1386                 /* If the sibling field is also included, that field's
1387                  * mask needs to be included.
1388                  */
1389                 if (match & BIT(sib))
1390                         sib_mask = ice_flds_info[sib].mask;
1391                 break;
1392         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1393         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1394                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1395
1396                 /* TTL and PROT share the same extraction sequence
1397                  * entry; each is considered the sibling of the other
1398                  * for the purpose of sharing that entry.
1399                  */
1400                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1401                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1402                 else
1403                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1404
1405                 /* If the sibling field is also included, that field's
1406                  * mask needs to be included.
1407                  */
1408                 if (match & BIT(sib))
1409                         sib_mask = ice_flds_info[sib].mask;
1410                 break;
1411         case ICE_FLOW_FIELD_IDX_IPV4_SA:
1412         case ICE_FLOW_FIELD_IDX_IPV4_DA:
1413                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1414                 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1415                     params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1416                     seg == 1)
1417                         prot_id = ICE_PROT_IPV4_IL_IL;
1418                 break;
1419         case ICE_FLOW_FIELD_IDX_IPV4_ID:
1420                 prot_id = ICE_PROT_IPV4_OF_OR_S;
1421                 break;
1422         case ICE_FLOW_FIELD_IDX_IPV6_SA:
1423         case ICE_FLOW_FIELD_IDX_IPV6_DA:
1424         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1425         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1426         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1427         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1428         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1429         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1430                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1431                 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1432                     params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1433                     seg == 1)
1434                         prot_id = ICE_PROT_IPV6_IL_IL;
1435                 break;
1436         case ICE_FLOW_FIELD_IDX_IPV6_ID:
1437                 prot_id = ICE_PROT_IPV6_FRAG;
1438                 break;
1439         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1440         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1441         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1442                 prot_id = ICE_PROT_TCP_IL;
1443                 break;
1444         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1445         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1446                 prot_id = ICE_PROT_UDP_IL_OR_S;
1447                 break;
1448         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1449         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1450                 prot_id = ICE_PROT_SCTP_IL;
1451                 break;
1452         case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1453         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1454         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1455         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1456         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1457         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1458         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1459                 /* GTP is accessed through UDP OF protocol */
1460                 prot_id = ICE_PROT_UDP_OF;
1461                 break;
1462         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1463                 prot_id = ICE_PROT_PPPOE;
1464                 break;
1465         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1466                 prot_id = ICE_PROT_UDP_IL_OR_S;
1467                 break;
1468         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1469                 prot_id = ICE_PROT_L2TPV3;
1470                 break;
1471         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1472                 prot_id = ICE_PROT_ESP_F;
1473                 break;
1474         case ICE_FLOW_FIELD_IDX_AH_SPI:
1475                 prot_id = ICE_PROT_ESP_2;
1476                 break;
1477         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1478                 prot_id = ICE_PROT_UDP_IL_OR_S;
1479                 break;
1480         case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1481                 prot_id = ICE_PROT_ECPRI;
1482                 break;
1483         case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1484                 prot_id = ICE_PROT_UDP_IL_OR_S;
1485                 break;
1486         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1487         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1488         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1489         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1490         case ICE_FLOW_FIELD_IDX_ARP_OP:
1491                 prot_id = ICE_PROT_ARP_OF;
1492                 break;
1493         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1494         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1495                 /* ICMP type and code share the same extraction seq. entry */
1496                 prot_id = (params->prof->segs[seg].hdrs &
1497                            ICE_FLOW_SEG_HDR_IPV4) ?
1498                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1499                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1500                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1501                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1502                 break;
1503         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1504                 prot_id = ICE_PROT_GRE_OF;
1505                 break;
1506         default:
1507                 return ICE_ERR_NOT_IMPL;
1508         }
1509
1510         /* Each extraction sequence entry is a word in size, and extracts one
1511          * word from a word-aligned offset within a protocol header.
1512          */
1513         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1514
1515         flds[fld].xtrct.prot_id = prot_id;
1516         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1517                 ICE_FLOW_FV_EXTRACT_SZ;
1518         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1519         flds[fld].xtrct.idx = params->es_cnt;
1520         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1521
1522         /* Adjust the next field-entry index after accommodating the number of
1523          * entries this field consumes
1524          */
1525         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1526                                   ice_flds_info[fld].size, ese_bits);
1527
1528         /* Fill in the extraction sequence entries needed for this field */
1529         off = flds[fld].xtrct.off;
1530         mask = flds[fld].xtrct.mask;
1531         for (i = 0; i < cnt; i++) {
1532                 /* Only consume an extraction sequence entry if there is no
1533                  * sibling field associated with this field, or the sibling's
1534                  * entry does not already extract the word shared with this field.
1535                  */
1536                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1537                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1538                     flds[sib].xtrct.off != off) {
1539                         u8 idx;
1540
1541                         /* Make sure the number of extraction sequence entries
1542                          * required does not exceed the block's capacity
1543                          */
1544                         if (params->es_cnt >= fv_words)
1545                                 return ICE_ERR_MAX_LIMIT;
1546
1547                         /* some blocks require a reversed field vector layout */
1548                         if (hw->blk[params->blk].es.reverse)
1549                                 idx = fv_words - params->es_cnt - 1;
1550                         else
1551                                 idx = params->es_cnt;
1552
1553                         params->es[idx].prot_id = prot_id;
1554                         params->es[idx].off = off;
1555                         params->mask[idx] = mask | sib_mask;
1556                         params->es_cnt++;
1557                 }
1558
1559                 off += ICE_FLOW_FV_EXTRACT_SZ;
1560         }
1561
1562         return ICE_SUCCESS;
1563 }
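
/* Worked example (editor's addition, assuming ICE_FLOW_FV_EXTRACT_SZ is the
 * 2-byte word size implied by the comment above): IPv4 TTL lives at byte
 * offset 8 of the IPv4 header (bit offset 64) and IPv4 PROT at byte offset 9
 * (bit offset 72), so both resolve to xtrct.off = 8 with disp = 0 and 8 bits
 * respectively, and each needs cnt = 1. Whichever sibling is processed first
 * consumes the extraction sequence entry for the word at offset 8; the other
 * then finds flds[sib].xtrct.off == off and reuses that entry.
 */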
1564
1565 /**
1566  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1567  * @hw: pointer to the HW struct
1568  * @params: information about the flow to be processed
1569  * @seg: index of packet segment whose raw fields are to be extracted
1570  */
1571 static enum ice_status
1572 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1573                      u8 seg)
1574 {
1575         u16 fv_words;
1576         u16 hdrs_sz;
1577         u8 i;
1578
1579         if (!params->prof->segs[seg].raws_cnt)
1580                 return ICE_SUCCESS;
1581
1582         if (params->prof->segs[seg].raws_cnt >
1583             ARRAY_SIZE(params->prof->segs[seg].raws))
1584                 return ICE_ERR_MAX_LIMIT;
1585
1586         /* Offsets within the segment headers are not supported */
1587         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1588         if (!hdrs_sz)
1589                 return ICE_ERR_PARAM;
1590
1591         fv_words = hw->blk[params->blk].es.fvw;
1592
1593         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1594                 struct ice_flow_seg_fld_raw *raw;
1595                 u16 off, cnt, j;
1596
1597                 raw = &params->prof->segs[seg].raws[i];
1598
1599                 /* Storing extraction information */
1600                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1601                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1602                         ICE_FLOW_FV_EXTRACT_SZ;
1603                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1604                         BITS_PER_BYTE;
1605                 raw->info.xtrct.idx = params->es_cnt;
1606
1607                 /* Determine the number of field vector entries this raw field
1608                  * consumes.
1609                  */
1610                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1611                                           (raw->info.src.last * BITS_PER_BYTE),
1612                                           (ICE_FLOW_FV_EXTRACT_SZ *
1613                                            BITS_PER_BYTE));
1614                 off = raw->info.xtrct.off;
1615                 for (j = 0; j < cnt; j++) {
1616                         u16 idx;
1617
1618                         /* Make sure the number of extraction sequence entries
1619                          * required does not exceed the block's capacity
1620                          */
1621                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1622                             params->es_cnt >= ICE_MAX_FV_WORDS)
1623                                 return ICE_ERR_MAX_LIMIT;
1624
1625                         /* some blocks require a reversed field vector layout */
1626                         if (hw->blk[params->blk].es.reverse)
1627                                 idx = fv_words - params->es_cnt - 1;
1628                         else
1629                                 idx = params->es_cnt;
1630
1631                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1632                         params->es[idx].off = off;
1633                         params->es_cnt++;
1634                         off += ICE_FLOW_FV_EXTRACT_SZ;
1635                 }
1636         }
1637
1638         return ICE_SUCCESS;
1639 }
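
/* Worked example (editor's addition, again assuming a 2-byte
 * ICE_FLOW_FV_EXTRACT_SZ): a raw match of 3 bytes starting at packet offset 5
 * yields xtrct.off = 4, disp = 8 bits and cnt = DIVIDE_AND_ROUND_UP(8 + 24, 16)
 * = 2, i.e. the raw field consumes two field vector words.
 */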
1640
1641 /**
1642  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1643  * @hw: pointer to the HW struct
1644  * @params: information about the flow to be processed
1645  *
1646  * This function iterates through all matched fields in the given segments, and
1647  * creates an extraction sequence for the fields.
1648  */
1649 static enum ice_status
1650 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1651                           struct ice_flow_prof_params *params)
1652 {
1653         enum ice_status status = ICE_SUCCESS;
1654         u8 i;
1655
1656         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1657          * packet flags
1658          */
1659         if (params->blk == ICE_BLK_ACL) {
1660                 status = ice_flow_xtract_pkt_flags(hw, params,
1661                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1662                 if (status)
1663                         return status;
1664         }
1665
1666         for (i = 0; i < params->prof->segs_cnt; i++) {
1667                 u64 match = params->prof->segs[i].match;
1668                 enum ice_flow_field j;
1669
1670                 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1671                                      ICE_FLOW_FIELD_IDX_MAX) {
1672                         status = ice_flow_xtract_fld(hw, params, i, j, match);
1673                         if (status)
1674                                 return status;
1675                         ice_clear_bit(j, (ice_bitmap_t *)&match);
1676                 }
1677
1678                 /* Process raw matching bytes */
1679                 status = ice_flow_xtract_raws(hw, params, i);
1680                 if (status)
1681                         return status;
1682         }
1683
1684         return status;
1685 }
1686
1687 /**
1688  * ice_flow_sel_acl_scen - select the ACL scenario for a flow profile
1689  * @hw: pointer to the hardware structure
1690  * @params: information about the flow to be processed
1691  *
1692  * This function selects the ACL scenario with the smallest effective width
1693  * that can still accommodate the entry length computed for the profile.
1694  */
1695 static enum ice_status
1696 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1697 {
1698         /* Find the best-fit scenario for the provided match width */
1699         struct ice_acl_scen *cand_scen = NULL, *scen;
1700
1701         if (!hw->acl_tbl)
1702                 return ICE_ERR_DOES_NOT_EXIST;
1703
1704         /* Loop through the scenarios and pick the one whose effective width
1705          * is the smallest that still fits the entry
1706          */
1707         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1708                 if (scen->eff_width >= params->entry_length &&
1709                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1710                         cand_scen = scen;
1711         if (!cand_scen)
1712                 return ICE_ERR_DOES_NOT_EXIST;
1713
1714         params->prof->cfg.scen = cand_scen;
1715
1716         return ICE_SUCCESS;
1717 }
1718
1719 /**
1720  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1721  * @params: information about the flow to be processed
1722  */
1723 static enum ice_status
1724 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1725 {
1726         u16 index, i, range_idx = 0;
1727
1728         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1729
1730         for (i = 0; i < params->prof->segs_cnt; i++) {
1731                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1732                 u8 j;
1733
1734                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1735                                      ICE_FLOW_FIELD_IDX_MAX) {
1736                         struct ice_flow_fld_info *fld = &seg->fields[j];
1737
1738                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1739
1740                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1741                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1742
1743                                 /* Range checking only supported for single
1744                                  * words
1745                                  */
1746                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1747                                                         fld->xtrct.disp,
1748                                                         BITS_PER_BYTE * 2) > 1)
1749                                         return ICE_ERR_PARAM;
1750
1751                                 /* Ranges must define low and high values */
1752                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1753                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1754                                         return ICE_ERR_PARAM;
1755
1756                                 fld->entry.val = range_idx++;
1757                         } else {
1758                                 /* Store adjusted byte-length of field for later
1759                                  * use, taking into account potential
1760                                  * non-byte-aligned displacement
1761                                  */
1762                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1763                                         (ice_flds_info[j].size +
1764                                          (fld->xtrct.disp % BITS_PER_BYTE),
1765                                          BITS_PER_BYTE);
1766                                 fld->entry.val = index;
1767                                 index += fld->entry.last;
1768                         }
1769                 }
1770
1771                 for (j = 0; j < seg->raws_cnt; j++) {
1772                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1773
1774                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1775                         raw->info.entry.val = index;
1776                         raw->info.entry.last = raw->info.src.last;
1777                         index += raw->info.entry.last;
1778                 }
1779         }
1780
1781         /* Currently only the byte selection base is supported, which allows
1782          * an effective entry size of at most 30 bytes. Reject anything
1783          * larger.
1784          */
1785         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1786                 return ICE_ERR_PARAM;
1787
1788         /* Only 8 range checkers per profile, reject anything trying to use
1789          * more
1790          */
1791         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1792                 return ICE_ERR_PARAM;
1793
1794         /* Store # bytes required for entry for later use */
1795         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1796
1797         return ICE_SUCCESS;
1798 }
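
/* Worked example (editor's addition): with byte-aligned extraction and no
 * range-check fields, an ACL profile matching IPv4 SA (32 bits) and the TCP
 * destination port (16 bits) lays the two fields out back to back, giving
 * entry.last values of 4 and 2 and an entry_length of 6 bytes.
 */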
1799
1800 /**
1801  * ice_flow_proc_segs - process all packet segments associated with a profile
1802  * @hw: pointer to the HW struct
1803  * @params: information about the flow to be processed
1804  */
1805 static enum ice_status
1806 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1807 {
1808         enum ice_status status;
1809
1810         status = ice_flow_proc_seg_hdrs(params);
1811         if (status)
1812                 return status;
1813
1814         status = ice_flow_create_xtrct_seq(hw, params);
1815         if (status)
1816                 return status;
1817
1818         switch (params->blk) {
1819         case ICE_BLK_FD:
1820         case ICE_BLK_RSS:
1821                 status = ICE_SUCCESS;
1822                 break;
1823         case ICE_BLK_ACL:
1824                 status = ice_flow_acl_def_entry_frmt(params);
1825                 if (status)
1826                         return status;
1827                 status = ice_flow_sel_acl_scen(hw, params);
1828                 if (status)
1829                         return status;
1830                 break;
1831         default:
1832                 return ICE_ERR_NOT_IMPL;
1833         }
1834
1835         return status;
1836 }
1837
1838 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1839 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1840 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1841
1842 /**
1843  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1844  * @hw: pointer to the HW struct
1845  * @blk: classification stage
1846  * @dir: flow direction
1847  * @segs: array of one or more packet segments that describe the flow
1848  * @segs_cnt: number of packet segments provided
1849  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1850  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1851  */
1852 static struct ice_flow_prof *
1853 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1854                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1855                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1856 {
1857         struct ice_flow_prof *p, *prof = NULL;
1858
1859         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1860         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1861                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1862                     segs_cnt && segs_cnt == p->segs_cnt) {
1863                         u8 i;
1864
1865                         /* Check for profile-VSI association if specified */
1866                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1867                             ice_is_vsi_valid(hw, vsi_handle) &&
1868                             !ice_is_bit_set(p->vsis, vsi_handle))
1869                                 continue;
1870
1871                         /* Protocol headers must be checked. Matched fields are
1872                          * checked if specified.
1873                          */
1874                         for (i = 0; i < segs_cnt; i++)
1875                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1876                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1877                                      segs[i].match != p->segs[i].match))
1878                                         break;
1879
1880                         /* A match is found if all segments are matched */
1881                         if (i == segs_cnt) {
1882                                 prof = p;
1883                                 break;
1884                         }
1885                 }
1886         ice_release_lock(&hw->fl_profs_locks[blk]);
1887
1888         return prof;
1889 }
1890
1891 /**
1892  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1893  * @hw: pointer to the HW struct
1894  * @blk: classification stage
1895  * @dir: flow direction
1896  * @segs: array of one or more packet segments that describe the flow
1897  * @segs_cnt: number of packet segments provided
1898  */
1899 u64
1900 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1901                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1902 {
1903         struct ice_flow_prof *p;
1904
1905         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1906                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1907
1908         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1909 }
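
/* Illustrative usage sketch (editor's addition): a caller that has already
 * built a segment array could probe for an existing RSS profile as shown
 * below; "segs" and "segs_cnt" are hypothetical caller-owned values and
 * ICE_FLOW_RX is assumed from the ice_flow_dir definitions in ice_flow.h.
 *
 *        u64 prof_id = ice_flow_find_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX,
 *                                         segs, segs_cnt);
 *
 *        if (prof_id != ICE_FLOW_PROF_ID_INVAL)
 *                ... reuse the matching profile instead of adding a new one ...
 */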
1910
1911 /**
1912  * ice_flow_find_prof_id - Look up a profile with given profile ID
1913  * @hw: pointer to the HW struct
1914  * @blk: classification stage
1915  * @prof_id: unique ID to identify this flow profile
1916  */
1917 static struct ice_flow_prof *
1918 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1919 {
1920         struct ice_flow_prof *p;
1921
1922         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1923                 if (p->id == prof_id)
1924                         return p;
1925
1926         return NULL;
1927 }
1928
1929 /**
1930  * ice_dealloc_flow_entry - Deallocate flow entry memory
1931  * @hw: pointer to the HW struct
1932  * @entry: flow entry to be removed
1933  */
1934 static void
1935 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1936 {
1937         if (!entry)
1938                 return;
1939
1940         if (entry->entry)
1941                 ice_free(hw, entry->entry);
1942
1943         if (entry->range_buf) {
1944                 ice_free(hw, entry->range_buf);
1945                 entry->range_buf = NULL;
1946         }
1947
1948         if (entry->acts) {
1949                 ice_free(hw, entry->acts);
1950                 entry->acts = NULL;
1951                 entry->acts_cnt = 0;
1952         }
1953
1954         ice_free(hw, entry);
1955 }
1956
1957 /**
1958  * ice_flow_get_hw_prof - return the HW profile ID for a specific profile ID handle
1959  * @hw: pointer to the HW struct
1960  * @blk: classification stage
1961  * @prof_id: the profile ID handle
1962  * @hw_prof_id: pointer to variable to receive the HW profile ID
1963  */
1964 enum ice_status
1965 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1966                      u8 *hw_prof_id)
1967 {
1968         enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1969         struct ice_prof_map *map;
1970
1971         ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1972         map = ice_search_prof_id(hw, blk, prof_id);
1973         if (map) {
1974                 *hw_prof_id = map->prof_id;
1975                 status = ICE_SUCCESS;
1976         }
1977         ice_release_lock(&hw->blk[blk].es.prof_map_lock);
1978         return status;
1979 }
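
/* Illustrative usage sketch (editor's addition): translating a software flow
 * profile ID handle into the HW profile ID; "prof_id" is a hypothetical handle
 * owned by the caller.
 *
 *        u8 hw_prof_id;
 *
 *        if (ice_flow_get_hw_prof(hw, ICE_BLK_FD, prof_id, &hw_prof_id) ==
 *            ICE_SUCCESS)
 *                ... hw_prof_id now holds the HW profile ID ...
 */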
1980
1981 #define ICE_ACL_INVALID_SCEN    0x3f
1982
1983 /**
1984  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
1985  * @hw: pointer to the hardware structure
1986  * @prof: pointer to flow profile
1987  * @buf: destination buffer the function writes the partial extraction sequence to
1988  *
1989  * returns ICE_SUCCESS if no PF is associated with the given profile
1990  * returns ICE_ERR_IN_USE if at least one PF is associated with the given profile
1991  * returns another error code if a real error occurred
1992  */
1993 static enum ice_status
1994 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1995                             struct ice_aqc_acl_prof_generic_frmt *buf)
1996 {
1997         enum ice_status status;
1998         u8 prof_id = 0;
1999
2000         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2001         if (status)
2002                 return status;
2003
2004         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
2005         if (status)
2006                 return status;
2007
2008         /* If the scenario numbers of all PFs for the given profile are either
2009          * all 0 or all ICE_ACL_INVALID_SCEN (63), then the profile has not
2010          * been configured yet.
2011          */
2012         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
2013             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
2014             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
2015             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
2016                 return ICE_SUCCESS;
2017
2018         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
2019             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
2020             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
2021             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
2022             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
2023             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
2024             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
2025             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
2026                 return ICE_SUCCESS;
2027
2028         return ICE_ERR_IN_USE;
2029 }
2030
2031 /**
2032  * ice_flow_acl_free_act_cntr - Deallocate ACL counters used by the rule's actions
2033  * @hw: pointer to the hardware structure
2034  * @acts: array of actions to be performed on a match
2035  * @acts_cnt: number of actions
2036  */
2037 static enum ice_status
2038 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
2039                            u8 acts_cnt)
2040 {
2041         int i;
2042
2043         for (i = 0; i < acts_cnt; i++) {
2044                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2045                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2046                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2047                         struct ice_acl_cntrs cntrs = { 0 };
2048                         enum ice_status status;
2049
2050                         /* amount is unused in the dealloc path but the common
2051                          * parameter check routine wants a value set, as zero
2052                          * is invalid for the check. Just set it.
2053                          */
2054                         cntrs.amount = 1;
2055                         cntrs.bank = 0; /* Only bank0 for the moment */
2056                         cntrs.first_cntr =
2057                                         LE16_TO_CPU(acts[i].data.acl_act.value);
2058                         cntrs.last_cntr =
2059                                         LE16_TO_CPU(acts[i].data.acl_act.value);
2060
2061                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2062                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2063                         else
2064                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2065
2066                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
2067                         if (status)
2068                                 return status;
2069                 }
2070         }
2071         return ICE_SUCCESS;
2072 }
2073
2074 /**
2075  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
2076  * @hw: pointer to the hardware structure
2077  * @prof: pointer to flow profile
2078  *
2079  * Disassociate the scenario from the profile for the current PF.
2080  */
2081 static enum ice_status
2082 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
2083 {
2084         struct ice_aqc_acl_prof_generic_frmt buf;
2085         enum ice_status status = ICE_SUCCESS;
2086         u8 prof_id = 0;
2087
2088         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2089
2090         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2091         if (status)
2092                 return status;
2093
2094         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
2095         if (status)
2096                 return status;
2097
2098         /* Clear scenario for this PF */
2099         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
2100         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2101
2102         return status;
2103 }
2104
2105 /**
2106  * ice_flow_rem_entry_sync - Remove a flow entry
2107  * @hw: pointer to the HW struct
2108  * @blk: classification stage
2109  * @entry: flow entry to be removed
2110  */
2111 static enum ice_status
2112 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
2113                         struct ice_flow_entry *entry)
2114 {
2115         if (!entry)
2116                 return ICE_ERR_BAD_PTR;
2117
2118         if (blk == ICE_BLK_ACL) {
2119                 enum ice_status status;
2120
2121                 if (!entry->prof)
2122                         return ICE_ERR_BAD_PTR;
2123
2124                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
2125                                            entry->scen_entry_idx);
2126                 if (status)
2127                         return status;
2128
2129                 /* Checks if we need to release an ACL counter. */
2130                 if (entry->acts_cnt && entry->acts)
2131                         ice_flow_acl_free_act_cntr(hw, entry->acts,
2132                                                    entry->acts_cnt);
2133         }
2134
2135         LIST_DEL(&entry->l_entry);
2136
2137         ice_dealloc_flow_entry(hw, entry);
2138
2139         return ICE_SUCCESS;
2140 }
2141
2142 /**
2143  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
2144  * @hw: pointer to the HW struct
2145  * @blk: classification stage
2146  * @dir: flow direction
2147  * @prof_id: unique ID to identify this flow profile
2148  * @segs: array of one or more packet segments that describe the flow
2149  * @segs_cnt: number of packet segments provided
2150  * @acts: array of default actions
2151  * @acts_cnt: number of default actions
2152  * @prof: stores the returned flow profile added
2153  *
2154  * Assumption: the caller has acquired the lock to the profile list
2155  */
2156 static enum ice_status
2157 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
2158                        enum ice_flow_dir dir, u64 prof_id,
2159                        struct ice_flow_seg_info *segs, u8 segs_cnt,
2160                        struct ice_flow_action *acts, u8 acts_cnt,
2161                        struct ice_flow_prof **prof)
2162 {
2163         struct ice_flow_prof_params *params;
2164         enum ice_status status;
2165         u8 i;
2166
2167         if (!prof || (acts_cnt && !acts))
2168                 return ICE_ERR_BAD_PTR;
2169
2170         params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2171         if (!params)
2172                 return ICE_ERR_NO_MEMORY;
2173
2174         params->prof = (struct ice_flow_prof *)
2175                 ice_malloc(hw, sizeof(*params->prof));
2176         if (!params->prof) {
2177                 status = ICE_ERR_NO_MEMORY;
2178                 goto free_params;
2179         }
2180
2181         /* initialize extraction sequence to all invalid (0xff) */
2182         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2183                 params->es[i].prot_id = ICE_PROT_INVALID;
2184                 params->es[i].off = ICE_FV_OFFSET_INVAL;
2185         }
2186
2187         params->blk = blk;
2188         params->prof->id = prof_id;
2189         params->prof->dir = dir;
2190         params->prof->segs_cnt = segs_cnt;
2191
2192         /* Make a copy of the segments that need to be persistent in the flow
2193          * profile instance
2194          */
2195         for (i = 0; i < segs_cnt; i++)
2196                 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
2197                            ICE_NONDMA_TO_NONDMA);
2198
2199         /* Make a copy of the actions that need to be persistent in the flow
2200          * profile instance.
2201          */
2202         if (acts_cnt) {
2203                 params->prof->acts = (struct ice_flow_action *)
2204                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2205                                    ICE_NONDMA_TO_NONDMA);
2206
2207                 if (!params->prof->acts) {
2208                         status = ICE_ERR_NO_MEMORY;
2209                         goto out;
2210                 }
2211         }
2212
2213         status = ice_flow_proc_segs(hw, params);
2214         if (status) {
2215                 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2216                 goto out;
2217         }
2218
2219         /* Add a HW profile for this flow profile */
2220         status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2221                               params->attr, params->attr_cnt, params->es,
2222                               params->mask);
2223         if (status) {
2224                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
2225                 goto out;
2226         }
2227
2228         INIT_LIST_HEAD(&params->prof->entries);
2229         ice_init_lock(&params->prof->entries_lock);
2230         *prof = params->prof;
2231
2232 out:
2233         if (status) {
2234                 if (params->prof->acts)
2235                         ice_free(hw, params->prof->acts);
2236                 ice_free(hw, params->prof);
2237         }
2238 free_params:
2239         ice_free(hw, params);
2240
2241         return status;
2242 }
2243
2244 /**
2245  * ice_flow_rem_prof_sync - remove a flow profile
2246  * @hw: pointer to the hardware structure
2247  * @blk: classification stage
2248  * @prof: pointer to flow profile to remove
2249  *
2250  * Assumption: the caller has acquired the lock to the profile list
2251  */
2252 static enum ice_status
2253 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2254                        struct ice_flow_prof *prof)
2255 {
2256         enum ice_status status;
2257
2258         /* Remove all remaining flow entries before removing the flow profile */
2259         if (!LIST_EMPTY(&prof->entries)) {
2260                 struct ice_flow_entry *e, *t;
2261
2262                 ice_acquire_lock(&prof->entries_lock);
2263
2264                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2265                                          l_entry) {
2266                         status = ice_flow_rem_entry_sync(hw, blk, e);
2267                         if (status)
2268                                 break;
2269                 }
2270
2271                 ice_release_lock(&prof->entries_lock);
2272         }
2273
2274         if (blk == ICE_BLK_ACL) {
2275                 struct ice_aqc_acl_profile_ranges query_rng_buf;
2276                 struct ice_aqc_acl_prof_generic_frmt buf;
2277                 u8 prof_id = 0;
2278
2279                 /* Disassociate the scenario from the profile for the PF */
2280                 status = ice_flow_acl_disassoc_scen(hw, prof);
2281                 if (status)
2282                         return status;
2283
2284                 /* Clear the range-checker if the profile ID is no longer
2285                  * used by any PF
2286                  */
2287                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2288                 if (status && status != ICE_ERR_IN_USE) {
2289                         return status;
2290                 } else if (!status) {
2291                         /* Clear the range-checker value for profile ID */
2292                         ice_memset(&query_rng_buf, 0,
2293                                    sizeof(struct ice_aqc_acl_profile_ranges),
2294                                    ICE_NONDMA_MEM);
2295
2296                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
2297                                                       &prof_id);
2298                         if (status)
2299                                 return status;
2300
2301                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2302                                                           &query_rng_buf, NULL);
2303                         if (status)
2304                                 return status;
2305                 }
2306         }
2307
2308         /* Remove all hardware profiles associated with this flow profile */
2309         status = ice_rem_prof(hw, blk, prof->id);
2310         if (!status) {
2311                 LIST_DEL(&prof->l_entry);
2312                 ice_destroy_lock(&prof->entries_lock);
2313                 if (prof->acts)
2314                         ice_free(hw, prof->acts);
2315                 ice_free(hw, prof);
2316         }
2317
2318         return status;
2319 }
2320
2321 /**
2322  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2323  * @buf: destination buffer the function writes the partial xtrct sequence to
2324  * @info: Info about field
2325  */
2326 static void
2327 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2328                                struct ice_flow_fld_info *info)
2329 {
2330         u16 dst, i;
2331         u8 src;
2332
2333         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2334                 info->xtrct.disp / BITS_PER_BYTE;
2335         dst = info->entry.val;
2336         for (i = 0; i < info->entry.last; i++)
2337                 /* HW stores field vector words in LE, convert words back to BE
2338                  * so constructed entries will end up in network order
2339                  */
2340                 buf->byte_selection[dst++] = src++ ^ 1;
2341 }
2342
2343 /**
2344  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2345  * @hw: pointer to the hardware structure
2346  * @prof: pointer to flow profile
2347  */
2348 static enum ice_status
2349 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2350 {
2351         struct ice_aqc_acl_prof_generic_frmt buf;
2352         struct ice_flow_fld_info *info;
2353         enum ice_status status;
2354         u8 prof_id = 0;
2355         u16 i;
2356
2357         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2358
2359         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2360         if (status)
2361                 return status;
2362
2363         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2364         if (status && status != ICE_ERR_IN_USE)
2365                 return status;
2366
2367         if (!status) {
2368                 /* Program the profile dependent configuration. This is done
2369                  * only once regardless of the number of PFs using that profile
2370                  */
2371                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2372
2373                 for (i = 0; i < prof->segs_cnt; i++) {
2374                         struct ice_flow_seg_info *seg = &prof->segs[i];
2375                         u16 j;
2376
2377                         ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2378                                              ICE_FLOW_FIELD_IDX_MAX) {
2379                                 info = &seg->fields[j];
2380
2381                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2382                                         buf.word_selection[info->entry.val] =
2383                                                 info->xtrct.idx;
2384                                 else
2385                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
2386                                                                        info);
2387                         }
2388
2389                         for (j = 0; j < seg->raws_cnt; j++) {
2390                                 info = &seg->raws[j].info;
2391                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2392                         }
2393                 }
2394
2395                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2396                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2397                            ICE_NONDMA_MEM);
2398         }
2399
2400         /* Update the current PF */
2401         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2402         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2403
2404         return status;
2405 }
2406
2407 /**
2408  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2409  * @hw: pointer to the hardware structure
2410  * @blk: classification stage
2411  * @vsi_handle: software VSI handle
2412  * @vsig: target VSI group
2413  *
2414  * Assumption: the caller has already verified that the VSI to
2415  * be added has the same characteristics as the VSIG and will
2416  * thereby have access to all resources added to that VSIG.
2417  */
2418 enum ice_status
2419 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2420                         u16 vsig)
2421 {
2422         enum ice_status status;
2423
2424         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2425                 return ICE_ERR_PARAM;
2426
2427         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2428         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2429                                   vsig);
2430         ice_release_lock(&hw->fl_profs_locks[blk]);
2431
2432         return status;
2433 }
2434
2435 /**
2436  * ice_flow_assoc_prof - associate a VSI with a flow profile
2437  * @hw: pointer to the hardware structure
2438  * @blk: classification stage
2439  * @prof: pointer to flow profile
2440  * @vsi_handle: software VSI handle
2441  *
2442  * Assumption: the caller has acquired the lock to the profile list
2443  * and the software VSI handle has been validated
2444  */
2445 enum ice_status
2446 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2447                     struct ice_flow_prof *prof, u16 vsi_handle)
2448 {
2449         enum ice_status status = ICE_SUCCESS;
2450
2451         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2452                 if (blk == ICE_BLK_ACL) {
2453                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2454                         if (status)
2455                                 return status;
2456                 }
2457                 status = ice_add_prof_id_flow(hw, blk,
2458                                               ice_get_hw_vsi_num(hw,
2459                                                                  vsi_handle),
2460                                               prof->id);
2461                 if (!status)
2462                         ice_set_bit(vsi_handle, prof->vsis);
2463                 else
2464                         ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2465                                   status);
2466         }
2467
2468         return status;
2469 }
2470
2471 /**
2472  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2473  * @hw: pointer to the hardware structure
2474  * @blk: classification stage
2475  * @prof: pointer to flow profile
2476  * @vsi_handle: software VSI handle
2477  *
2478  * Assumption: the caller has acquired the lock to the profile list
2479  * and the software VSI handle has been validated
2480  */
2481 static enum ice_status
2482 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2483                        struct ice_flow_prof *prof, u16 vsi_handle)
2484 {
2485         enum ice_status status = ICE_SUCCESS;
2486
2487         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2488                 status = ice_rem_prof_id_flow(hw, blk,
2489                                               ice_get_hw_vsi_num(hw,
2490                                                                  vsi_handle),
2491                                               prof->id);
2492                 if (!status)
2493                         ice_clear_bit(vsi_handle, prof->vsis);
2494                 else
2495                         ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2496                                   status);
2497         }
2498
2499         return status;
2500 }
2501
2502 /**
2503  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2504  * @hw: pointer to the HW struct
2505  * @blk: classification stage
2506  * @dir: flow direction
2507  * @prof_id: unique ID to identify this flow profile
2508  * @segs: array of one or more packet segments that describe the flow
2509  * @segs_cnt: number of packet segments provided
2510  * @acts: array of default actions
2511  * @acts_cnt: number of default actions
2512  * @prof: stores the returned flow profile added
2513  */
2514 enum ice_status
2515 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2516                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2517                   struct ice_flow_action *acts, u8 acts_cnt,
2518                   struct ice_flow_prof **prof)
2519 {
2520         enum ice_status status;
2521
2522         if (segs_cnt > ICE_FLOW_SEG_MAX)
2523                 return ICE_ERR_MAX_LIMIT;
2524
2525         if (!segs_cnt)
2526                 return ICE_ERR_PARAM;
2527
2528         if (!segs)
2529                 return ICE_ERR_BAD_PTR;
2530
2531         status = ice_flow_val_hdrs(segs, segs_cnt);
2532         if (status)
2533                 return status;
2534
2535         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2536
2537         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2538                                         acts, acts_cnt, prof);
2539         if (!status)
2540                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2541
2542         ice_release_lock(&hw->fl_profs_locks[blk]);
2543
2544         return status;
2545 }
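
/* Usage sketch (illustrative only, not part of this file): a caller such as
 * the ice PMD's flow director setup might build a single-segment IPv4/UDP
 * profile roughly as follows; the prof_id value and field choice below are
 * hypothetical.
 *
 *	struct ice_flow_seg_info segs[1] = { 0 };
 *	struct ice_flow_prof *prof = NULL;
 *	enum ice_status status;
 *
 *	ICE_FLOW_SET_HDRS(&segs[0],
 *			  ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP);
 *	ice_flow_set_fld(&segs[0], ICE_FLOW_FIELD_IDX_IPV4_SA,
 *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 *	status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
 *				   segs, 1, NULL, 0, &prof);
 *	...
 *	status = ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
 */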
2546
2547 /**
2548  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2549  * @hw: pointer to the HW struct
2550  * @blk: the block for which the flow profile is to be removed
2551  * @prof_id: unique ID of the flow profile to be removed
2552  */
2553 enum ice_status
2554 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2555 {
2556         struct ice_flow_prof *prof;
2557         enum ice_status status;
2558
2559         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2560
2561         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2562         if (!prof) {
2563                 status = ICE_ERR_DOES_NOT_EXIST;
2564                 goto out;
2565         }
2566
2567         /* prof becomes invalid after the call */
2568         status = ice_flow_rem_prof_sync(hw, blk, prof);
2569
2570 out:
2571         ice_release_lock(&hw->fl_profs_locks[blk]);
2572
2573         return status;
2574 }
2575
2576 /**
2577  * ice_flow_find_entry - look for a flow entry using its unique ID
2578  * @hw: pointer to the HW struct
2579  * @blk: classification stage
2580  * @entry_id: unique ID to identify this flow entry
2581  *
2582  * This function looks for the flow entry with the specified unique ID in all
2583  * flow profiles of the specified classification stage. If the entry is found,
2584  * it returns the handle to the flow entry. Otherwise, it returns
2585  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2586  */
2587 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2588 {
2589         struct ice_flow_entry *found = NULL;
2590         struct ice_flow_prof *p;
2591
2592         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2593
2594         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2595                 struct ice_flow_entry *e;
2596
2597                 ice_acquire_lock(&p->entries_lock);
2598                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2599                         if (e->id == entry_id) {
2600                                 found = e;
2601                                 break;
2602                         }
2603                 ice_release_lock(&p->entries_lock);
2604
2605                 if (found)
2606                         break;
2607         }
2608
2609         ice_release_lock(&hw->fl_profs_locks[blk]);
2610
2611         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2612 }
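
/* Usage sketch (illustrative only): a caller can check whether an entry ID is
 * already programmed before deciding to add or replace it; entry_id below is
 * hypothetical.
 *
 *	u64 hndl = ice_flow_find_entry(hw, ICE_BLK_FD, entry_id);
 *
 *	if (hndl != ICE_FLOW_ENTRY_HANDLE_INVAL)
 *		status = ice_flow_rem_entry(hw, ICE_BLK_FD, hndl);
 */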
2613
2614 /**
2615  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2616  * @hw: pointer to the hardware structure
2617  * @acts: array of actions to be performed on a match
2618  * @acts_cnt: number of actions
2619  * @cnt_alloc: indicates if an ACL counter has been allocated.
2620  */
2621 static enum ice_status
2622 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2623                            u8 acts_cnt, bool *cnt_alloc)
2624 {
2625         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2626         int i;
2627
2628         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2629         *cnt_alloc = false;
2630
2631         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2632                 return ICE_ERR_OUT_OF_RANGE;
2633
2634         for (i = 0; i < acts_cnt; i++) {
2635                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2636                     acts[i].type != ICE_FLOW_ACT_DROP &&
2637                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2638                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2639                         return ICE_ERR_CFG;
2640
2641                 /* If the caller wants to add two actions of the same type, then
2642                  * it is considered an invalid configuration.
2643                  */
2644                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2645                         return ICE_ERR_PARAM;
2646         }
2647
2648         /* Checks if ACL counters are needed. */
2649         for (i = 0; i < acts_cnt; i++) {
2650                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2651                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2652                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2653                         struct ice_acl_cntrs cntrs = { 0 };
2654                         enum ice_status status;
2655
2656                         cntrs.amount = 1;
2657                         cntrs.bank = 0; /* Only bank0 for the moment */
2658
2659                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2660                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2661                         else
2662                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2663
2664                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2665                         if (status)
2666                                 return status;
2667                         /* Counter index within the bank */
2668                         acts[i].data.acl_act.value =
2669                                                 CPU_TO_LE16(cntrs.first_cntr);
2670                         *cnt_alloc = true;
2671                 }
2672         }
2673
2674         return ICE_SUCCESS;
2675 }
2676
2677 /**
2678  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2679  * @fld: number of the given field
2680  * @info: info about field
2681  * @range_buf: range checker configuration buffer
2682  * @data: pointer to a data buffer containing flow entry's match values/masks
2683  * @range: Input/output param indicating which range checkers are being used
2684  */
2685 static void
2686 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2687                               struct ice_aqc_acl_profile_ranges *range_buf,
2688                               u8 *data, u8 *range)
2689 {
2690         u16 new_mask;
2691
2692         /* If not specified, default mask is all bits in field */
2693         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2694                     BIT(ice_flds_info[fld].size) - 1 :
2695                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2696
2697         /* If the mask is 0, then we don't need to worry about this input
2698          * range checker value.
2699          */
2700         if (new_mask) {
2701                 u16 new_high =
2702                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2703                 u16 new_low =
2704                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2705                 u8 range_idx = info->entry.val;
2706
2707                 range_buf->checker_cfg[range_idx].low_boundary =
2708                         CPU_TO_BE16(new_low);
2709                 range_buf->checker_cfg[range_idx].high_boundary =
2710                         CPU_TO_BE16(new_high);
2711                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2712
2713                 /* Indicate which range checker is being used */
2714                 *range |= BIT(range_idx);
2715         }
2716 }
2717
2718 /**
2719  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2720  * @fld: number of the given field
2721  * @info: info about the field
2722  * @buf: buffer containing the entry
2723  * @dontcare: buffer containing don't care mask for entry
2724  * @data: pointer to a data buffer containing flow entry's match values/masks
2725  */
2726 static void
2727 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2728                             u8 *dontcare, u8 *data)
2729 {
2730         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2731         bool use_mask = false;
2732         u8 disp;
2733
2734         src = info->src.val;
2735         mask = info->src.mask;
2736         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2737         disp = info->xtrct.disp % BITS_PER_BYTE;
2738
2739         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2740                 use_mask = true;
2741
2742         for (k = 0; k < info->entry.last; k++, dst++) {
2743                 /* Add overflow bits from previous byte */
2744                 buf[dst] = (tmp_s & 0xff00) >> 8;
2745
2746                 /* If the mask is not valid, tmp_m is always zero, so this just
2747                  * sets dontcare to 0 (no masked bits). If the mask is valid, this
2748                  * pulls in the overflow bits of the mask from the previous byte
2749                  */
2750                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2751
2752                 /* If there is displacement, the last byte will only contain
2753                  * displaced data and there is no more data to read from the user
2754                  * buffer, so skip it to avoid reading beyond the end of the user
2755                  * buffer
2756                  */
2757                 if (!disp || k < info->entry.last - 1) {
2758                         /* Store shifted data to use in next byte */
2759                         tmp_s = data[src++] << disp;
2760
2761                         /* Add current (shifted) byte */
2762                         buf[dst] |= tmp_s & 0xff;
2763
2764                         /* Handle mask if valid */
2765                         if (use_mask) {
2766                                 tmp_m = (~data[mask++] & 0xff) << disp;
2767                                 dontcare[dst] |= tmp_m & 0xff;
2768                         }
2769                 }
2770         }
2771
2772         /* Fill in don't care bits at beginning of field */
2773         if (disp) {
2774                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2775                 for (k = 0; k < disp; k++)
2776                         dontcare[dst] |= BIT(k);
2777         }
2778
2779         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2780
2781         /* Fill in don't care bits at end of field */
2782         if (end_disp) {
2783                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2784                       info->entry.last - 1;
2785                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2786                         dontcare[dst] |= BIT(k);
2787         }
2788 }
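
/* Worked example (illustrative only): for a one-byte field extracted with
 * disp = 3 and source data 0xAB, the shifted value is 0xAB << 3 = 0x558, so
 * 0x58 is OR'ed into the first entry byte and the 0x05 carry into the second;
 * don't-care bits 0-2 of the first byte and, since
 * end_disp = (3 + 8) % BITS_PER_BYTE = 3, bits 3-7 of the last byte are set.
 */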
2789
2790 /**
2791  * ice_flow_acl_frmt_entry - Format ACL entry
2792  * @hw: pointer to the hardware structure
2793  * @prof: pointer to flow profile
2794  * @e: pointer to the flow entry
2795  * @data: pointer to a data buffer containing flow entry's match values/masks
2796  * @acts: array of actions to be performed on a match
2797  * @acts_cnt: number of actions
2798  *
2799  * Formats the key (and key_inverse) to be matched from the data passed in,
2800  * along with data from the flow profile. This key/key_inverse pair makes up
2801  * the 'entry' for an ACL flow entry.
2802  */
2803 static enum ice_status
2804 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2805                         struct ice_flow_entry *e, u8 *data,
2806                         struct ice_flow_action *acts, u8 acts_cnt)
2807 {
2808         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2809         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2810         enum ice_status status;
2811         bool cnt_alloc;
2812         u8 prof_id = 0;
2813         u16 i, buf_sz;
2814
2815         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2816         if (status)
2817                 return status;
2818
2819         /* Format the result action */
2820
2821         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2822         if (status)
2823                 return status;
2824
2825         status = ICE_ERR_NO_MEMORY;
2826
2827         e->acts = (struct ice_flow_action *)
2828                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2829                            ICE_NONDMA_TO_NONDMA);
2830         if (!e->acts)
2831                 goto out;
2832
2833         e->acts_cnt = acts_cnt;
2834
2835         /* Format the matching data */
2836         buf_sz = prof->cfg.scen->width;
2837         buf = (u8 *)ice_malloc(hw, buf_sz);
2838         if (!buf)
2839                 goto out;
2840
2841         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2842         if (!dontcare)
2843                 goto out;
2844
2845         /* 'key' buffer will store both key and key_inverse, so must be twice
2846          * size of buf
2847          */
2848         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2849         if (!key)
2850                 goto out;
2851
2852         range_buf = (struct ice_aqc_acl_profile_ranges *)
2853                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2854         if (!range_buf)
2855                 goto out;
2856
2857         /* Set don't care mask to all 1's to start, will zero out used bytes */
2858         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2859
2860         for (i = 0; i < prof->segs_cnt; i++) {
2861                 struct ice_flow_seg_info *seg = &prof->segs[i];
2862                 u8 j;
2863
2864                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2865                                      ICE_FLOW_FIELD_IDX_MAX) {
2866                         struct ice_flow_fld_info *info = &seg->fields[j];
2867
2868                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2869                                 ice_flow_acl_frmt_entry_range(j, info,
2870                                                               range_buf, data,
2871                                                               &range);
2872                         else
2873                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2874                                                             dontcare, data);
2875                 }
2876
2877                 for (j = 0; j < seg->raws_cnt; j++) {
2878                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2879                         u16 dst, src, mask, k;
2880                         bool use_mask = false;
2881
2882                         src = info->src.val;
2883                         dst = info->entry.val -
2884                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2885                         mask = info->src.mask;
2886
2887                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2888                                 use_mask = true;
2889
2890                         for (k = 0; k < info->entry.last; k++, dst++) {
2891                                 buf[dst] = data[src++];
2892                                 if (use_mask)
2893                                         dontcare[dst] = ~data[mask++];
2894                                 else
2895                                         dontcare[dst] = 0;
2896                         }
2897                 }
2898         }
2899
2900         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2901         dontcare[prof->cfg.scen->pid_idx] = 0;
2902
2903         /* Format the buffer for direction flags */
2904         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2905
2906         if (prof->dir == ICE_FLOW_RX)
2907                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2908
2909         if (range) {
2910                 buf[prof->cfg.scen->rng_chk_idx] = range;
2911                 /* Mark any unused range checkers as don't care */
2912                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2913                 e->range_buf = range_buf;
2914         } else {
2915                 ice_free(hw, range_buf);
2916         }
2917
2918         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2919                              buf_sz);
2920         if (status)
2921                 goto out;
2922
2923         e->entry = key;
2924         e->entry_sz = buf_sz * 2;
2925
2926 out:
2927         if (buf)
2928                 ice_free(hw, buf);
2929
2930         if (dontcare)
2931                 ice_free(hw, dontcare);
2932
2933         if (status && key)
2934                 ice_free(hw, key);
2935
2936         if (status && range_buf) {
2937                 ice_free(hw, range_buf);
2938                 e->range_buf = NULL;
2939         }
2940
2941         if (status && e->acts) {
2942                 ice_free(hw, e->acts);
2943                 e->acts = NULL;
2944                 e->acts_cnt = 0;
2945         }
2946
2947         if (status && cnt_alloc)
2948                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2949
2950         return status;
2951 }
2952
2953 /**
2954  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2955  *                                     the compared data.
2956  * @prof: pointer to flow profile
2957  * @e: pointer to the comparing flow entry
2958  * @do_chg_action: decide if we want to change the ACL action
2959  * @do_add_entry: decide if we want to add the new ACL entry
2960  * @do_rem_entry: decide if we want to remove the current ACL entry
2961  *
2962  * Find an ACL scenario entry that matches the compared data. At the same time,
2963  * this function also figures out:
2964  * a/ If we want to change the ACL action
2965  * b/ If we want to add the new ACL entry
2966  * c/ If we want to remove the current ACL entry
2967  */
2968 static struct ice_flow_entry *
2969 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2970                                   struct ice_flow_entry *e, bool *do_chg_action,
2971                                   bool *do_add_entry, bool *do_rem_entry)
2972 {
2973         struct ice_flow_entry *p, *return_entry = NULL;
2974         u8 i, j;
2975
2976         /* Check if:
2977          * a/ There exists an entry with same matching data, but different
2978          *    priority, then we remove this existing ACL entry. Then, we
2979          *    will add the new entry to the ACL scenario.
2980          * b/ There exists an entry with same matching data, priority, and
2981          *    result action, then we do nothing
2982          * c/ There exists an entry with same matching data and priority, but a
2983          *    different action, then we only change the entry's action.
2984          * d/ Else, we add this new entry to the ACL scenario.
2985          */
2986         *do_chg_action = false;
2987         *do_add_entry = true;
2988         *do_rem_entry = false;
2989         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2990                 if (memcmp(p->entry, e->entry, p->entry_sz))
2991                         continue;
2992
2993                 /* From this point, we have the same matching_data. */
2994                 *do_add_entry = false;
2995                 return_entry = p;
2996
2997                 if (p->priority != e->priority) {
2998                         /* matching data && !priority */
2999                         *do_add_entry = true;
3000                         *do_rem_entry = true;
3001                         break;
3002                 }
3003
3004                 /* From this point, we will have matching_data && priority */
3005                 if (p->acts_cnt != e->acts_cnt)
3006                         *do_chg_action = true;
3007                 for (i = 0; i < p->acts_cnt; i++) {
3008                         bool found_not_match = false;
3009
3010                         for (j = 0; j < e->acts_cnt; j++)
3011                                 if (memcmp(&p->acts[i], &e->acts[j],
3012                                            sizeof(struct ice_flow_action))) {
3013                                         found_not_match = true;
3014                                         break;
3015                                 }
3016
3017                         if (found_not_match) {
3018                                 *do_chg_action = true;
3019                                 break;
3020                         }
3021                 }
3022
3023                 /* (do_chg_action = true) means :
3024                  *    matching_data && priority && !result_action
3025                  * (do_chg_action = false) means :
3026                  *    matching_data && priority && result_action
3027                  */
3028                 break;
3029         }
3030
3031         return return_entry;
3032 }
3033
3034 /**
3035  * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
3036  * @p: flow priority
3037  */
3038 static enum ice_acl_entry_prio
3039 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
3040 {
3041         enum ice_acl_entry_prio acl_prio;
3042
3043         switch (p) {
3044         case ICE_FLOW_PRIO_LOW:
3045                 acl_prio = ICE_ACL_PRIO_LOW;
3046                 break;
3047         case ICE_FLOW_PRIO_NORMAL:
3048                 acl_prio = ICE_ACL_PRIO_NORMAL;
3049                 break;
3050         case ICE_FLOW_PRIO_HIGH:
3051                 acl_prio = ICE_ACL_PRIO_HIGH;
3052                 break;
3053         default:
3054                 acl_prio = ICE_ACL_PRIO_NORMAL;
3055                 break;
3056         }
3057
3058         return acl_prio;
3059 }
3060
3061 /**
3062  * ice_flow_acl_union_rng_chk - Perform union operation between two
3063  *                              range checker buffers
3064  * @dst_buf: pointer to destination range checker buffer
3065  * @src_buf: pointer to source range checker buffer
3066  *
3067  * This function computes the union of the dst_buf and src_buf range
3068  * checker buffers and saves the result back to dst_buf
3069  */
3070 static enum ice_status
3071 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
3072                            struct ice_aqc_acl_profile_ranges *src_buf)
3073 {
3074         u8 i, j;
3075
3076         if (!dst_buf || !src_buf)
3077                 return ICE_ERR_BAD_PTR;
3078
3079         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
3080                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
3081                 bool will_populate = false;
3082
3083                 in_data = &src_buf->checker_cfg[i];
3084
3085                 if (!in_data->mask)
3086                         break;
3087
3088                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
3089                         cfg_data = &dst_buf->checker_cfg[j];
3090
3091                         if (!cfg_data->mask ||
3092                             !memcmp(cfg_data, in_data,
3093                                     sizeof(struct ice_acl_rng_data))) {
3094                                 will_populate = true;
3095                                 break;
3096                         }
3097                 }
3098
3099                 if (will_populate) {
3100                         ice_memcpy(cfg_data, in_data,
3101                                    sizeof(struct ice_acl_rng_data),
3102                                    ICE_NONDMA_TO_NONDMA);
3103                 } else {
3104                         /* No available slot left to program range checker */
3105                         return ICE_ERR_MAX_LIMIT;
3106                 }
3107         }
3108
3109         return ICE_SUCCESS;
3110 }
3111
3112 /**
3113  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
3114  * @hw: pointer to the hardware structure
3115  * @prof: pointer to flow profile
3116  * @entry: double pointer to the flow entry
3117  *
3118  * This function looks at the entries currently added to the corresponding
3119  * ACL scenario and then performs matching logic to decide whether to add,
3120  * modify, or do nothing with this new entry.
3121  */
3122 static enum ice_status
3123 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
3124                                  struct ice_flow_entry **entry)
3125 {
3126         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
3127         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
3128         struct ice_acl_act_entry *acts = NULL;
3129         struct ice_flow_entry *exist;
3130         enum ice_status status = ICE_SUCCESS;
3131         struct ice_flow_entry *e;
3132         u8 i;
3133
3134         if (!entry || !(*entry) || !prof)
3135                 return ICE_ERR_BAD_PTR;
3136
3137         e = *entry;
3138
3139         do_chg_rng_chk = false;
3140         if (e->range_buf) {
3141                 u8 prof_id = 0;
3142
3143                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
3144                                               &prof_id);
3145                 if (status)
3146                         return status;
3147
3148                 /* Query the current range-checker value in FW */
3149                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
3150                                                    NULL);
3151                 if (status)
3152                         return status;
3153                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
3154                            sizeof(struct ice_aqc_acl_profile_ranges),
3155                            ICE_NONDMA_TO_NONDMA);
3156
3157                 /* Generate the new range-checker value */
3158                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
3159                 if (status)
3160                         return status;
3161
3162                 /* Reconfigure the range check if the buffer is changed. */
3163                 do_chg_rng_chk = false;
3164                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
3165                            sizeof(struct ice_aqc_acl_profile_ranges))) {
3166                         status = ice_prog_acl_prof_ranges(hw, prof_id,
3167                                                           &cfg_rng_buf, NULL);
3168                         if (status)
3169                                 return status;
3170
3171                         do_chg_rng_chk = true;
3172                 }
3173         }
3174
3175         /* Figure out if we want to change the ACL action and/or add the
3176          * new ACL entry and/or remove the current ACL entry
3177          */
3178         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
3179                                                   &do_add_entry, &do_rem_entry);
3180         if (do_rem_entry) {
3181                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
3182                 if (status)
3183                         return status;
3184         }
3185
3186         /* Prepare the result action buffer */
3187         acts = (struct ice_acl_act_entry *)
3188                 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
3189         if (!acts)
3190                 return ICE_ERR_NO_MEMORY;
3191
3192         for (i = 0; i < e->acts_cnt; i++)
3193                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
3194                            sizeof(struct ice_acl_act_entry),
3195                            ICE_NONDMA_TO_NONDMA);
3196
3197         if (do_add_entry) {
3198                 enum ice_acl_entry_prio prio;
3199                 u8 *keys, *inverts;
3200                 u16 entry_idx;
3201
3202                 keys = (u8 *)e->entry;
3203                 inverts = keys + (e->entry_sz / 2);
3204                 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
3205
3206                 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
3207                                            inverts, acts, e->acts_cnt,
3208                                            &entry_idx);
3209                 if (status)
3210                         goto out;
3211
3212                 e->scen_entry_idx = entry_idx;
3213                 LIST_ADD(&e->l_entry, &prof->entries);
3214         } else {
3215                 if (do_chg_action) {
3216                         /* Update the SW copy of the existing entry's action
3217                          * memory info with e's action memory info
3218                          */
3219                         ice_free(hw, exist->acts);
3220                         exist->acts_cnt = e->acts_cnt;
3221                         exist->acts = (struct ice_flow_action *)
3222                                 ice_calloc(hw, exist->acts_cnt,
3223                                            sizeof(struct ice_flow_action));
3224                         if (!exist->acts) {
3225                                 status = ICE_ERR_NO_MEMORY;
3226                                 goto out;
3227                         }
3228
3229                         ice_memcpy(exist->acts, e->acts,
3230                                    sizeof(struct ice_flow_action) * e->acts_cnt,
3231                                    ICE_NONDMA_TO_NONDMA);
3232
3233                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3234                                                   e->acts_cnt,
3235                                                   exist->scen_entry_idx);
3236                         if (status)
3237                                 goto out;
3238                 }
3239
3240                 if (do_chg_rng_chk) {
3241                         /* In this case, we want to update the range checker
3242                          * information of the existing entry
3243                          */
3244                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
3245                                                             e->range_buf);
3246                         if (status)
3247                                 goto out;
3248                 }
3249
3250                 /* As we don't add the new entry to our SW DB, free its memory
3251                  * and return the existing entry to the caller
3252                  */
3253                 ice_dealloc_flow_entry(hw, e);
3254                 *(entry) = exist;
3255         }
3256 out:
3257         ice_free(hw, acts);
3258
3259         return status;
3260 }
3261
3262 /**
3263  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3264  * @hw: pointer to the hardware structure
3265  * @prof: pointer to flow profile
3266  * @e: double pointer to the flow entry
3267  */
3268 static enum ice_status
3269 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3270                             struct ice_flow_entry **e)
3271 {
3272         enum ice_status status;
3273
3274         ice_acquire_lock(&prof->entries_lock);
3275         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3276         ice_release_lock(&prof->entries_lock);
3277
3278         return status;
3279 }
3280
3281 /**
3282  * ice_flow_add_entry - Add a flow entry
3283  * @hw: pointer to the HW struct
3284  * @blk: classification stage
3285  * @prof_id: ID of the profile to add a new flow entry to
3286  * @entry_id: unique ID to identify this flow entry
3287  * @vsi_handle: software VSI handle for the flow entry
3288  * @prio: priority of the flow entry
3289  * @data: pointer to a data buffer containing flow entry's match values/masks
3290  * @acts: arrays of actions to be performed on a match
3291  * @acts_cnt: number of actions
3292  * @entry_h: pointer to buffer that receives the new flow entry's handle
3293  */
3294 enum ice_status
3295 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3296                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3297                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
3298                    u64 *entry_h)
3299 {
3300         struct ice_flow_entry *e = NULL;
3301         struct ice_flow_prof *prof;
3302         enum ice_status status = ICE_SUCCESS;
3303
3304         /* ACL entries must indicate an action */
3305         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3306                 return ICE_ERR_PARAM;
3307
3308         /* No flow entry data is expected for RSS */
3309         if (!entry_h || (!data && blk != ICE_BLK_RSS))
3310                 return ICE_ERR_BAD_PTR;
3311
3312         if (!ice_is_vsi_valid(hw, vsi_handle))
3313                 return ICE_ERR_PARAM;
3314
3315         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3316
3317         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3318         if (!prof) {
3319                 status = ICE_ERR_DOES_NOT_EXIST;
3320         } else {
3321                 /* Allocate memory for the entry being added and associate
3322                  * the VSI with the found flow profile
3323                  */
3324                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3325                 if (!e)
3326                         status = ICE_ERR_NO_MEMORY;
3327                 else
3328                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3329         }
3330
3331         ice_release_lock(&hw->fl_profs_locks[blk]);
3332         if (status)
3333                 goto out;
3334
3335         e->id = entry_id;
3336         e->vsi_handle = vsi_handle;
3337         e->prof = prof;
3338         e->priority = prio;
3339
3340         switch (blk) {
3341         case ICE_BLK_FD:
3342         case ICE_BLK_RSS:
3343                 break;
3344         case ICE_BLK_ACL:
3345                 /* ACL will handle the entry management */
3346                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3347                                                  acts_cnt);
3348                 if (status)
3349                         goto out;
3350
3351                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3352                 if (status)
3353                         goto out;
3354
3355                 break;
3356         default:
3357                 status = ICE_ERR_NOT_IMPL;
3358                 goto out;
3359         }
3360
3361         if (blk != ICE_BLK_ACL) {
3362                 /* ACL will handle the entry management */
3363                 ice_acquire_lock(&prof->entries_lock);
3364                 LIST_ADD(&e->l_entry, &prof->entries);
3365                 ice_release_lock(&prof->entries_lock);
3366         }
3367
3368         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3369
3370 out:
3371         if (status && e) {
3372                 if (e->entry)
3373                         ice_free(hw, e->entry);
3374                 ice_free(hw, e);
3375         }
3376
3377         return status;
3378 }
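
/* Usage sketch (illustrative only): once a profile exists, an entry is added
 * against a caller-defined input buffer; for ICE_BLK_FD and ICE_BLK_RSS the
 * core only records the entry, while ICE_BLK_ACL also formats and programs it
 * as shown above. The IDs, VSI handle and input buffer below are hypothetical.
 *
 *	u64 entry_h = ICE_FLOW_ENTRY_HANDLE_INVAL;
 *	enum ice_status status;
 *
 *	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, entry_id,
 *				    vsi_handle, ICE_FLOW_PRIO_NORMAL,
 *				    input_buf, NULL, 0, &entry_h);
 */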
3379
3380 /**
3381  * ice_flow_rem_entry - Remove a flow entry
3382  * @hw: pointer to the HW struct
3383  * @blk: classification stage
3384  * @entry_h: handle to the flow entry to be removed
3385  */
3386 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3387                                    u64 entry_h)
3388 {
3389         struct ice_flow_entry *entry;
3390         struct ice_flow_prof *prof;
3391         enum ice_status status = ICE_SUCCESS;
3392
3393         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3394                 return ICE_ERR_PARAM;
3395
3396         entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
3397
3398         /* Retain the pointer to the flow profile as the entry will be freed */
3399         prof = entry->prof;
3400
3401         if (prof) {
3402                 ice_acquire_lock(&prof->entries_lock);
3403                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3404                 ice_release_lock(&prof->entries_lock);
3405         }
3406
3407         return status;
3408 }
3409
3410 /**
3411  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3412  * @seg: packet segment the field being set belongs to
3413  * @fld: field to be set
3414  * @field_type: type of the field
3415  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3416  *           entry's input buffer
3417  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3418  *            input buffer
3419  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3420  *            entry's input buffer
3421  *
3422  * This helper function stores information of a field being matched, including
3423  * the type of the field and the locations of the value to match, the mask, and
3424  * the upper-bound value in the start of the input buffer for a flow entry.
3425  * This function should only be used for fixed-size data structures.
3426  *
3427  * This function also opportunistically determines the protocol headers to be
3428  * present based on the fields being set. Some fields cannot be used alone to
3429  * determine the protocol headers present. Sometimes, fields for particular
3430  * protocol headers are not matched. In those cases, the protocol headers
3431  * must be explicitly set.
3432  */
3433 static void
3434 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3435                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3436                      u16 mask_loc, u16 last_loc)
3437 {
3438         u64 bit = BIT_ULL(fld);
3439
3440         seg->match |= bit;
3441         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3442                 seg->range |= bit;
3443
3444         seg->fields[fld].type = field_type;
3445         seg->fields[fld].src.val = val_loc;
3446         seg->fields[fld].src.mask = mask_loc;
3447         seg->fields[fld].src.last = last_loc;
3448
3449         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3450 }
3451
3452 /**
3453  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3454  * @seg: packet segment the field being set belongs to
3455  * @fld: field to be set
3456  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3457  *           entry's input buffer
3458  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3459  *            input buffer
3460  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3461  *            entry's input buffer
3462  * @range: indicate if field being matched is to be in a range
3463  *
3464  * This function specifies the locations, in the form of byte offsets from the
3465  * start of the input buffer for a flow entry, from where the value to match,
3466  * the mask value, and upper value can be extracted. These locations are then
3467  * stored in the flow profile. When adding a flow entry associated with the
3468  * flow profile, these locations will be used to quickly extract the values and
3469  * create the content of a match entry. This function should only be used for
3470  * fixed-size data structures.
3471  */
3472 void
3473 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3474                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3475 {
3476         enum ice_flow_fld_match_type t = range ?
3477                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3478
3479         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3480 }
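
/* Usage sketch (illustrative only, with a hypothetical caller-defined input
 * layout): val_loc/mask_loc/last_loc are byte offsets into the buffer later
 * handed to ice_flow_add_entry() as 'data'.
 *
 *	struct my_fd_input {
 *		u32 ip4_src;
 *		u32 ip4_src_mask;
 *	};
 *
 *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *			 offsetof(struct my_fd_input, ip4_src),
 *			 offsetof(struct my_fd_input, ip4_src_mask),
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 */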
3481
3482 /**
3483  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3484  * @seg: packet segment the field being set belongs to
3485  * @fld: field to be set
3486  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3487  *           entry's input buffer
3488  * @pref_loc: location of prefix value from entry's input buffer
3489  * @pref_sz: size of the location holding the prefix value
3490  *
3491  * This function specifies the locations, in the form of byte offsets from the
3492  * start of the input buffer for a flow entry, from where the value to match
3493  * and the IPv4 prefix value can be extracted. These locations are then stored
3494  * in the flow profile. When adding flow entries to the associated flow profile,
3495  * these locations can be used to quickly extract the values to create the
3496  * content of a match entry. This function should only be used for fixed-size
3497  * data structures.
3498  */
3499 void
3500 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3501                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3502 {
3503         /* For this type of field, the "mask" location is for the prefix value's
3504          * location and the "last" location is for the size of the location of
3505          * the prefix value.
3506          */
3507         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3508                              pref_loc, (u16)pref_sz);
3509 }
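
/* Usage sketch (illustrative only): match an IPv4 destination together with a
 * prefix value read from a hypothetical caller-defined input layout.
 *
 *	ice_flow_set_fld_prefix(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
 *				offsetof(struct my_fd_input, ip4_dst),
 *				offsetof(struct my_fd_input, ip4_dst_prefix),
 *				sizeof(u8));
 */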
3510
3511 /**
3512  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3513  * @seg: packet segment the field being set belongs to
3514  * @off: offset of the raw field from the beginning of the segment in bytes
3515  * @len: length of the raw pattern to be matched
3516  * @val_loc: location of the value to match from entry's input buffer
3517  * @mask_loc: location of mask value from entry's input buffer
3518  *
3519  * This function specifies the offset of the raw field to be matched from the
3520  * beginning of the specified packet segment, and the locations, in the form of
3521  * byte offsets from the start of the input buffer for a flow entry, from where
3522  * the value to match and the mask value are to be extracted. These locations are
3523  * then stored in the flow profile. When adding flow entries to the associated
3524  * flow profile, these locations can be used to quickly extract the values to
3525  * create the content of a match entry. This function should only be used for
3526  * fixed-size data structures.
3527  */
3528 void
3529 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3530                      u16 val_loc, u16 mask_loc)
3531 {
3532         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3533                 seg->raws[seg->raws_cnt].off = off;
3534                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3535                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3536                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3537                 /* The "last" field is used to store the length of the field */
3538                 seg->raws[seg->raws_cnt].info.src.last = len;
3539         }
3540
3541         /* Overflows of "raws" will be handled as an error condition later in
3542          * the flow when this information is processed.
3543          */
3544         seg->raws_cnt++;
3545 }
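
/* Usage sketch (illustrative only): match two raw bytes at a hypothetical
 * offset of 4 bytes from the start of this segment, with the value and mask
 * taken from hypothetical offsets in the entry's input buffer.
 *
 *	ice_flow_add_fld_raw(seg, 4, 2,
 *			     offsetof(struct my_fd_input, raw_val),
 *			     offsetof(struct my_fd_input, raw_mask));
 */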
3546
3547 /**
3548  * ice_flow_rem_vsi_prof - remove VSI from flow profile
3549  * @hw: pointer to the hardware structure
3550  * @blk: classification stage
3551  * @vsi_handle: software VSI handle
3552  * @prof_id: unique ID to identify this flow profile
3553  *
3554  * This function removes the flow entries associated with the input
3555  * VSI handle and disassociates the VSI from the flow profile.
3556  */
3557 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3558                                       u64 prof_id)
3559 {
3560         struct ice_flow_prof *prof = NULL;
3561         enum ice_status status = ICE_SUCCESS;
3562
3563         if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3564                 return ICE_ERR_PARAM;
3565
3566         /* find flow profile pointer with input package block and profile id */
3567         prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
3568         if (!prof) {
3569                 ice_debug(hw, ICE_DBG_PKG,
3570                           "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3571                 return ICE_ERR_DOES_NOT_EXIST;
3572         }
3573
3574         /* Remove all remaining flow entries before removing the flow profile */
3575         if (!LIST_EMPTY(&prof->entries)) {
3576                 struct ice_flow_entry *e, *t;
3577
3578                 ice_acquire_lock(&prof->entries_lock);
3579                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
3580                                          l_entry) {
3581                         if (e->vsi_handle != vsi_handle)
3582                                 continue;
3583
3584                         status = ice_flow_rem_entry_sync(hw, blk, e);
3585                         if (status)
3586                                 break;
3587                 }
3588                 ice_release_lock(&prof->entries_lock);
3589         }
3590         if (status)
3591                 return status;
3592
3593         /* disassociate the flow profile from sw vsi handle */
3594         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3595         if (status)
3596                 ice_debug(hw, ICE_DBG_PKG,
3597                           "ice_flow_disassoc_prof() failed with status=%d\n",
3598                           status);
3599         return status;
3600 }
3601
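/* Illustrative usage (prof_id below is hypothetical): drop every flow
 * director entry a VSI owns under one profile and detach the VSI from it.
 *
 *	enum ice_status status;
 *
 *	status = ice_flow_rem_vsi_prof(hw, ICE_BLK_FD, vsi_handle, prof_id);
 *	if (status)
 *		...handle the error...
 */
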
3602 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3603         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_ETH_NON_IP | ICE_FLOW_SEG_HDR_VLAN)
3604
3605 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3606         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3607
3608 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3609         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3610
3611 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3612         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3613          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3614          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3615
3616 /**
3617  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3618  * @segs: pointer to the flow field segment(s)
3619  * @seg_cnt: segment count
3620  * @cfg: configure parameters
3621  *
3622  * Helper function to extract fields from the hash bitmap and use the flow
3623  * header values to set up the flow field segments for later use when a flow
3624  * profile entry is added or removed.
3625  */
3626 static enum ice_status
3627 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3628                           const struct ice_rss_hash_cfg *cfg)
3629 {
3630         struct ice_flow_seg_info *seg;
3631         u64 val;
3632         u8 i;
3633
3634         /* set innermost segment */
3635         seg = &segs[seg_cnt - 1];
3636
3637         ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3638                              ICE_FLOW_FIELD_IDX_MAX)
3639                 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3640                                  ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3641                                  ICE_FLOW_FLD_OFF_INVAL, false);
3642
3643         ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3644
3645         /* set outermost segment headers */
3646         if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3647                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3648                                                    ICE_FLOW_SEG_HDR_IPV_FRAG |
3649                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3650         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3651                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3652                                                    ICE_FLOW_SEG_HDR_IPV_FRAG |
3653                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3654         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE)
3655                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3656                                                    ICE_FLOW_SEG_HDR_GRE |
3657                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3658         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE)
3659                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3660                                                    ICE_FLOW_SEG_HDR_GRE |
3661                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3662
3663         if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3664             ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
3665             ~ICE_FLOW_SEG_HDR_IPV_FRAG)
3666                 return ICE_ERR_PARAM;
3667
3668         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3669         if (val && !ice_is_pow2(val))
3670                 return ICE_ERR_CFG;
3671
3672         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3673         if (val && !ice_is_pow2(val))
3674                 return ICE_ERR_CFG;
3675
3676         return ICE_SUCCESS;
3677 }
3678
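/* Illustrative note (segment layout is approximate): for a two-segment
 * configuration such as cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE
 * the helper above in effect produces
 *
 *	segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
 *					    ICE_FLOW_SEG_HDR_GRE |
 *					    ICE_FLOW_SEG_HDR_IPV_OTHER;
 *	segs[seg_cnt - 1].hdrs           |= cfg->addl_hdrs;
 *	segs[seg_cnt - 1].match           = cfg->hash_flds;
 *
 * i.e. the outer segment only carries the tunnel/outer-IP headers, while the
 * innermost segment carries the caller's headers and hashed fields.
 */
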
3679 /**
3680  * ice_rem_vsi_rss_list - remove VSI from RSS list
3681  * @hw: pointer to the hardware structure
3682  * @vsi_handle: software VSI handle
3683  *
3684  * Remove the VSI from all RSS configurations in the list.
3685  */
3686 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3687 {
3688         struct ice_rss_cfg *r, *tmp;
3689
3690         if (LIST_EMPTY(&hw->rss_list_head))
3691                 return;
3692
3693         ice_acquire_lock(&hw->rss_locks);
3694         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3695                                  ice_rss_cfg, l_entry)
3696                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3697                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3698                                 LIST_DEL(&r->l_entry);
3699                                 ice_free(hw, r);
3700                         }
3701         ice_release_lock(&hw->rss_locks);
3702 }
3703
3704 /**
3705  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3706  * @hw: pointer to the hardware structure
3707  * @vsi_handle: software VSI handle
3708  *
3709  * This function will iterate through all flow profiles and disassociate
3710  * the VSI from each matching profile. If a flow profile is left with no
3711  * associated VSIs, it will be removed.
3712  */
3713 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3714 {
3715         const enum ice_block blk = ICE_BLK_RSS;
3716         struct ice_flow_prof *p, *t;
3717         enum ice_status status = ICE_SUCCESS;
3718
3719         if (!ice_is_vsi_valid(hw, vsi_handle))
3720                 return ICE_ERR_PARAM;
3721
3722         if (LIST_EMPTY(&hw->fl_profs[blk]))
3723                 return ICE_SUCCESS;
3724
3725         ice_acquire_lock(&hw->rss_locks);
3726         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3727                                  l_entry)
3728                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3729                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3730                         if (status)
3731                                 break;
3732
3733                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3734                                 status = ice_flow_rem_prof(hw, blk, p->id);
3735                                 if (status)
3736                                         break;
3737                         }
3738                 }
3739         ice_release_lock(&hw->rss_locks);
3740
3741         return status;
3742 }
3743
3744 /**
3745  * ice_get_rss_hdr_type - get an RSS profile's header type
3746  * @prof: RSS flow profile
3747  */
3748 static enum ice_rss_cfg_hdr_type
3749 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3750 {
3751         enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3752
3753         if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3754                 hdr_type = ICE_RSS_OUTER_HEADERS;
3755         } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3756                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3757                         hdr_type = ICE_RSS_INNER_HEADERS;
3758                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3759                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3760                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3761                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3762         }
3763
3764         return hdr_type;
3765 }
3766
3767 /**
3768  * ice_rem_rss_list - remove RSS configuration from list
3769  * @hw: pointer to the hardware structure
3770  * @vsi_handle: software VSI handle
3771  * @prof: pointer to flow profile
3772  *
3773  * Assumption: lock has already been acquired for RSS list
3774  */
3775 static void
3776 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3777 {
3778         enum ice_rss_cfg_hdr_type hdr_type;
3779         struct ice_rss_cfg *r, *tmp;
3780
3781         /* Search for an RSS configuration associated with the VSI that matches
3782          * the hash configuration of the flow profile. If found, clear the VSI
3783          * from the entry and delete the entry once no VSIs reference it.
3784          */
3785         hdr_type = ice_get_rss_hdr_type(prof);
3786         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3787                                  ice_rss_cfg, l_entry)
3788                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3789                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3790                     r->hash.hdr_type == hdr_type) {
3791                         ice_clear_bit(vsi_handle, r->vsis);
3792                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3793                                 LIST_DEL(&r->l_entry);
3794                                 ice_free(hw, r);
3795                         }
3796                         return;
3797                 }
3798 }
3799
3800 /**
3801  * ice_add_rss_list - add RSS configuration to list
3802  * @hw: pointer to the hardware structure
3803  * @vsi_handle: software VSI handle
3804  * @prof: pointer to flow profile
3805  *
3806  * Assumption: lock has already been acquired for RSS list
3807  */
3808 static enum ice_status
3809 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3810 {
3811         enum ice_rss_cfg_hdr_type hdr_type;
3812         struct ice_rss_cfg *r, *rss_cfg;
3813
3814         hdr_type = ice_get_rss_hdr_type(prof);
3815         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3816                             ice_rss_cfg, l_entry)
3817                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3818                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3819                     r->hash.hdr_type == hdr_type) {
3820                         ice_set_bit(vsi_handle, r->vsis);
3821                         return ICE_SUCCESS;
3822                 }
3823
3824         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3825         if (!rss_cfg)
3826                 return ICE_ERR_NO_MEMORY;
3827
3828         rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3829         rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3830         rss_cfg->hash.hdr_type = hdr_type;
3831         rss_cfg->hash.symm = prof->cfg.symm;
3832         ice_set_bit(vsi_handle, rss_cfg->vsis);
3833
3834         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3835
3836         return ICE_SUCCESS;
3837 }
3838
3839 #define ICE_FLOW_PROF_HASH_S    0
3840 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3841 #define ICE_FLOW_PROF_HDR_S     32
3842 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3843 #define ICE_FLOW_PROF_ENCAP_S   62
3844 #define ICE_FLOW_PROF_ENCAP_M   (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3845
3846 /* Flow profile ID format:
3847  * [0:31] - Packet match fields
3848  * [32:61] - Protocol header
3849  * [62:63] - Encapsulation flag:
3850  *           0 - non-tunneled
3851  *           1 - tunneled
3852  *           2 - tunneled with outer IPv4
3853  *           3 - tunneled with outer IPv6
3854  */
3855 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3856         ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3857                (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3858                (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
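
/* Worked example (input values are made up, only the bit layout is real):
 *
 *	ICE_FLOW_GEN_PROFID(0x3c, 0x184, 2)
 *		== 0x3cULL | (0x184ULL << 32) | (2ULL << 62)
 *		== 0x800001840000003cULL
 *
 * i.e. hash fields in bits [0:31], protocol headers in bits [32:61] and the
 * encapsulation flag (here 2, tunneled with outer IPv4) in bits [62:63].
 */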
3859
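/**
 * ice_rss_config_xor_word - program one symmetric substitution entry
 * @hw: pointer to the hardware structure
 * @prof_id: hardware profile ID of the RSS profile
 * @src: field vector word index to be substituted
 * @dst: field vector word index to substitute with
 *
 * Writes (@dst | 0x80) into the byte lane of GLQF_HSYMM(@prof_id, @src / 4)
 * selected by @src % 4, leaving the other byte lanes of the register intact.
 */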
3860 static void
3861 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3862 {
3863         u32 s = ((src % 4) << 3); /* byte shift */
3864         u32 v = dst | 0x80; /* value to program */
3865         u8 i = src / 4; /* register index */
3866         u32 reg;
3867
3868         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3869         reg = (reg & ~(0xff << s)) | (v << s);
3870         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3871 }
3872
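/**
 * ice_rss_config_xor - set up symmetric substitution for a field pair
 * @hw: pointer to the hardware structure
 * @prof_id: hardware profile ID of the RSS profile
 * @src: first field vector word index of the source field
 * @dst: first field vector word index of the destination field
 * @len: field length in field vector words
 *
 * Program GLQF_HSYMM for every word of the @src/@dst pair, in both directions,
 * so that the hardware hashes the two fields symmetrically. The indices are
 * mirrored against the last field vector word because the GLQF_HSYMM ordering
 * is reversed with respect to the extraction sequence.
 */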
3873 static void
3874 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3875 {
3876         int fv_last_word =
3877                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3878         int i;
3879
3880         for (i = 0; i < len; i++) {
3881                 ice_rss_config_xor_word(hw, prof_id,
3882                                         /* Note: the field vector order in
3883                                          * GLQF_HSYMM and GLQF_HINSET is reversed.
3884                                          */
3885                                         fv_last_word - (src + i),
3886                                         fv_last_word - (dst + i));
3887                 ice_rss_config_xor_word(hw, prof_id,
3888                                         fv_last_word - (dst + i),
3889                                         fv_last_word - (src + i));
3890         }
3891 }
3892
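/**
 * ice_rss_update_symm - update the symmetric hash setting of an RSS profile
 * @hw: pointer to the hardware structure
 * @prof: flow profile whose hardware symmetry registers are to be updated
 *
 * Reset the GLQF_HSYMM registers of the hardware profile backing @prof and,
 * if the profile requests a symmetric hash, pair the source and destination
 * IPv4, IPv6, TCP, UDP and SCTP fields of the innermost segment so that both
 * directions of a flow produce the same hash value.
 */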
3893 static void
3894 ice_rss_update_symm(struct ice_hw *hw,
3895                     struct ice_flow_prof *prof)
3896 {
3897         struct ice_prof_map *map;
3898         u8 prof_id, m;
3899
3900         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3901         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3902         if (map)
3903                 prof_id = map->prof_id;
3904         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3905         if (!map)
3906                 return;
3907         /* clear to default */
3908         for (m = 0; m < 6; m++)
3909                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3910         if (prof->cfg.symm) {
3911                 struct ice_flow_seg_info *seg =
3912                         &prof->segs[prof->segs_cnt - 1];
3913
3914                 struct ice_flow_seg_xtrct *ipv4_src =
3915                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3916                 struct ice_flow_seg_xtrct *ipv4_dst =
3917                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3918                 struct ice_flow_seg_xtrct *ipv6_src =
3919                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3920                 struct ice_flow_seg_xtrct *ipv6_dst =
3921                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3922
3923                 struct ice_flow_seg_xtrct *tcp_src =
3924                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3925                 struct ice_flow_seg_xtrct *tcp_dst =
3926                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3927
3928                 struct ice_flow_seg_xtrct *udp_src =
3929                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3930                 struct ice_flow_seg_xtrct *udp_dst =
3931                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3932
3933                 struct ice_flow_seg_xtrct *sctp_src =
3934                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3935                 struct ice_flow_seg_xtrct *sctp_dst =
3936                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3937
3938                 /* xor IPv4 */
3939                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3940                         ice_rss_config_xor(hw, prof_id,
3941                                            ipv4_src->idx, ipv4_dst->idx, 2);
3942
3943                 /* xor IPv6 */
3944                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3945                         ice_rss_config_xor(hw, prof_id,
3946                                            ipv6_src->idx, ipv6_dst->idx, 8);
3947
3948                 /* xor TCP */
3949                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3950                         ice_rss_config_xor(hw, prof_id,
3951                                            tcp_src->idx, tcp_dst->idx, 1);
3952
3953                 /* xor UDP */
3954                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3955                         ice_rss_config_xor(hw, prof_id,
3956                                            udp_src->idx, udp_dst->idx, 1);
3957
3958                 /* xor SCTP */
3959                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3960                         ice_rss_config_xor(hw, prof_id,
3961                                            sctp_src->idx, sctp_dst->idx, 1);
3962         }
3963 }
3964
3965 /**
3966  * ice_add_rss_cfg_sync - add an RSS configuration
3967  * @hw: pointer to the hardware structure
3968  * @vsi_handle: software VSI handle
3969  * @cfg: configure parameters
3970  *
3971  * Assumption: lock has already been acquired for RSS list
3972  */
3973 static enum ice_status
3974 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3975                      const struct ice_rss_hash_cfg *cfg)
3976 {
3977         const enum ice_block blk = ICE_BLK_RSS;
3978         struct ice_flow_prof *prof = NULL;
3979         struct ice_flow_seg_info *segs;
3980         enum ice_status status;
3981         u8 segs_cnt;
3982
3983         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3984                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3985
3986         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3987                                                       sizeof(*segs));
3988         if (!segs)
3989                 return ICE_ERR_NO_MEMORY;
3990
3991         /* Construct the packet segment info from the hashed fields */
3992         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3993         if (status)
3994                 goto exit;
3995
3996         /* Search for a flow profile that has matching headers, hash fields
3997          * and has the input VSI associated with it. If found, no further
3998          * operations are required, so exit.
3999          */
4000         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4001                                         vsi_handle,
4002                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
4003                                         ICE_FLOW_FIND_PROF_CHK_VSI);
4004         if (prof) {
4005                 if (prof->cfg.symm == cfg->symm)
4006                         goto exit;
4007                 prof->cfg.symm = cfg->symm;
4008                 goto update_symm;
4009         }
4010
4011         /* Check if a flow profile exists with the same protocol headers and
4012          * associated with the input VSI. If so, disassociate the VSI from
4013          * this profile. The VSI will be added to a new profile created with
4014          * the protocol header and new hash field configuration.
4015          */
4016         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4017                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
4018         if (prof) {
4019                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
4020                 if (!status)
4021                         ice_rem_rss_list(hw, vsi_handle, prof);
4022                 else
4023                         goto exit;
4024
4025                 /* Remove profile if it has no VSIs associated */
4026                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
4027                         status = ice_flow_rem_prof(hw, blk, prof->id);
4028                         if (status)
4029                                 goto exit;
4030                 }
4031         }
4032
4033         /* Search for a profile that has only the same match fields. If one
4034          * exists, associate the VSI with it.
4035          */
4036         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4037                                         vsi_handle,
4038                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
4039         if (prof) {
4040                 if (prof->cfg.symm == cfg->symm) {
4041                         status = ice_flow_assoc_prof(hw, blk, prof,
4042                                                      vsi_handle);
4043                         if (!status)
4044                                 status = ice_add_rss_list(hw, vsi_handle,
4045                                                           prof);
4046                 } else {
4047                         /* if a profile exists but with a different symmetric
4048                          * requirement, just return an error.
4049                          */
4050                         status = ICE_ERR_NOT_SUPPORTED;
4051                 }
4052                 goto exit;
4053         }
4054
4055         /* Create a new flow profile with generated profile and packet
4056          * segment information.
4057          */
4058         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
4059                                    ICE_FLOW_GEN_PROFID(cfg->hash_flds,
4060                                                        segs[segs_cnt - 1].hdrs,
4061                                                        cfg->hdr_type),
4062                                    segs, segs_cnt, NULL, 0, &prof);
4063         if (status)
4064                 goto exit;
4065
4066         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
4067         /* If association to a new flow profile failed then this profile can
4068          * be removed.
4069          */
4070         if (status) {
4071                 ice_flow_rem_prof(hw, blk, prof->id);
4072                 goto exit;
4073         }
4074
4075         status = ice_add_rss_list(hw, vsi_handle, prof);
4076
4077         prof->cfg.symm = cfg->symm;
4078 update_symm:
4079         ice_rss_update_symm(hw, prof);
4080
4081 exit:
4082         ice_free(hw, segs);
4083         return status;
4084 }
4085
4086 /**
4087  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
4088  * @hw: pointer to the hardware structure
4089  * @vsi_handle: software VSI handle
4090  * @cfg: configure parameters
4091  *
4092  * This function will generate a flow profile based on the input fields to
4093  * hash on and the protocol header types, and will then associate the input
4094  * VSI with that profile.
4095  */
4096 enum ice_status
4097 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4098                 const struct ice_rss_hash_cfg *cfg)
4099 {
4100         struct ice_rss_hash_cfg local_cfg;
4101         enum ice_status status;
4102
4103         if (!ice_is_vsi_valid(hw, vsi_handle) ||
4104             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4105             cfg->hash_flds == ICE_HASH_INVALID)
4106                 return ICE_ERR_PARAM;
4107
4108         local_cfg = *cfg;
4109         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4110                 ice_acquire_lock(&hw->rss_locks);
4111                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4112                 ice_release_lock(&hw->rss_locks);
4113         } else {
4114                 ice_acquire_lock(&hw->rss_locks);
4115                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4116                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4117                 if (!status) {
4118                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4119                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
4120                                                       &local_cfg);
4121                 }
4122                 ice_release_lock(&hw->rss_locks);
4123         }
4124
4125         return status;
4126 }
4127
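/* Illustrative usage (values and helpers below are examples only): request a
 * symmetric hash on the outer IPv4 source/destination addresses of a VSI.
 *
 *	struct ice_rss_hash_cfg cfg = { 0 };
 *
 *	cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
 *	cfg.hash_flds = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *			BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA);
 *	cfg.hdr_type  = ICE_RSS_OUTER_HEADERS;
 *	cfg.symm      = true;
 *
 *	if (ice_add_rss_cfg(hw, vsi_handle, &cfg))
 *		...handle the error...
 *
 * BIT_ULL() is assumed to be available here; callers may instead use the
 * pre-composed hash-field bitmaps defined in the flow headers.
 */
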
4128 /**
4129  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
4130  * @hw: pointer to the hardware structure
4131  * @vsi_handle: software VSI handle
4132  * @cfg: configure parameters
4133  *
4134  * Assumption: lock has already been acquired for RSS list
4135  */
4136 static enum ice_status
4137 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
4138                      const struct ice_rss_hash_cfg *cfg)
4139 {
4140         const enum ice_block blk = ICE_BLK_RSS;
4141         struct ice_flow_seg_info *segs;
4142         struct ice_flow_prof *prof;
4143         enum ice_status status;
4144         u8 segs_cnt;
4145
4146         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
4147                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
4148         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
4149                                                       sizeof(*segs));
4150         if (!segs)
4151                 return ICE_ERR_NO_MEMORY;
4152
4153         /* Construct the packet segment info from the hashed fields */
4154         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
4155         if (status)
4156                 goto out;
4157
4158         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4159                                         vsi_handle,
4160                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
4161         if (!prof) {
4162                 status = ICE_ERR_DOES_NOT_EXIST;
4163                 goto out;
4164         }
4165
4166         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
4167         if (status)
4168                 goto out;
4169
4170         /* Remove RSS configuration from VSI context before deleting
4171          * the flow profile.
4172          */
4173         ice_rem_rss_list(hw, vsi_handle, prof);
4174
4175         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
4176                 status = ice_flow_rem_prof(hw, blk, prof->id);
4177
4178 out:
4179         ice_free(hw, segs);
4180         return status;
4181 }
4182
4183 /**
4184  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
4185  * @hw: pointer to the hardware structure
4186  * @vsi_handle: software VSI handle
4187  * @cfg: configure parameters
4188  *
4189  * This function will look up the flow profile based on the input
4190  * hash field bitmap, iterate through the profile entry list of
4191  * that profile and find the entry associated with the input VSI to be
4192  * removed. Calls are made to the underlying flow APIs, which will in
4193  * turn build or update buffers for the RSS XLT1 section.
4194  */
4195 enum ice_status
4196 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4197                 const struct ice_rss_hash_cfg *cfg)
4198 {
4199         struct ice_rss_hash_cfg local_cfg;
4200         enum ice_status status;
4201
4202         if (!ice_is_vsi_valid(hw, vsi_handle) ||
4203             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4204             cfg->hash_flds == ICE_HASH_INVALID)
4205                 return ICE_ERR_PARAM;
4206
4207         ice_acquire_lock(&hw->rss_locks);
4208         local_cfg = *cfg;
4209         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4210                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4211         } else {
4212                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4213                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4214
4215                 if (!status) {
4216                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4217                         status = ice_rem_rss_cfg_sync(hw, vsi_handle,
4218                                                       &local_cfg);
4219                 }
4220         }
4221         ice_release_lock(&hw->rss_locks);
4222
4223         return status;
4224 }
4225
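/* Note (illustrative): removal must be given the same hash configuration
 * (hash_flds, addl_hdrs and hdr_type) that was used when the configuration
 * was added, since ice_rem_rss_cfg_sync() locates the flow profile by
 * matching those fields. Reusing the cfg from the example above,
 * ice_rem_rss_cfg(hw, vsi_handle, &cfg) undoes the earlier addition.
 */
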
4226 /**
4227  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
4228  * @hw: pointer to the hardware structure
4229  * @vsi_handle: software VSI handle
4230  */
4231 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
4232 {
4233         enum ice_status status = ICE_SUCCESS;
4234         struct ice_rss_cfg *r;
4235
4236         if (!ice_is_vsi_valid(hw, vsi_handle))
4237                 return ICE_ERR_PARAM;
4238
4239         ice_acquire_lock(&hw->rss_locks);
4240         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4241                             ice_rss_cfg, l_entry) {
4242                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
4243                         status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
4244                         if (status)
4245                                 break;
4246                 }
4247         }
4248         ice_release_lock(&hw->rss_locks);
4249
4250         return status;
4251 }
4252
4253 /**
4254  * ice_get_rss_cfg - returns hashed fields for the given header types
4255  * @hw: pointer to the hardware structure
4256  * @vsi_handle: software VSI handle
4257  * @hdrs: protocol header type
4258  *
4259  * This function will return the match fields of the first instance of a flow
4260  * profile that has the given header types and contains the input VSI.
4261  */
4262 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4263 {
4264         u64 rss_hash = ICE_HASH_INVALID;
4265         struct ice_rss_cfg *r;
4266
4267         /* verify that the protocol header is non-zero and the VSI is valid */
4268         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4269                 return ICE_HASH_INVALID;
4270
4271         ice_acquire_lock(&hw->rss_locks);
4272         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4273                             ice_rss_cfg, l_entry)
4274                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4275                     r->hash.addl_hdrs == hdrs) {
4276                         rss_hash = r->hash.hash_flds;
4277                         break;
4278                 }
4279         ice_release_lock(&hw->rss_locks);
4280
4281         return rss_hash;
4282 }
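
/* Illustrative usage (header selection is an example): query which fields are
 * currently hashed for plain outer IPv4 traffic on a VSI.
 *
 *	u64 hash_flds = ice_get_rss_cfg(hw, vsi_handle,
 *					ICE_FLOW_SEG_HDR_IPV4 |
 *					ICE_FLOW_SEG_HDR_IPV_OTHER);
 *
 *	if (hash_flds == ICE_HASH_INVALID)
 *		...no matching RSS configuration for this VSI...
 */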