drivers/net/ice/base/ice_flow.c (dpdk.git, commit 5b26d6c8b2b4d313709fbeea35c128753acd02e5)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2021 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IPV4_ID         2
17 #define ICE_FLOW_FLD_SZ_IPV6_ID         4
18 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
19 #define ICE_FLOW_FLD_SZ_IP_TTL          1
20 #define ICE_FLOW_FLD_SZ_IP_PROT         1
21 #define ICE_FLOW_FLD_SZ_PORT            2
22 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
23 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
24 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
25 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
26 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
27 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
28 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
29 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
30 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
31 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
32 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
33 #define ICE_FLOW_FLD_SZ_AH_SPI  4
34 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
35 #define ICE_FLOW_FLD_SZ_VXLAN_VNI       4
36 #define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
37
38 /* Describe properties of a protocol header field */
39 struct ice_flow_field_info {
40         enum ice_flow_seg_hdr hdr;
41         s16 off;        /* Offset from start of a protocol header, in bits */
42         u16 size;       /* Size of field in bits */
43         u16 mask;       /* 16-bit mask for field */
44 };
45
46 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
47         .hdr = _hdr, \
48         .off = (_offset_bytes) * BITS_PER_BYTE, \
49         .size = (_size_bytes) * BITS_PER_BYTE, \
50         .mask = 0, \
51 }
52
53 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
54         .hdr = _hdr, \
55         .off = (_offset_bytes) * BITS_PER_BYTE, \
56         .size = (_size_bytes) * BITS_PER_BYTE, \
57         .mask = _mask, \
58 }
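/* Informal example of how the two helpers above expand (added here only as
 * an illustration, not part of the driver logic): the IPv4 source-address
 * entry in the table below,
 *
 *   ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR)
 *
 * yields { .hdr = ICE_FLOW_SEG_HDR_IPV4, .off = 96, .size = 32, .mask = 0 },
 * i.e. byte offsets and sizes are converted into bit units via BITS_PER_BYTE.
 * The _MSK variant differs only in carrying a non-zero 16-bit match mask.
 */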
59
60 /* Table containing properties of supported protocol header fields */
61 static const
62 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
63         /* Ether */
64         /* ICE_FLOW_FIELD_IDX_ETH_DA */
65         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
66         /* ICE_FLOW_FIELD_IDX_ETH_SA */
67         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
68         /* ICE_FLOW_FIELD_IDX_S_VLAN */
69         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
70         /* ICE_FLOW_FIELD_IDX_C_VLAN */
71         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
72         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
73         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
74         /* IPv4 / IPv6 */
75         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
76         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
77                               0x00fc),
78         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
79         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
80                               0x0ff0),
81         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
82         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
83                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
84         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
85         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
86                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
87         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
88         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
89                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
90         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
91         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
92                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
93         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
94         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
95         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
96         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
97         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
99         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
100         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
101         /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
102         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
103                           ICE_FLOW_FLD_SZ_IPV4_ID),
104         /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
105         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
106                           ICE_FLOW_FLD_SZ_IPV6_ID),
107         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
108         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
109                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
110         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
111         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
112                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
113         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
114         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
115                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
116         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
117         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
118                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
119         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
120         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
121                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
122         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
123         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
124                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
125         /* Transport */
126         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
127         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
128         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
129         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
130         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
131         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
132         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
133         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
134         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
135         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
136         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
137         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
138         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
139         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
140         /* ARP */
141         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
142         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
143         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
144         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
145         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
146         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
147         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
148         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
149         /* ICE_FLOW_FIELD_IDX_ARP_OP */
150         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
151         /* ICMP */
152         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
153         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
154         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
155         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
156         /* GRE */
157         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
158         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
159         /* GTP */
160         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
161         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
162                           ICE_FLOW_FLD_SZ_GTP_TEID),
163         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
164         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
165                           ICE_FLOW_FLD_SZ_GTP_TEID),
166         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
167         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
168                           ICE_FLOW_FLD_SZ_GTP_TEID),
169         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
170         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
171                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
172         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
173         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
174                           ICE_FLOW_FLD_SZ_GTP_TEID),
175         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
176         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
177                           ICE_FLOW_FLD_SZ_GTP_TEID),
178         /* PPPOE */
179         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
180         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
181                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
182         /* PFCP */
183         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
184         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
185                           ICE_FLOW_FLD_SZ_PFCP_SEID),
186         /* L2TPV3 */
187         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
188         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
189                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
190         /* ESP */
191         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
192         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
193                           ICE_FLOW_FLD_SZ_ESP_SPI),
194         /* AH */
195         /* ICE_FLOW_FIELD_IDX_AH_SPI */
196         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
197                           ICE_FLOW_FLD_SZ_AH_SPI),
198         /* NAT_T_ESP */
199         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
200         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
201                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
202         /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
203         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
204                           ICE_FLOW_FLD_SZ_VXLAN_VNI),
205         /* ECPRI_TP0 */
206         /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
207         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
208                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
209         /* UDP_ECPRI_TP0 */
210         /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
211         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
212                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
213 };
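/* Reader's note (illustrative only): the table above is indexed by the
 * ICE_FLOW_FIELD_IDX_* values noted in the comments, so for example the
 * ICE_FLOW_FIELD_IDX_IPV4_SA entry describes a 32-bit field that starts
 * 96 bits (12 bytes) into the IPv4 header.
 */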
214
215 /* Bitmaps indicating relevant packet types for a particular protocol header
216  *
217  * Packet types for packets with an Outer/First/Single MAC header
218  */
219 static const u32 ice_ptypes_mac_ofos[] = {
220         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
221         0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
222         0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
223         0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
224         0x00000000, 0x00000000, 0x00000000, 0x00000000,
225         0x00000000, 0x00000000, 0x00000000, 0x00000000,
226         0x00000000, 0x00000000, 0x00000000, 0x00000000,
227         0x00000000, 0x00000000, 0x00000000, 0x00000000,
228 };
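/* Illustration (an informal reading of the PTYPE bitmaps above and below,
 * assuming the usual little-endian bitmap layout consumed by ice_and_bitmap()):
 * each array holds 32 x 32-bit words, one bit per packet type, so PTYPE N is
 * bit (N % 32) of word (N / 32). For instance, word 0 of ice_ptypes_mac_ofos
 * is 0xFDC00846, so PTYPEs 1, 2, 6 and 11 (among others) are marked as having
 * an Outer/First/Single MAC header.
 */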
229
230 /* Packet types for packets with an Innermost/Last MAC VLAN header */
231 static const u32 ice_ptypes_macvlan_il[] = {
232         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
233         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
234         0x00000000, 0x00000000, 0x00000000, 0x00000000,
235         0x00000000, 0x00000000, 0x00000000, 0x00000000,
236         0x00000000, 0x00000000, 0x00000000, 0x00000000,
237         0x00000000, 0x00000000, 0x00000000, 0x00000000,
238         0x00000000, 0x00000000, 0x00000000, 0x00000000,
239         0x00000000, 0x00000000, 0x00000000, 0x00000000,
240 };
241
242 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
243  * does NOT include IPV4 other PTYPEs
244  */
245 static const u32 ice_ptypes_ipv4_ofos[] = {
246         0x1D800000, 0x24000800, 0x00000000, 0x00000000,
247         0x00000000, 0x00000155, 0x00000000, 0x00000000,
248         0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
249         0x00001500, 0x00000000, 0x00000000, 0x00000000,
250         0x00000000, 0x00000000, 0x00000000, 0x00000000,
251         0x00000000, 0x00000000, 0x00000000, 0x00000000,
252         0x00000000, 0x00000000, 0x00000000, 0x00000000,
253         0x00000000, 0x00000000, 0x00000000, 0x00000000,
254 };
255
256 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
257  * includes IPV4 other PTYPEs
258  */
259 static const u32 ice_ptypes_ipv4_ofos_all[] = {
260         0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
261         0x00000000, 0x00000155, 0x00000000, 0x00000000,
262         0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
263         0x03FFD500, 0x00000000, 0x00000000, 0x00000000,
264         0x00000000, 0x00000000, 0x00000000, 0x00000000,
265         0x00000000, 0x00000000, 0x00000000, 0x00000000,
266         0x00000000, 0x00000000, 0x00000000, 0x00000000,
267         0x00000000, 0x00000000, 0x00000000, 0x00000000,
268 };
269
270 /* Packet types for packets with an Innermost/Last IPv4 header */
271 static const u32 ice_ptypes_ipv4_il[] = {
272         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
273         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
274         0x00000000, 0x00000000, 0x001FF800, 0x00100000,
275         0xFC0FC000, 0x00000000, 0x00000000, 0x00000000,
276         0x00000000, 0x00000000, 0x00000000, 0x00000000,
277         0x00000000, 0x00000000, 0x00000000, 0x00000000,
278         0x00000000, 0x00000000, 0x00000000, 0x00000000,
279         0x00000000, 0x00000000, 0x00000000, 0x00000000,
280 };
281
282 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
283  * does NOT include IPV6 other PTYPEs
284  */
285 static const u32 ice_ptypes_ipv6_ofos[] = {
286         0x00000000, 0x00000000, 0x76000000, 0x10002000,
287         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
288         0x00000000, 0x03F00000, 0x00000540, 0x00000000,
289         0x00002A00, 0x00000000, 0x00000000, 0x00000000,
290         0x00000000, 0x00000000, 0x00000000, 0x00000000,
291         0x00000000, 0x00000000, 0x00000000, 0x00000000,
292         0x00000000, 0x00000000, 0x00000000, 0x00000000,
293         0x00000000, 0x00000000, 0x00000000, 0x00000000,
294 };
295
296 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
297  * includes IPV6 other PTYPEs
298  */
299 static const u32 ice_ptypes_ipv6_ofos_all[] = {
300         0x00000000, 0x00000000, 0x76000000, 0x1EFDE000,
301         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
302         0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
303         0xFC002A00, 0x0000003F, 0x00000000, 0x00000000,
304         0x00000000, 0x00000000, 0x00000000, 0x00000000,
305         0x00000000, 0x00000000, 0x00000000, 0x00000000,
306         0x00000000, 0x00000000, 0x00000000, 0x00000000,
307         0x00000000, 0x00000000, 0x00000000, 0x00000000,
308 };
309
310 /* Packet types for packets with an Innermost/Last IPv6 header */
311 static const u32 ice_ptypes_ipv6_il[] = {
312         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
313         0x00000770, 0x00000000, 0x00000000, 0x00000000,
314         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
315         0x03F00000, 0x0000003F, 0x00000000, 0x00000000,
316         0x00000000, 0x00000000, 0x00000000, 0x00000000,
317         0x00000000, 0x00000000, 0x00000000, 0x00000000,
318         0x00000000, 0x00000000, 0x00000000, 0x00000000,
319         0x00000000, 0x00000000, 0x00000000, 0x00000000,
320 };
321
322 /* Packet types for packets with an Outer/First/Single
323  * non-frag IPv4 header - no L4
324  */
325 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
326         0x10800000, 0x04000800, 0x00000000, 0x00000000,
327         0x00000000, 0x00000000, 0x00000000, 0x00000000,
328         0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
329         0x00001500, 0x00000000, 0x00000000, 0x00000000,
330         0x00000000, 0x00000000, 0x00000000, 0x00000000,
331         0x00000000, 0x00000000, 0x00000000, 0x00000000,
332         0x00000000, 0x00000000, 0x00000000, 0x00000000,
333         0x00000000, 0x00000000, 0x00000000, 0x00000000,
334 };
335
336 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
337 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
338         0x60000000, 0x18043008, 0x80000002, 0x6010c021,
339         0x00000008, 0x00000000, 0x00000000, 0x00000000,
340         0x00000000, 0x00000000, 0x00139800, 0x00000000,
341         0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
342         0x00000000, 0x00000000, 0x00000000, 0x00000000,
343         0x00000000, 0x00000000, 0x00000000, 0x00000000,
344         0x00000000, 0x00000000, 0x00000000, 0x00000000,
345         0x00000000, 0x00000000, 0x00000000, 0x00000000,
346 };
347
348 /* Packet types for packets with an Outer/First/Single
349  * non-frag IPv6 header - no L4
350  */
351 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
352         0x00000000, 0x00000000, 0x42000000, 0x10002000,
353         0x00000000, 0x00000000, 0x00000000, 0x00000000,
354         0x00000000, 0x02300000, 0x00000540, 0x00000000,
355         0x00002A00, 0x00000000, 0x00000000, 0x00000000,
356         0x00000000, 0x00000000, 0x00000000, 0x00000000,
357         0x00000000, 0x00000000, 0x00000000, 0x00000000,
358         0x00000000, 0x00000000, 0x00000000, 0x00000000,
359         0x00000000, 0x00000000, 0x00000000, 0x00000000,
360 };
361
362 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
363 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
364         0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
365         0x00000430, 0x00000000, 0x00000000, 0x00000000,
366         0x00000000, 0x00000000, 0x4e600000, 0x00000000,
367         0x02300000, 0x00000023, 0x00000000, 0x00000000,
368         0x00000000, 0x00000000, 0x00000000, 0x00000000,
369         0x00000000, 0x00000000, 0x00000000, 0x00000000,
370         0x00000000, 0x00000000, 0x00000000, 0x00000000,
371         0x00000000, 0x00000000, 0x00000000, 0x00000000,
372 };
373
374 /* Packet types for packets with an Outermost/First ARP header */
375 static const u32 ice_ptypes_arp_of[] = {
376         0x00000800, 0x00000000, 0x00000000, 0x00000000,
377         0x00000000, 0x00000000, 0x00000000, 0x00000000,
378         0x00000000, 0x00000000, 0x00000000, 0x00000000,
379         0x00000000, 0x00000000, 0x00000000, 0x00000000,
380         0x00000000, 0x00000000, 0x00000000, 0x00000000,
381         0x00000000, 0x00000000, 0x00000000, 0x00000000,
382         0x00000000, 0x00000000, 0x00000000, 0x00000000,
383         0x00000000, 0x00000000, 0x00000000, 0x00000000,
384 };
385
386 /* UDP Packet types for non-tunneled packets or tunneled
387  * packets with inner UDP.
388  */
389 static const u32 ice_ptypes_udp_il[] = {
390         0x81000000, 0x20204040, 0x04000010, 0x80810102,
391         0x00000040, 0x00000000, 0x00000000, 0x00000000,
392         0x00000000, 0x00410000, 0x908427E0, 0x00100007,
393         0x10410000, 0x00000004, 0x00000000, 0x00000000,
394         0x00000000, 0x00000000, 0x00000000, 0x00000000,
395         0x00000000, 0x00000000, 0x00000000, 0x00000000,
396         0x00000000, 0x00000000, 0x00000000, 0x00000000,
397         0x00000000, 0x00000000, 0x00000000, 0x00000000,
398 };
399
400 /* Packet types for packets with an Innermost/Last TCP header */
401 static const u32 ice_ptypes_tcp_il[] = {
402         0x04000000, 0x80810102, 0x10000040, 0x02040408,
403         0x00000102, 0x00000000, 0x00000000, 0x00000000,
404         0x00000000, 0x00820000, 0x21084000, 0x00000000,
405         0x20820000, 0x00000008, 0x00000000, 0x00000000,
406         0x00000000, 0x00000000, 0x00000000, 0x00000000,
407         0x00000000, 0x00000000, 0x00000000, 0x00000000,
408         0x00000000, 0x00000000, 0x00000000, 0x00000000,
409         0x00000000, 0x00000000, 0x00000000, 0x00000000,
410 };
411
412 /* Packet types for packets with an Innermost/Last SCTP header */
413 static const u32 ice_ptypes_sctp_il[] = {
414         0x08000000, 0x01020204, 0x20000081, 0x04080810,
415         0x00000204, 0x00000000, 0x00000000, 0x00000000,
416         0x00000000, 0x01040000, 0x00000000, 0x00000000,
417         0x41040000, 0x00000010, 0x00000000, 0x00000000,
418         0x00000000, 0x00000000, 0x00000000, 0x00000000,
419         0x00000000, 0x00000000, 0x00000000, 0x00000000,
420         0x00000000, 0x00000000, 0x00000000, 0x00000000,
421         0x00000000, 0x00000000, 0x00000000, 0x00000000,
422 };
423
424 /* Packet types for packets with an Outermost/First ICMP header */
425 static const u32 ice_ptypes_icmp_of[] = {
426         0x10000000, 0x00000000, 0x00000000, 0x00000000,
427         0x00000000, 0x00000000, 0x00000000, 0x00000000,
428         0x00000000, 0x00000000, 0x00000000, 0x00000000,
429         0x00000000, 0x00000000, 0x00000000, 0x00000000,
430         0x00000000, 0x00000000, 0x00000000, 0x00000000,
431         0x00000000, 0x00000000, 0x00000000, 0x00000000,
432         0x00000000, 0x00000000, 0x00000000, 0x00000000,
433         0x00000000, 0x00000000, 0x00000000, 0x00000000,
434 };
435
436 /* Packet types for packets with an Innermost/Last ICMP header */
437 static const u32 ice_ptypes_icmp_il[] = {
438         0x00000000, 0x02040408, 0x40000102, 0x08101020,
439         0x00000408, 0x00000000, 0x00000000, 0x00000000,
440         0x00000000, 0x00000000, 0x42108000, 0x00000000,
441         0x82080000, 0x00000020, 0x00000000, 0x00000000,
442         0x00000000, 0x00000000, 0x00000000, 0x00000000,
443         0x00000000, 0x00000000, 0x00000000, 0x00000000,
444         0x00000000, 0x00000000, 0x00000000, 0x00000000,
445         0x00000000, 0x00000000, 0x00000000, 0x00000000,
446 };
447
448 /* Packet types for packets with an Outermost/First GRE header */
449 static const u32 ice_ptypes_gre_of[] = {
450         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
451         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
452         0x00000000, 0x00000000, 0x00000000, 0x00000000,
453         0x00000000, 0x00000000, 0x00000000, 0x00000000,
454         0x00000000, 0x00000000, 0x00000000, 0x00000000,
455         0x00000000, 0x00000000, 0x00000000, 0x00000000,
456         0x00000000, 0x00000000, 0x00000000, 0x00000000,
457         0x00000000, 0x00000000, 0x00000000, 0x00000000,
458 };
459
460 /* Packet types for packets with an Innermost/Last MAC header */
461 static const u32 ice_ptypes_mac_il[] = {
462         0x00000000, 0x20000000, 0x00000000, 0x00000000,
463         0x00000000, 0x00000000, 0x00000000, 0x00000000,
464         0x00000000, 0x00000000, 0x00000000, 0x00000000,
465         0x00000000, 0x00000000, 0x00000000, 0x00000000,
466         0x00000000, 0x00000000, 0x00000000, 0x00000000,
467         0x00000000, 0x00000000, 0x00000000, 0x00000000,
468         0x00000000, 0x00000000, 0x00000000, 0x00000000,
469         0x00000000, 0x00000000, 0x00000000, 0x00000000,
470 };
471
472 /* Packet types for GTPC */
473 static const u32 ice_ptypes_gtpc[] = {
474         0x00000000, 0x00000000, 0x00000000, 0x00000000,
475         0x00000000, 0x00000000, 0x00000000, 0x00000000,
476         0x00000000, 0x00000000, 0x000001E0, 0x00000000,
477         0x00000000, 0x00000000, 0x00000000, 0x00000000,
478         0x00000000, 0x00000000, 0x00000000, 0x00000000,
479         0x00000000, 0x00000000, 0x00000000, 0x00000000,
480         0x00000000, 0x00000000, 0x00000000, 0x00000000,
481         0x00000000, 0x00000000, 0x00000000, 0x00000000,
482 };
483
484 /* Packet types for VXLAN with VNI */
485 static const u32 ice_ptypes_vxlan_vni[] = {
486         0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
487         0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
488         0x00000000, 0x00000000, 0x00000000, 0x00000000,
489         0x00000000, 0x00000000, 0x00000000, 0x00000000,
490         0x00000000, 0x00000000, 0x00000000, 0x00000000,
491         0x00000000, 0x00000000, 0x00000000, 0x00000000,
492         0x00000000, 0x00000000, 0x00000000, 0x00000000,
493         0x00000000, 0x00000000, 0x00000000, 0x00000000,
494 };
495
496 /* Packet types for GTPC with TEID */
497 static const u32 ice_ptypes_gtpc_tid[] = {
498         0x00000000, 0x00000000, 0x00000000, 0x00000000,
499         0x00000000, 0x00000000, 0x00000000, 0x00000000,
500         0x00000000, 0x00000000, 0x00000060, 0x00000000,
501         0x00000000, 0x00000000, 0x00000000, 0x00000000,
502         0x00000000, 0x00000000, 0x00000000, 0x00000000,
503         0x00000000, 0x00000000, 0x00000000, 0x00000000,
504         0x00000000, 0x00000000, 0x00000000, 0x00000000,
505         0x00000000, 0x00000000, 0x00000000, 0x00000000,
506 };
507
508 /* Packet types for GTPU */
509 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
510         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
511         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
512         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
513         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
514         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
515         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
516         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
517         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
518         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
519         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
520         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
521         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
522         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
523         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
524         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
525         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
526         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
527         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
528         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
529         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
530 };
531
532 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
533         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
534         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
535         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
536         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
537         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
538         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
539         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
540         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
541         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
542         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
543         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
544         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
545         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
546         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
547         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
548         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
549         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
550         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
551         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
552         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
553 };
554
555 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
556         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
557         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
558         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
559         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
560         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
561         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
562         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
563         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
564         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
565         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
566         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
567         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
568         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
569         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
570         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
571         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
572         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
573         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
574         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
575         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
576 };
577
578 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
579         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
580         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
581         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
582         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
583         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
584         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
585         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
586         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
587         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
588         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
589         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
590         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
591         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
592         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
593         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
594         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
595         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
596         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
597         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
598         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
599 };
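/* A short note on reading the four attribute tables above (informal, based on
 * how they are used later in this file): they all cover the same set of GTP-U
 * PTYPEs; ice_flow_proc_seg_hdrs() selects one of them and stores it in
 * params->attr / params->attr_cnt so that the matched PTYPEs can later be
 * qualified as plain session, PDU with extension header, downlink or uplink
 * traffic.
 */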
600
601 static const u32 ice_ptypes_gtpu[] = {
602         0x00000000, 0x00000000, 0x00000000, 0x00000000,
603         0x00000000, 0x00000000, 0x00000000, 0x00000000,
604         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
605         0x00000000, 0x00000000, 0x00000000, 0x00000000,
606         0x00000000, 0x00000000, 0x00000000, 0x00000000,
607         0x00000000, 0x00000000, 0x00000000, 0x00000000,
608         0x00000000, 0x00000000, 0x00000000, 0x00000000,
609         0x00000000, 0x00000000, 0x00000000, 0x00000000,
610 };
611
612 /* Packet types for pppoe */
613 static const u32 ice_ptypes_pppoe[] = {
614         0x00000000, 0x00000000, 0x00000000, 0x00000000,
615         0x00000000, 0x00000000, 0x00000000, 0x00000000,
616         0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
617         0x00000000, 0x00000000, 0x00000000, 0x00000000,
618         0x00000000, 0x00000000, 0x00000000, 0x00000000,
619         0x00000000, 0x00000000, 0x00000000, 0x00000000,
620         0x00000000, 0x00000000, 0x00000000, 0x00000000,
621         0x00000000, 0x00000000, 0x00000000, 0x00000000,
622 };
623
624 /* Packet types for packets with PFCP NODE header */
625 static const u32 ice_ptypes_pfcp_node[] = {
626         0x00000000, 0x00000000, 0x00000000, 0x00000000,
627         0x00000000, 0x00000000, 0x00000000, 0x00000000,
628         0x00000000, 0x00000000, 0x80000000, 0x00000002,
629         0x00000000, 0x00000000, 0x00000000, 0x00000000,
630         0x00000000, 0x00000000, 0x00000000, 0x00000000,
631         0x00000000, 0x00000000, 0x00000000, 0x00000000,
632         0x00000000, 0x00000000, 0x00000000, 0x00000000,
633         0x00000000, 0x00000000, 0x00000000, 0x00000000,
634 };
635
636 /* Packet types for packets with PFCP SESSION header */
637 static const u32 ice_ptypes_pfcp_session[] = {
638         0x00000000, 0x00000000, 0x00000000, 0x00000000,
639         0x00000000, 0x00000000, 0x00000000, 0x00000000,
640         0x00000000, 0x00000000, 0x00000000, 0x00000005,
641         0x00000000, 0x00000000, 0x00000000, 0x00000000,
642         0x00000000, 0x00000000, 0x00000000, 0x00000000,
643         0x00000000, 0x00000000, 0x00000000, 0x00000000,
644         0x00000000, 0x00000000, 0x00000000, 0x00000000,
645         0x00000000, 0x00000000, 0x00000000, 0x00000000,
646 };
647
648 /* Packet types for l2tpv3 */
649 static const u32 ice_ptypes_l2tpv3[] = {
650         0x00000000, 0x00000000, 0x00000000, 0x00000000,
651         0x00000000, 0x00000000, 0x00000000, 0x00000000,
652         0x00000000, 0x00000000, 0x00000000, 0x00000300,
653         0x00000000, 0x00000000, 0x00000000, 0x00000000,
654         0x00000000, 0x00000000, 0x00000000, 0x00000000,
655         0x00000000, 0x00000000, 0x00000000, 0x00000000,
656         0x00000000, 0x00000000, 0x00000000, 0x00000000,
657         0x00000000, 0x00000000, 0x00000000, 0x00000000,
658 };
659
660 /* Packet types for esp */
661 static const u32 ice_ptypes_esp[] = {
662         0x00000000, 0x00000000, 0x00000000, 0x00000000,
663         0x00000000, 0x00000003, 0x00000000, 0x00000000,
664         0x00000000, 0x00000000, 0x00000000, 0x00000000,
665         0x00000000, 0x00000000, 0x00000000, 0x00000000,
666         0x00000000, 0x00000000, 0x00000000, 0x00000000,
667         0x00000000, 0x00000000, 0x00000000, 0x00000000,
668         0x00000000, 0x00000000, 0x00000000, 0x00000000,
669         0x00000000, 0x00000000, 0x00000000, 0x00000000,
670 };
671
672 /* Packet types for ah */
673 static const u32 ice_ptypes_ah[] = {
674         0x00000000, 0x00000000, 0x00000000, 0x00000000,
675         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
676         0x00000000, 0x00000000, 0x00000000, 0x00000000,
677         0x00000000, 0x00000000, 0x00000000, 0x00000000,
678         0x00000000, 0x00000000, 0x00000000, 0x00000000,
679         0x00000000, 0x00000000, 0x00000000, 0x00000000,
680         0x00000000, 0x00000000, 0x00000000, 0x00000000,
681         0x00000000, 0x00000000, 0x00000000, 0x00000000,
682 };
683
684 /* Packet types for packets with NAT_T ESP header */
685 static const u32 ice_ptypes_nat_t_esp[] = {
686         0x00000000, 0x00000000, 0x00000000, 0x00000000,
687         0x00000000, 0x00000030, 0x00000000, 0x00000000,
688         0x00000000, 0x00000000, 0x00000000, 0x00000000,
689         0x00000000, 0x00000000, 0x00000000, 0x00000000,
690         0x00000000, 0x00000000, 0x00000000, 0x00000000,
691         0x00000000, 0x00000000, 0x00000000, 0x00000000,
692         0x00000000, 0x00000000, 0x00000000, 0x00000000,
693         0x00000000, 0x00000000, 0x00000000, 0x00000000,
694 };
695
696 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
697         0x00000846, 0x00000000, 0x00000000, 0x00000000,
698         0x00000000, 0x00000000, 0x00000000, 0x00000000,
699         0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
700         0x00000000, 0x00000000, 0x00000000, 0x00000000,
701         0x00000000, 0x00000000, 0x00000000, 0x00000000,
702         0x00000000, 0x00000000, 0x00000000, 0x00000000,
703         0x00000000, 0x00000000, 0x00000000, 0x00000000,
704         0x00000000, 0x00000000, 0x00000000, 0x00000000,
705 };
706
707 static const u32 ice_ptypes_gtpu_no_ip[] = {
708         0x00000000, 0x00000000, 0x00000000, 0x00000000,
709         0x00000000, 0x00000000, 0x00000000, 0x00000000,
710         0x00000000, 0x00000000, 0x00000600, 0x00000000,
711         0x00000000, 0x00000000, 0x00000000, 0x00000000,
712         0x00000000, 0x00000000, 0x00000000, 0x00000000,
713         0x00000000, 0x00000000, 0x00000000, 0x00000000,
714         0x00000000, 0x00000000, 0x00000000, 0x00000000,
715         0x00000000, 0x00000000, 0x00000000, 0x00000000,
716 };
717
718 static const u32 ice_ptypes_ecpri_tp0[] = {
719         0x00000000, 0x00000000, 0x00000000, 0x00000000,
720         0x00000000, 0x00000000, 0x00000000, 0x00000000,
721         0x00000000, 0x00000000, 0x00000000, 0x00000400,
722         0x00000000, 0x00000000, 0x00000000, 0x00000000,
723         0x00000000, 0x00000000, 0x00000000, 0x00000000,
724         0x00000000, 0x00000000, 0x00000000, 0x00000000,
725         0x00000000, 0x00000000, 0x00000000, 0x00000000,
726         0x00000000, 0x00000000, 0x00000000, 0x00000000,
727 };
728
729 static const u32 ice_ptypes_udp_ecpri_tp0[] = {
730         0x00000000, 0x00000000, 0x00000000, 0x00000000,
731         0x00000000, 0x00000000, 0x00000000, 0x00000000,
732         0x00000000, 0x00000000, 0x00000000, 0x00100000,
733         0x00000000, 0x00000000, 0x00000000, 0x00000000,
734         0x00000000, 0x00000000, 0x00000000, 0x00000000,
735         0x00000000, 0x00000000, 0x00000000, 0x00000000,
736         0x00000000, 0x00000000, 0x00000000, 0x00000000,
737         0x00000000, 0x00000000, 0x00000000, 0x00000000,
738 };
739
740 static const u32 ice_ptypes_l2tpv2[] = {
741         0x00000000, 0x00000000, 0x00000000, 0x00000000,
742         0x00000000, 0x00000000, 0x00000000, 0x00000000,
743         0x00000000, 0x00000000, 0x00000000, 0x00000000,
744         0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
745         0x00000000, 0x00000000, 0x00000000, 0x00000000,
746         0x00000000, 0x00000000, 0x00000000, 0x00000000,
747         0x00000000, 0x00000000, 0x00000000, 0x00000000,
748         0x00000000, 0x00000000, 0x00000000, 0x00000000,
749 };
750
751 static const u32 ice_ptypes_ppp[] = {
752         0x00000000, 0x00000000, 0x00000000, 0x00000000,
753         0x00000000, 0x00000000, 0x00000000, 0x00000000,
754         0x00000000, 0x00000000, 0x00000000, 0x00000000,
755         0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
756         0x00000000, 0x00000000, 0x00000000, 0x00000000,
757         0x00000000, 0x00000000, 0x00000000, 0x00000000,
758         0x00000000, 0x00000000, 0x00000000, 0x00000000,
759         0x00000000, 0x00000000, 0x00000000, 0x00000000,
760 };
761
762 static const u32 ice_ptypes_ipv4_frag[] = {
763         0x00400000, 0x00000000, 0x00000000, 0x00000000,
764         0x00000000, 0x00000000, 0x00000000, 0x00000000,
765         0x00000000, 0x00000000, 0x00000000, 0x00000000,
766         0x00000000, 0x00000000, 0x00000000, 0x00000000,
767         0x00000000, 0x00000000, 0x00000000, 0x00000000,
768         0x00000000, 0x00000000, 0x00000000, 0x00000000,
769         0x00000000, 0x00000000, 0x00000000, 0x00000000,
770         0x00000000, 0x00000000, 0x00000000, 0x00000000,
771 };
772
773 static const u32 ice_ptypes_ipv6_frag[] = {
774         0x00000000, 0x00000000, 0x01000000, 0x00000000,
775         0x00000000, 0x00000000, 0x00000000, 0x00000000,
776         0x00000000, 0x00000000, 0x00000000, 0x00000000,
777         0x00000000, 0x00000000, 0x00000000, 0x00000000,
778         0x00000000, 0x00000000, 0x00000000, 0x00000000,
779         0x00000000, 0x00000000, 0x00000000, 0x00000000,
780         0x00000000, 0x00000000, 0x00000000, 0x00000000,
781         0x00000000, 0x00000000, 0x00000000, 0x00000000,
782 };
783
784 /* Manage parameters and info used during the creation of a flow profile */
785 struct ice_flow_prof_params {
786         enum ice_block blk;
787         u16 entry_length; /* # of bytes formatted entry will require */
788         u8 es_cnt;
789         struct ice_flow_prof *prof;
790
791         /* For ACL, es[0] will hold the data of ICE_RX_MDID_PKT_FLAGS_15_0.
792          * This will give us the direction flags.
793          */
794         struct ice_fv_word es[ICE_MAX_FV_WORDS];
795         /* attributes can be used to add attributes to a particular PTYPE */
796         const struct ice_ptype_attributes *attr;
797         u16 attr_cnt;
798
799         u16 mask[ICE_MAX_FV_WORDS];
800         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
801 };
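/* Informal note: es[] holds the extraction sequence being built for the
 * profile (one ice_fv_word per extracted protocol word, counted by es_cnt),
 * while mask[] carries an optional per-word match mask; both are filled in as
 * the segment fields are processed further below in this file.
 */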
802
803 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
804         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
805         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
806         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
807         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
808         ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
809         ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
810         ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP | ICE_FLOW_SEG_HDR_GRE)
811
812 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
813         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
814 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
815         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
816          ICE_FLOW_SEG_HDR_ARP)
817 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
818         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
819          ICE_FLOW_SEG_HDR_SCTP)
820 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
821 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
822         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
823
824 /**
825  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
826  * @segs: array of one or more packet segments that describe the flow
827  * @segs_cnt: number of packet segments provided
828  */
829 static enum ice_status
830 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
831 {
832         u8 i;
833
834         for (i = 0; i < segs_cnt; i++) {
835                 /* Multiple L3 headers */
836                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
837                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
838                         return ICE_ERR_PARAM;
839
840                 /* Multiple L4 headers */
841                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
842                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
843                         return ICE_ERR_PARAM;
844         }
845
846         return ICE_SUCCESS;
847 }
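/* Worked example (informal): a segment with
 *   hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP
 * passes the check above, since the L3 and L4 sub-masks each reduce to a
 * single bit. A segment with both ICE_FLOW_SEG_HDR_IPV4 and
 * ICE_FLOW_SEG_HDR_IPV6 set is rejected with ICE_ERR_PARAM because
 * (hdrs & ICE_FLOW_SEG_HDRS_L3_MASK) is then not a power of two.
 */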
848
849 /* Sizes of fixed known protocol headers without header options */
850 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
851 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
852 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
853 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
854 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
855 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
856 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
857 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
858 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
859
860 /**
861  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
862  * @params: information about the flow to be processed
863  * @seg: index of packet segment whose header size is to be determined
864  */
865 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
866 {
867         u16 sz;
868
869         /* L2 headers */
870         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
871                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
872
873         /* L3 headers */
874         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
875                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
876         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
877                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
878         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
879                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
880         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
881                 /* An L3 header is required if L4 is specified */
882                 return 0;
883
884         /* L4 headers */
885         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
886                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
887         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
888                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
889         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
890                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
891         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
892                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
893
894         return sz;
895 }
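/* Worked example (informal): a segment describing MAC + VLAN + IPv4 + TCP
 * yields ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (16) + ICE_FLOW_PROT_HDR_SZ_IPV4 (20)
 * + ICE_FLOW_PROT_HDR_SZ_TCP (20) = 56 bytes, while a segment that specifies
 * TCP without any L3 header returns 0 because its size cannot be determined.
 */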
896
897 /**
898  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
899  * @params: information about the flow to be processed
900  *
901  * This function identifies the packet types associated with the protocol
902  * headers present in packet segments of the specified flow profile.
903  */
904 static enum ice_status
905 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
906 {
907         struct ice_flow_prof *prof;
908         u8 i;
909
910         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
911                    ICE_NONDMA_MEM);
912
913         prof = params->prof;
914
915         for (i = 0; i < params->prof->segs_cnt; i++) {
916                 const ice_bitmap_t *src;
917                 u32 hdrs;
918
919                 hdrs = prof->segs[i].hdrs;
920
921                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
922                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
923                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
924                         ice_and_bitmap(params->ptypes, params->ptypes, src,
925                                        ICE_FLOW_PTYPE_MAX);
926                 }
927
928                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
929                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
930                         ice_and_bitmap(params->ptypes, params->ptypes, src,
931                                        ICE_FLOW_PTYPE_MAX);
932                 }
933
934                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
935                         ice_and_bitmap(params->ptypes, params->ptypes,
936                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
937                                        ICE_FLOW_PTYPE_MAX);
938                 }
939
940                 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
941                         src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
942                         ice_and_bitmap(params->ptypes, params->ptypes, src,
943                                        ICE_FLOW_PTYPE_MAX);
944                 }
945                 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
946                     (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
947                         src = i ?
948                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
949                                 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
950                         ice_and_bitmap(params->ptypes, params->ptypes, src,
951                                        ICE_FLOW_PTYPE_MAX);
952                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
953                            (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
954                         src = i ?
955                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
956                                 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
957                         ice_and_bitmap(params->ptypes, params->ptypes, src,
958                                        ICE_FLOW_PTYPE_MAX);
959                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
960                                 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
961                         src = (const ice_bitmap_t *)ice_ptypes_ipv4_frag;
962                         ice_and_bitmap(params->ptypes, params->ptypes, src,
963                                        ICE_FLOW_PTYPE_MAX);
964                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
965                                 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
966                         src = (const ice_bitmap_t *)ice_ptypes_ipv6_frag;
967                         ice_and_bitmap(params->ptypes, params->ptypes, src,
968                                        ICE_FLOW_PTYPE_MAX);
969                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
970                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
971                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
972                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
973                         ice_and_bitmap(params->ptypes, params->ptypes, src,
974                                        ICE_FLOW_PTYPE_MAX);
975                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
976                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
977                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
978                         ice_and_bitmap(params->ptypes, params->ptypes, src,
979                                        ICE_FLOW_PTYPE_MAX);
980                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
981                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
982                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
983                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
984                         ice_and_bitmap(params->ptypes, params->ptypes, src,
985                                        ICE_FLOW_PTYPE_MAX);
986                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
987                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
988                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
989                         ice_and_bitmap(params->ptypes, params->ptypes, src,
990                                        ICE_FLOW_PTYPE_MAX);
991                 }
992
993                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
994                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
995                         ice_and_bitmap(params->ptypes, params->ptypes,
996                                        src, ICE_FLOW_PTYPE_MAX);
997                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
998                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
999                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1000                                        ICE_FLOW_PTYPE_MAX);
1001                 } else {
1002                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1003                         ice_andnot_bitmap(params->ptypes, params->ptypes, src,
1004                                           ICE_FLOW_PTYPE_MAX);
1005                 }
1006
1007                 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
1008                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
1009                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1010                                        ICE_FLOW_PTYPE_MAX);
1011                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
1012                         ice_and_bitmap(params->ptypes, params->ptypes,
1013                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
1014                                        ICE_FLOW_PTYPE_MAX);
1015                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
1016                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
1017                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1018                                        ICE_FLOW_PTYPE_MAX);
1019                 }
1020
1021                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
1022                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
1023                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
1024                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1025                                        ICE_FLOW_PTYPE_MAX);
1026                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
1027                         src = (const ice_bitmap_t *)ice_ptypes_gre_of;
1028                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1029                                        ICE_FLOW_PTYPE_MAX);
1030                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
1031                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
1032                         ice_and_bitmap(params->ptypes, params->ptypes,
1033                                        src, ICE_FLOW_PTYPE_MAX);
1034                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
1035                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
1036                         ice_and_bitmap(params->ptypes, params->ptypes,
1037                                        src, ICE_FLOW_PTYPE_MAX);
1038                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
1039                         src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
1040                         ice_and_bitmap(params->ptypes, params->ptypes,
1041                                        src, ICE_FLOW_PTYPE_MAX);
1042                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
1043                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1044                         ice_and_bitmap(params->ptypes, params->ptypes,
1045                                        src, ICE_FLOW_PTYPE_MAX);
1046
1047                         /* Attributes for GTP packet with downlink */
1048                         params->attr = ice_attr_gtpu_down;
1049                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1050                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
1051                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1052                         ice_and_bitmap(params->ptypes, params->ptypes,
1053                                        src, ICE_FLOW_PTYPE_MAX);
1054
1055                         /* Attributes for GTP packet with uplink */
1056                         params->attr = ice_attr_gtpu_up;
1057                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1058                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
1059                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1060                         ice_and_bitmap(params->ptypes, params->ptypes,
1061                                        src, ICE_FLOW_PTYPE_MAX);
1062
1063                         /* Attributes for GTP packet with Extension Header */
1064                         params->attr = ice_attr_gtpu_eh;
1065                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1066                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1067                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1068                         ice_and_bitmap(params->ptypes, params->ptypes,
1069                                        src, ICE_FLOW_PTYPE_MAX);
1070
1071                         /* Attributes for GTP packet without Extension Header */
1072                         params->attr = ice_attr_gtpu_session;
1073                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1074                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
1075                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
1076                         ice_and_bitmap(params->ptypes, params->ptypes,
1077                                        src, ICE_FLOW_PTYPE_MAX);
1078                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1079                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1080                         ice_and_bitmap(params->ptypes, params->ptypes,
1081                                        src, ICE_FLOW_PTYPE_MAX);
1082                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1083                         src = (const ice_bitmap_t *)ice_ptypes_esp;
1084                         ice_and_bitmap(params->ptypes, params->ptypes,
1085                                        src, ICE_FLOW_PTYPE_MAX);
1086                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1087                         src = (const ice_bitmap_t *)ice_ptypes_ah;
1088                         ice_and_bitmap(params->ptypes, params->ptypes,
1089                                        src, ICE_FLOW_PTYPE_MAX);
1090                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1091                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1092                         ice_and_bitmap(params->ptypes, params->ptypes,
1093                                        src, ICE_FLOW_PTYPE_MAX);
1094                 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1095                         src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1096                         ice_and_bitmap(params->ptypes, params->ptypes,
1097                                        src, ICE_FLOW_PTYPE_MAX);
1098                 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1099                         src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1100                         ice_and_bitmap(params->ptypes, params->ptypes,
1101                                        src, ICE_FLOW_PTYPE_MAX);
1102                 }
1103
1104                 if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
1105                         src = (const ice_bitmap_t *)ice_ptypes_ppp;
1106                         ice_and_bitmap(params->ptypes, params->ptypes,
1107                                        src, ICE_FLOW_PTYPE_MAX);
1108                 }
1109
1110                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1111                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1112                                 src =
1113                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1114                         else
1115                                 src =
1116                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1117
1118                         ice_and_bitmap(params->ptypes, params->ptypes,
1119                                        src, ICE_FLOW_PTYPE_MAX);
1120                 } else {
1121                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1122                         ice_andnot_bitmap(params->ptypes, params->ptypes,
1123                                           src, ICE_FLOW_PTYPE_MAX);
1124
1125                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1126                         ice_andnot_bitmap(params->ptypes, params->ptypes,
1127                                           src, ICE_FLOW_PTYPE_MAX);
1128                 }
1129         }
1130
1131         return ICE_SUCCESS;
1132 }
1133
1134 /**
1135  * ice_flow_xtract_pkt_flags - Create an extraction sequence entry for packet flags
1136  * @hw: pointer to the HW struct
1137  * @params: information about the flow to be processed
1138  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1139  *
1140  * This function allocates an extraction sequence entry for a DWORD-sized
1141  * chunk of the packet flags.
1142  */
1143 static enum ice_status
1144 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1145                           struct ice_flow_prof_params *params,
1146                           enum ice_flex_mdid_pkt_flags flags)
1147 {
1148         u8 fv_words = hw->blk[params->blk].es.fvw;
1149         u8 idx;
1150
1151         /* Make sure the number of extraction sequence entries required does not
1152          * exceed the block's capacity.
1153          */
1154         if (params->es_cnt >= fv_words)
1155                 return ICE_ERR_MAX_LIMIT;
1156
1157         /* some blocks require a reversed field vector layout */
1158         if (hw->blk[params->blk].es.reverse)
1159                 idx = fv_words - params->es_cnt - 1;
1160         else
1161                 idx = params->es_cnt;
1162
1163         params->es[idx].prot_id = ICE_PROT_META_ID;
1164         params->es[idx].off = flags;
1165         params->es_cnt++;
1166
1167         return ICE_SUCCESS;
1168 }
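
/* Example (illustrative): with a hypothetical field-vector width fvw = 48
 * and a block that uses the reversed layout, the first packet-flags entry
 * (es_cnt = 0) lands at index 48 - 0 - 1 = 47; with the normal layout it
 * would land at index 0.
 */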
1169
1170 /**
1171  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1172  * @hw: pointer to the HW struct
1173  * @params: information about the flow to be processed
1174  * @seg: packet segment index of the field to be extracted
1175  * @fld: ID of field to be extracted
1176  * @match: bitmap of all matched fields in the segment
1177  *
1178  * This function determines the protocol ID, offset, and size of the given
1179  * field. It then allocates one or more extraction sequence entries for the
1180  * given field, and fills the entries with protocol ID and offset information.
1181  */
1182 static enum ice_status
1183 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1184                     u8 seg, enum ice_flow_field fld, u64 match)
1185 {
1186         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1187         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1188         u8 fv_words = hw->blk[params->blk].es.fvw;
1189         struct ice_flow_fld_info *flds;
1190         u16 cnt, ese_bits, i;
1191         u16 sib_mask = 0;
1192         u16 mask;
1193         u16 off;
1194
1195         flds = params->prof->segs[seg].fields;
1196
1197         switch (fld) {
1198         case ICE_FLOW_FIELD_IDX_ETH_DA:
1199         case ICE_FLOW_FIELD_IDX_ETH_SA:
1200         case ICE_FLOW_FIELD_IDX_S_VLAN:
1201         case ICE_FLOW_FIELD_IDX_C_VLAN:
1202                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1203                 break;
1204         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1205                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1206                 break;
1207         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1208                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1209                 break;
1210         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1211                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1212                 break;
1213         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1214         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1215                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1216
1217                 /* TTL and PROT share the same extraction sequence entry, so
1218                  * each is treated as the other's sibling when building the
1219                  * sequence.
1220                  */
1221                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1222                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1223                 else
1224                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1225
1226                 /* If the sibling field is also included, that field's
1227                  * mask needs to be included.
1228                  */
1229                 if (match & BIT(sib))
1230                         sib_mask = ice_flds_info[sib].mask;
1231                 break;
1232         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1233         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1234                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1235
1236                 /* TTL and PROT share the same extraction sequence entry, so
1237                  * each is treated as the other's sibling when building the
1238                  * sequence.
1239                  */
1240                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1241                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1242                 else
1243                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1244
1245                 /* If the sibling field is also included, that field's
1246                  * mask needs to be included.
1247                  */
1248                 if (match & BIT(sib))
1249                         sib_mask = ice_flds_info[sib].mask;
1250                 break;
1251         case ICE_FLOW_FIELD_IDX_IPV4_SA:
1252         case ICE_FLOW_FIELD_IDX_IPV4_DA:
1253                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1254                 break;
1255         case ICE_FLOW_FIELD_IDX_IPV4_ID:
1256                 prot_id = ICE_PROT_IPV4_OF_OR_S;
1257                 break;
1258         case ICE_FLOW_FIELD_IDX_IPV6_SA:
1259         case ICE_FLOW_FIELD_IDX_IPV6_DA:
1260         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1261         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1262         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1263         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1264         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1265         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1266                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1267                 break;
1268         case ICE_FLOW_FIELD_IDX_IPV6_ID:
1269                 prot_id = ICE_PROT_IPV6_FRAG;
1270                 break;
1271         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1272         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1273         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1274                 prot_id = ICE_PROT_TCP_IL;
1275                 break;
1276         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1277         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1278                 prot_id = ICE_PROT_UDP_IL_OR_S;
1279                 break;
1280         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1281         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1282                 prot_id = ICE_PROT_SCTP_IL;
1283                 break;
1284         case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1285         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1286         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1287         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1288         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1289         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1290         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1291                 /* GTP is accessed through UDP OF protocol */
1292                 prot_id = ICE_PROT_UDP_OF;
1293                 break;
1294         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1295                 prot_id = ICE_PROT_PPPOE;
1296                 break;
1297         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1298                 prot_id = ICE_PROT_UDP_IL_OR_S;
1299                 break;
1300         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1301                 prot_id = ICE_PROT_L2TPV3;
1302                 break;
1303         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1304                 prot_id = ICE_PROT_ESP_F;
1305                 break;
1306         case ICE_FLOW_FIELD_IDX_AH_SPI:
1307                 prot_id = ICE_PROT_ESP_2;
1308                 break;
1309         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1310                 prot_id = ICE_PROT_UDP_IL_OR_S;
1311                 break;
1312         case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1313                 prot_id = ICE_PROT_ECPRI;
1314                 break;
1315         case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1316                 prot_id = ICE_PROT_UDP_IL_OR_S;
1317                 break;
1318         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1319         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1320         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1321         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1322         case ICE_FLOW_FIELD_IDX_ARP_OP:
1323                 prot_id = ICE_PROT_ARP_OF;
1324                 break;
1325         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1326         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1327                 /* ICMP type and code share the same extraction seq. entry */
1328                 prot_id = (params->prof->segs[seg].hdrs &
1329                            ICE_FLOW_SEG_HDR_IPV4) ?
1330                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1331                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1332                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1333                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1334                 break;
1335         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1336                 prot_id = ICE_PROT_GRE_OF;
1337                 break;
1338         default:
1339                 return ICE_ERR_NOT_IMPL;
1340         }
1341
1342         /* Each extraction sequence entry is a word in size, and extracts a
1343          * word at a word-aligned offset from a protocol header.
1344          */
1345         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1346
1347         flds[fld].xtrct.prot_id = prot_id;
1348         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1349                 ICE_FLOW_FV_EXTRACT_SZ;
1350         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1351         flds[fld].xtrct.idx = params->es_cnt;
1352         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1353
1354         /* Determine the number of extraction entries this field consumes,
1355          * accounting for its bit displacement within the first word
1356          */
1357         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1358                                   ice_flds_info[fld].size, ese_bits);
1359
1360         /* Fill in the extraction sequence entries needed for this field */
1361         off = flds[fld].xtrct.off;
1362         mask = flds[fld].xtrct.mask;
1363         for (i = 0; i < cnt; i++) {
1364                 /* Only consume an extraction sequence entry if there is no
1365                  * sibling field associated with this field or the sibling entry
1366                  * already extracts the word shared with this field.
1367                  */
1368                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1369                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1370                     flds[sib].xtrct.off != off) {
1371                         u8 idx;
1372
1373                         /* Make sure the number of extraction sequence entries
1374                          * required does not exceed the block's capacity
1375                          */
1376                         if (params->es_cnt >= fv_words)
1377                                 return ICE_ERR_MAX_LIMIT;
1378
1379                         /* some blocks require a reversed field vector layout */
1380                         if (hw->blk[params->blk].es.reverse)
1381                                 idx = fv_words - params->es_cnt - 1;
1382                         else
1383                                 idx = params->es_cnt;
1384
1385                         params->es[idx].prot_id = prot_id;
1386                         params->es[idx].off = off;
1387                         params->mask[idx] = mask | sib_mask;
1388                         params->es_cnt++;
1389                 }
1390
1391                 off += ICE_FLOW_FV_EXTRACT_SZ;
1392         }
1393
1394         return ICE_SUCCESS;
1395 }
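
/* Worked example (illustrative, assuming the 2-byte word size implied by
 * ese_bits above): a hypothetical field at bit offset 72 of its header with
 * a size of 8 bits yields
 *   xtrct.off  = (72 / 16) * ICE_FLOW_FV_EXTRACT_SZ = 8 bytes
 *   xtrct.disp = 72 % 16 = 8 bits
 *   cnt        = DIVIDE_AND_ROUND_UP(8 + 8, 16) = 1 entry
 * so the field is covered by a single word-aligned extraction entry that
 * begins 8 bytes into the protocol header.
 */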
1396
1397 /**
1398  * ice_flow_xtract_raws - Create extraction sequence entries for raw bytes
1399  * @hw: pointer to the HW struct
1400  * @params: information about the flow to be processed
1401  * @seg: index of packet segment whose raw fields are to be extracted
1402  */
1403 static enum ice_status
1404 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1405                      u8 seg)
1406 {
1407         u16 fv_words;
1408         u16 hdrs_sz;
1409         u8 i;
1410
1411         if (!params->prof->segs[seg].raws_cnt)
1412                 return ICE_SUCCESS;
1413
1414         if (params->prof->segs[seg].raws_cnt >
1415             ARRAY_SIZE(params->prof->segs[seg].raws))
1416                 return ICE_ERR_MAX_LIMIT;
1417
1418         /* Offsets within the segment headers are not supported */
1419         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1420         if (!hdrs_sz)
1421                 return ICE_ERR_PARAM;
1422
1423         fv_words = hw->blk[params->blk].es.fvw;
1424
1425         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1426                 struct ice_flow_seg_fld_raw *raw;
1427                 u16 off, cnt, j;
1428
1429                 raw = &params->prof->segs[seg].raws[i];
1430
1431                 /* Storing extraction information */
1432                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1433                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1434                         ICE_FLOW_FV_EXTRACT_SZ;
1435                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1436                         BITS_PER_BYTE;
1437                 raw->info.xtrct.idx = params->es_cnt;
1438
1439                 /* Determine the number of field vector entries this raw field
1440                  * consumes.
1441                  */
1442                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1443                                           (raw->info.src.last * BITS_PER_BYTE),
1444                                           (ICE_FLOW_FV_EXTRACT_SZ *
1445                                            BITS_PER_BYTE));
1446                 off = raw->info.xtrct.off;
1447                 for (j = 0; j < cnt; j++) {
1448                         u16 idx;
1449
1450                         /* Make sure the number of extraction sequence entries
1451                          * required does not exceed the block's capacity
1452                          */
1453                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1454                             params->es_cnt >= ICE_MAX_FV_WORDS)
1455                                 return ICE_ERR_MAX_LIMIT;
1456
1457                         /* some blocks require a reversed field vector layout */
1458                         if (hw->blk[params->blk].es.reverse)
1459                                 idx = fv_words - params->es_cnt - 1;
1460                         else
1461                                 idx = params->es_cnt;
1462
1463                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1464                         params->es[idx].off = off;
1465                         params->es_cnt++;
1466                         off += ICE_FLOW_FV_EXTRACT_SZ;
1467                 }
1468         }
1469
1470         return ICE_SUCCESS;
1471 }
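
/* Worked example (illustrative, assuming a 2-byte ICE_FLOW_FV_EXTRACT_SZ):
 * a raw field at byte offset 3 of the segment matching 4 bytes
 * (src.last = 4) yields
 *   xtrct.off  = (3 / 2) * 2 = 2
 *   xtrct.disp = (3 % 2) * 8 = 8 bits
 *   cnt        = DIVIDE_AND_ROUND_UP(8 + 32, 16) = 3 field vector words
 */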
1472
1473 /**
1474  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1475  * @hw: pointer to the HW struct
1476  * @params: information about the flow to be processed
1477  *
1478  * This function iterates through all matched fields in the given segments, and
1479  * creates an extraction sequence for the fields.
1480  */
1481 static enum ice_status
1482 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1483                           struct ice_flow_prof_params *params)
1484 {
1485         enum ice_status status = ICE_SUCCESS;
1486         u8 i;
1487
1488         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1489          * packet flags
1490          */
1491         if (params->blk == ICE_BLK_ACL) {
1492                 status = ice_flow_xtract_pkt_flags(hw, params,
1493                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1494                 if (status)
1495                         return status;
1496         }
1497
1498         for (i = 0; i < params->prof->segs_cnt; i++) {
1499                 u64 match = params->prof->segs[i].match;
1500                 enum ice_flow_field j;
1501
1502                 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1503                                      ICE_FLOW_FIELD_IDX_MAX) {
1504                         status = ice_flow_xtract_fld(hw, params, i, j, match);
1505                         if (status)
1506                                 return status;
1507                         ice_clear_bit(j, (ice_bitmap_t *)&match);
1508                 }
1509
1510                 /* Process raw matching bytes */
1511                 status = ice_flow_xtract_raws(hw, params, i);
1512                 if (status)
1513                         return status;
1514         }
1515
1516         return status;
1517 }
1518
1519 /**
1520  * ice_flow_sel_acl_scen - Select the ACL scenario for a flow profile
1521  * @hw: pointer to the hardware structure
1522  * @params: information about the flow to be processed
1523  *
1524  * This function selects the narrowest ACL scenario whose effective width can
1525  * accommodate the entry length computed for the profile in @params.
1526  */
1527 static enum ice_status
1528 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1529 {
1530         /* Find the best-fit scenario for the provided match width */
1531         struct ice_acl_scen *cand_scen = NULL, *scen;
1532
1533         if (!hw->acl_tbl)
1534                 return ICE_ERR_DOES_NOT_EXIST;
1535
1536         /* Loop through each scenario and match against the scenario width
1537          * to select the specific scenario
1538          */
1539         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1540                 if (scen->eff_width >= params->entry_length &&
1541                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1542                         cand_scen = scen;
1543         if (!cand_scen)
1544                 return ICE_ERR_DOES_NOT_EXIST;
1545
1546         params->prof->cfg.scen = cand_scen;
1547
1548         return ICE_SUCCESS;
1549 }
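
/* Example (illustrative): with an entry_length of 24 bytes and scenarios of
 * effective widths 20, 30 and 60 in hw->acl_tbl->scens, the loop above
 * skips the 20-byte scenario (too narrow) and selects the 30-byte one, the
 * narrowest scenario that still fits the entry.
 */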
1550
1551 /**
1552  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1553  * @params: information about the flow to be processed
1554  */
1555 static enum ice_status
1556 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1557 {
1558         u16 index, i, range_idx = 0;
1559
1560         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1561
1562         for (i = 0; i < params->prof->segs_cnt; i++) {
1563                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1564                 u8 j;
1565
1566                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1567                                      ICE_FLOW_FIELD_IDX_MAX) {
1568                         struct ice_flow_fld_info *fld = &seg->fields[j];
1569
1570                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1571
1572                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1573                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1574
1575                                 /* Range checking only supported for single
1576                                  * words
1577                                  */
1578                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1579                                                         fld->xtrct.disp,
1580                                                         BITS_PER_BYTE * 2) > 1)
1581                                         return ICE_ERR_PARAM;
1582
1583                                 /* Ranges must define low and high values */
1584                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1585                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1586                                         return ICE_ERR_PARAM;
1587
1588                                 fld->entry.val = range_idx++;
1589                         } else {
1590                                 /* Store adjusted byte-length of field for later
1591                                  * use, taking into account potential
1592                                  * non-byte-aligned displacement
1593                                  */
1594                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1595                                         (ice_flds_info[j].size +
1596                                          (fld->xtrct.disp % BITS_PER_BYTE),
1597                                          BITS_PER_BYTE);
1598                                 fld->entry.val = index;
1599                                 index += fld->entry.last;
1600                         }
1601                 }
1602
1603                 for (j = 0; j < seg->raws_cnt; j++) {
1604                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1605
1606                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1607                         raw->info.entry.val = index;
1608                         raw->info.entry.last = raw->info.src.last;
1609                         index += raw->info.entry.last;
1610                 }
1611         }
1612
1613         /* Currently only the byte selection base is supported, which
1614          * limits the effective entry size to 30 bytes. Reject anything
1615          * larger.
1616          */
1617         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1618                 return ICE_ERR_PARAM;
1619
1620         /* Only 8 range checkers per profile, reject anything trying to use
1621          * more
1622          */
1623         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1624                 return ICE_ERR_PARAM;
1625
1626         /* Store # bytes required for entry for later use */
1627         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1628
1629         return ICE_SUCCESS;
1630 }
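
/* Worked example (illustrative): with two non-range fields whose adjusted
 * lengths are 2 and 4 bytes, the first gets entry.val at the byte-selection
 * start index with entry.last = 2, the second starts 2 bytes later with
 * entry.last = 4, and params->entry_length ends up as 6. A ranged field
 * instead consumes one of the ICE_AQC_ACL_PROF_RANGES_NUM_CFG range
 * checkers and does not advance the byte-selection index.
 */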
1631
1632 /**
1633  * ice_flow_proc_segs - process all packet segments associated with a profile
1634  * @hw: pointer to the HW struct
1635  * @params: information about the flow to be processed
1636  */
1637 static enum ice_status
1638 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1639 {
1640         enum ice_status status;
1641
1642         status = ice_flow_proc_seg_hdrs(params);
1643         if (status)
1644                 return status;
1645
1646         status = ice_flow_create_xtrct_seq(hw, params);
1647         if (status)
1648                 return status;
1649
1650         switch (params->blk) {
1651         case ICE_BLK_FD:
1652         case ICE_BLK_RSS:
1653                 status = ICE_SUCCESS;
1654                 break;
1655         case ICE_BLK_ACL:
1656                 status = ice_flow_acl_def_entry_frmt(params);
1657                 if (status)
1658                         return status;
1659                 status = ice_flow_sel_acl_scen(hw, params);
1660                 if (status)
1661                         return status;
1662                 break;
1663         default:
1664                 return ICE_ERR_NOT_IMPL;
1665         }
1666
1667         return status;
1668 }
1669
1670 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1671 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1672 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1673
1674 /**
1675  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1676  * @hw: pointer to the HW struct
1677  * @blk: classification stage
1678  * @dir: flow direction
1679  * @segs: array of one or more packet segments that describe the flow
1680  * @segs_cnt: number of packet segments provided
1681  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1682  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1683  */
1684 static struct ice_flow_prof *
1685 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1686                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1687                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1688 {
1689         struct ice_flow_prof *p, *prof = NULL;
1690
1691         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1692         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1693                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1694                     segs_cnt && segs_cnt == p->segs_cnt) {
1695                         u8 i;
1696
1697                         /* Check for profile-VSI association if specified */
1698                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1699                             ice_is_vsi_valid(hw, vsi_handle) &&
1700                             !ice_is_bit_set(p->vsis, vsi_handle))
1701                                 continue;
1702
1703                         /* Protocol headers must be checked. Matched fields are
1704                          * checked if specified.
1705                          */
1706                         for (i = 0; i < segs_cnt; i++)
1707                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1708                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1709                                      segs[i].match != p->segs[i].match))
1710                                         break;
1711
1712                         /* A match is found if all segments are matched */
1713                         if (i == segs_cnt) {
1714                                 prof = p;
1715                                 break;
1716                         }
1717                 }
1718         ice_release_lock(&hw->fl_profs_locks[blk]);
1719
1720         return prof;
1721 }
1722
1723 /**
1724  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1725  * @hw: pointer to the HW struct
1726  * @blk: classification stage
1727  * @dir: flow direction
1728  * @segs: array of one or more packet segments that describe the flow
1729  * @segs_cnt: number of packet segments provided
1730  */
1731 u64
1732 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1733                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1734 {
1735         struct ice_flow_prof *p;
1736
1737         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1738                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1739
1740         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1741 }
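
/* Usage sketch (illustrative): look up an RSS profile for a previously
 * built segment array and check whether it already exists. The hw and segs
 * variables are assumed to be provided by the caller.
 *
 *      u64 id = ice_flow_find_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, segs, 1);
 *
 *      if (id != ICE_FLOW_PROF_ID_INVAL)
 *              ; // a matching profile is already registered
 */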
1742
1743 /**
1744  * ice_flow_find_prof_id - Look up a profile with given profile ID
1745  * @hw: pointer to the HW struct
1746  * @blk: classification stage
1747  * @prof_id: unique ID to identify this flow profile
1748  */
1749 static struct ice_flow_prof *
1750 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1751 {
1752         struct ice_flow_prof *p;
1753
1754         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1755                 if (p->id == prof_id)
1756                         return p;
1757
1758         return NULL;
1759 }
1760
1761 /**
1762  * ice_dealloc_flow_entry - Deallocate flow entry memory
1763  * @hw: pointer to the HW struct
1764  * @entry: flow entry to be removed
1765  */
1766 static void
1767 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1768 {
1769         if (!entry)
1770                 return;
1771
1772         if (entry->entry)
1773                 ice_free(hw, entry->entry);
1774
1775         if (entry->range_buf) {
1776                 ice_free(hw, entry->range_buf);
1777                 entry->range_buf = NULL;
1778         }
1779
1780         if (entry->acts) {
1781                 ice_free(hw, entry->acts);
1782                 entry->acts = NULL;
1783                 entry->acts_cnt = 0;
1784         }
1785
1786         ice_free(hw, entry);
1787 }
1788
1789 /**
1790  * ice_flow_get_hw_prof - return the HW profile ID for a specific profile ID handle
1791  * @hw: pointer to the HW struct
1792  * @blk: classification stage
1793  * @prof_id: the profile ID handle
1794  * @hw_prof_id: pointer to variable to receive the HW profile ID
1795  */
1796 enum ice_status
1797 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1798                      u8 *hw_prof_id)
1799 {
1800         enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1801         struct ice_prof_map *map;
1802
1803         ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1804         map = ice_search_prof_id(hw, blk, prof_id);
1805         if (map) {
1806                 *hw_prof_id = map->prof_id;
1807                 status = ICE_SUCCESS;
1808         }
1809         ice_release_lock(&hw->blk[blk].es.prof_map_lock);
1810         return status;
1811 }
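
/* Usage sketch (illustrative): translate a caller-provided software profile
 * ID handle into the hardware profile ID programmed for the FD block.
 *
 *      u8 hw_prof_id;
 *
 *      if (!ice_flow_get_hw_prof(hw, ICE_BLK_FD, prof_id, &hw_prof_id))
 *              ; // hw_prof_id now holds the HW profile ID
 */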
1812
1813 #define ICE_ACL_INVALID_SCEN    0x3f
1814
1815 /**
1816  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
1817  * @hw: pointer to the hardware structure
1818  * @prof: pointer to flow profile
1819  * @buf: destination buffer the function writes the partial extraction sequence to
1820  *
1821  * Returns ICE_SUCCESS if no PF is associated with the given profile,
1822  * ICE_ERR_IN_USE if at least one PF is associated with it, or another error
1823  * code if the query itself fails.
1824  */
1825 static enum ice_status
1826 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1827                             struct ice_aqc_acl_prof_generic_frmt *buf)
1828 {
1829         enum ice_status status;
1830         u8 prof_id = 0;
1831
1832         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1833         if (status)
1834                 return status;
1835
1836         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1837         if (status)
1838                 return status;
1839
1840         /* If the per-PF scenario numbers for the given profile are all 0 or
1841          * all ICE_ACL_INVALID_SCEN (63), the profile has not been configured
1842          * yet.
1843          */
1844         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1845             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1846             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1847             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1848                 return ICE_SUCCESS;
1849
1850         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1851             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1852             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1853             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1854             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1855             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1856             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1857             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1858                 return ICE_SUCCESS;
1859
1860         return ICE_ERR_IN_USE;
1861 }
1862
1863 /**
1864  * ice_flow_acl_free_act_cntr - Free the ACL counters used by the rule's actions
1865  * @hw: pointer to the hardware structure
1866  * @acts: array of actions to be performed on a match
1867  * @acts_cnt: number of actions
1868  */
1869 static enum ice_status
1870 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1871                            u8 acts_cnt)
1872 {
1873         int i;
1874
1875         for (i = 0; i < acts_cnt; i++) {
1876                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1877                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1878                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1879                         struct ice_acl_cntrs cntrs = { 0 };
1880                         enum ice_status status;
1881
1882                         /* amount is unused in the dealloc path but the common
1883                          * parameter check routine wants a value set, as zero
1884                          * is invalid for the check. Just set it.
1885                          */
1886                         cntrs.amount = 1;
1887                         cntrs.bank = 0; /* Only bank0 for the moment */
1888                         cntrs.first_cntr =
1889                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1890                         cntrs.last_cntr =
1891                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1892
1893                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1894                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1895                         else
1896                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1897
1898                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1899                         if (status)
1900                                 return status;
1901                 }
1902         }
1903         return ICE_SUCCESS;
1904 }
1905
1906 /**
1907  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1908  * @hw: pointer to the hardware structure
1909  * @prof: pointer to flow profile
1910  *
1911  * Disassociate the scenario from the profile for the current PF.
1912  */
1913 static enum ice_status
1914 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1915 {
1916         struct ice_aqc_acl_prof_generic_frmt buf;
1917         enum ice_status status = ICE_SUCCESS;
1918         u8 prof_id = 0;
1919
1920         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1921
1922         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1923         if (status)
1924                 return status;
1925
1926         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1927         if (status)
1928                 return status;
1929
1930         /* Clear scenario for this PF */
1931         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1932         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
1933
1934         return status;
1935 }
1936
1937 /**
1938  * ice_flow_rem_entry_sync - Remove a flow entry
1939  * @hw: pointer to the HW struct
1940  * @blk: classification stage
1941  * @entry: flow entry to be removed
1942  */
1943 static enum ice_status
1944 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1945                         struct ice_flow_entry *entry)
1946 {
1947         if (!entry)
1948                 return ICE_ERR_BAD_PTR;
1949
1950         if (blk == ICE_BLK_ACL) {
1951                 enum ice_status status;
1952
1953                 if (!entry->prof)
1954                         return ICE_ERR_BAD_PTR;
1955
1956                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1957                                            entry->scen_entry_idx);
1958                 if (status)
1959                         return status;
1960
1961                 /* Checks if we need to release an ACL counter. */
1962                 if (entry->acts_cnt && entry->acts)
1963                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1964                                                    entry->acts_cnt);
1965         }
1966
1967         LIST_DEL(&entry->l_entry);
1968
1969         ice_dealloc_flow_entry(hw, entry);
1970
1971         return ICE_SUCCESS;
1972 }
1973
1974 /**
1975  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1976  * @hw: pointer to the HW struct
1977  * @blk: classification stage
1978  * @dir: flow direction
1979  * @prof_id: unique ID to identify this flow profile
1980  * @segs: array of one or more packet segments that describe the flow
1981  * @segs_cnt: number of packet segments provided
1982  * @acts: array of default actions
1983  * @acts_cnt: number of default actions
1984  * @prof: stores the returned flow profile added
1985  *
1986  * Assumption: the caller has acquired the lock to the profile list
1987  */
1988 static enum ice_status
1989 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1990                        enum ice_flow_dir dir, u64 prof_id,
1991                        struct ice_flow_seg_info *segs, u8 segs_cnt,
1992                        struct ice_flow_action *acts, u8 acts_cnt,
1993                        struct ice_flow_prof **prof)
1994 {
1995         struct ice_flow_prof_params *params;
1996         enum ice_status status;
1997         u8 i;
1998
1999         if (!prof || (acts_cnt && !acts))
2000                 return ICE_ERR_BAD_PTR;
2001
2002         params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2003         if (!params)
2004                 return ICE_ERR_NO_MEMORY;
2005
2006         params->prof = (struct ice_flow_prof *)
2007                 ice_malloc(hw, sizeof(*params->prof));
2008         if (!params->prof) {
2009                 status = ICE_ERR_NO_MEMORY;
2010                 goto free_params;
2011         }
2012
2013         /* initialize extraction sequence to all invalid (0xff) */
2014         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2015                 params->es[i].prot_id = ICE_PROT_INVALID;
2016                 params->es[i].off = ICE_FV_OFFSET_INVAL;
2017         }
2018
2019         params->blk = blk;
2020         params->prof->id = prof_id;
2021         params->prof->dir = dir;
2022         params->prof->segs_cnt = segs_cnt;
2023
2024         /* Make a copy of the segments that need to be persistent in the flow
2025          * profile instance
2026          */
2027         for (i = 0; i < segs_cnt; i++)
2028                 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
2029                            ICE_NONDMA_TO_NONDMA);
2030
2031         /* Make a copy of the actions that need to be persistent in the flow
2032          * profile instance.
2033          */
2034         if (acts_cnt) {
2035                 params->prof->acts = (struct ice_flow_action *)
2036                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2037                                    ICE_NONDMA_TO_NONDMA);
2038
2039                 if (!params->prof->acts) {
2040                         status = ICE_ERR_NO_MEMORY;
2041                         goto out;
2042                 }
2043         }
2044
2045         status = ice_flow_proc_segs(hw, params);
2046         if (status) {
2047                 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2048                 goto out;
2049         }
2050
2051         /* Add a HW profile for this flow profile */
2052         status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2053                               params->attr, params->attr_cnt, params->es,
2054                               params->mask);
2055         if (status) {
2056                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
2057                 goto out;
2058         }
2059
2060         INIT_LIST_HEAD(&params->prof->entries);
2061         ice_init_lock(&params->prof->entries_lock);
2062         *prof = params->prof;
2063
2064 out:
2065         if (status) {
2066                 if (params->prof->acts)
2067                         ice_free(hw, params->prof->acts);
2068                 ice_free(hw, params->prof);
2069         }
2070 free_params:
2071         ice_free(hw, params);
2072
2073         return status;
2074 }
2075
2076 /**
2077  * ice_flow_rem_prof_sync - remove a flow profile
2078  * @hw: pointer to the hardware structure
2079  * @blk: classification stage
2080  * @prof: pointer to flow profile to remove
2081  *
2082  * Assumption: the caller has acquired the lock to the profile list
2083  */
2084 static enum ice_status
2085 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2086                        struct ice_flow_prof *prof)
2087 {
2088         enum ice_status status;
2089
2090         /* Remove all remaining flow entries before removing the flow profile */
2091         if (!LIST_EMPTY(&prof->entries)) {
2092                 struct ice_flow_entry *e, *t;
2093
2094                 ice_acquire_lock(&prof->entries_lock);
2095
2096                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2097                                          l_entry) {
2098                         status = ice_flow_rem_entry_sync(hw, blk, e);
2099                         if (status)
2100                                 break;
2101                 }
2102
2103                 ice_release_lock(&prof->entries_lock);
2104         }
2105
2106         if (blk == ICE_BLK_ACL) {
2107                 struct ice_aqc_acl_profile_ranges query_rng_buf;
2108                 struct ice_aqc_acl_prof_generic_frmt buf;
2109                 u8 prof_id = 0;
2110
2111                 /* Disassociate the scenario from the profile for the PF */
2112                 status = ice_flow_acl_disassoc_scen(hw, prof);
2113                 if (status)
2114                         return status;
2115
2116                 /* Clear the range-checker if the profile ID is no longer
2117                  * used by any PF
2118                  */
2119                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2120                 if (status && status != ICE_ERR_IN_USE) {
2121                         return status;
2122                 } else if (!status) {
2123                         /* Clear the range-checker value for profile ID */
2124                         ice_memset(&query_rng_buf, 0,
2125                                    sizeof(struct ice_aqc_acl_profile_ranges),
2126                                    ICE_NONDMA_MEM);
2127
2128                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
2129                                                       &prof_id);
2130                         if (status)
2131                                 return status;
2132
2133                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2134                                                           &query_rng_buf, NULL);
2135                         if (status)
2136                                 return status;
2137                 }
2138         }
2139
2140         /* Remove all hardware profiles associated with this flow profile */
2141         status = ice_rem_prof(hw, blk, prof->id);
2142         if (!status) {
2143                 LIST_DEL(&prof->l_entry);
2144                 ice_destroy_lock(&prof->entries_lock);
2145                 if (prof->acts)
2146                         ice_free(hw, prof->acts);
2147                 ice_free(hw, prof);
2148         }
2149
2150         return status;
2151 }
2152
2153 /**
2154  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2155  * @buf: Destination buffer function writes partial xtrct sequence to
2156  * @info: Info about field
2157  */
2158 static void
2159 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2160                                struct ice_flow_fld_info *info)
2161 {
2162         u16 dst, i;
2163         u8 src;
2164
2165         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2166                 info->xtrct.disp / BITS_PER_BYTE;
2167         dst = info->entry.val;
2168         for (i = 0; i < info->entry.last; i++)
2169                 /* HW stores field vector words in LE, convert words back to BE
2170                  * so constructed entries will end up in network order
2171                  */
2172                 buf->byte_selection[dst++] = src++ ^ 1;
2173 }
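
/* Worked example (illustrative): for a field with xtrct.idx = 2, a byte
 * aligned displacement (disp = 0) and entry.last = 2, src starts at byte 4
 * of the field vector, so the loop above writes byte_selection[dst] =
 * 4 ^ 1 = 5 followed by 5 ^ 1 = 4, i.e. the two bytes of each little-endian
 * field vector word are swapped back into network order.
 */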
2174
2175 /**
2176  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2177  * @hw: pointer to the hardware structure
2178  * @prof: pointer to flow profile
2179  */
2180 static enum ice_status
2181 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2182 {
2183         struct ice_aqc_acl_prof_generic_frmt buf;
2184         struct ice_flow_fld_info *info;
2185         enum ice_status status;
2186         u8 prof_id = 0;
2187         u16 i;
2188
2189         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2190
2191         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2192         if (status)
2193                 return status;
2194
2195         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2196         if (status && status != ICE_ERR_IN_USE)
2197                 return status;
2198
2199         if (!status) {
2200                 /* Program the profile-dependent configuration. This is done
2201                  * only once regardless of the number of PFs using that profile.
2202                  */
2203                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2204
2205                 for (i = 0; i < prof->segs_cnt; i++) {
2206                         struct ice_flow_seg_info *seg = &prof->segs[i];
2207                         u16 j;
2208
2209                         ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2210                                              ICE_FLOW_FIELD_IDX_MAX) {
2211                                 info = &seg->fields[j];
2212
2213                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2214                                         buf.word_selection[info->entry.val] =
2215                                                 info->xtrct.idx;
2216                                 else
2217                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
2218                                                                        info);
2219                         }
2220
2221                         for (j = 0; j < seg->raws_cnt; j++) {
2222                                 info = &seg->raws[j].info;
2223                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2224                         }
2225                 }
2226
2227                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2228                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2229                            ICE_NONDMA_MEM);
2230         }
2231
2232         /* Update the current PF */
2233         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2234         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2235
2236         return status;
2237 }
2238
2239 /**
2240  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2241  * @hw: pointer to the hardware structure
2242  * @blk: classification stage
2243  * @vsi_handle: software VSI handle
2244  * @vsig: target VSI group
2245  *
2246  * Assumption: the caller has already verified that the VSI to
2247  * be added has the same characteristics as the VSIG and will
2248  * thereby have access to all resources added to that VSIG.
2249  */
2250 enum ice_status
2251 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2252                         u16 vsig)
2253 {
2254         enum ice_status status;
2255
2256         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2257                 return ICE_ERR_PARAM;
2258
2259         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2260         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2261                                   vsig);
2262         ice_release_lock(&hw->fl_profs_locks[blk]);
2263
2264         return status;
2265 }
2266
2267 /**
2268  * ice_flow_assoc_prof - associate a VSI with a flow profile
2269  * @hw: pointer to the hardware structure
2270  * @blk: classification stage
2271  * @prof: pointer to flow profile
2272  * @vsi_handle: software VSI handle
2273  *
2274  * Assumption: the caller has acquired the lock to the profile list
2275  * and the software VSI handle has been validated
2276  */
2277 enum ice_status
2278 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2279                     struct ice_flow_prof *prof, u16 vsi_handle)
2280 {
2281         enum ice_status status = ICE_SUCCESS;
2282
2283         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2284                 if (blk == ICE_BLK_ACL) {
2285                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2286                         if (status)
2287                                 return status;
2288                 }
2289                 status = ice_add_prof_id_flow(hw, blk,
2290                                               ice_get_hw_vsi_num(hw,
2291                                                                  vsi_handle),
2292                                               prof->id);
2293                 if (!status)
2294                         ice_set_bit(vsi_handle, prof->vsis);
2295                 else
2296                         ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2297                                   status);
2298         }
2299
2300         return status;
2301 }
2302
2303 /**
2304  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2305  * @hw: pointer to the hardware structure
2306  * @blk: classification stage
2307  * @prof: pointer to flow profile
2308  * @vsi_handle: software VSI handle
2309  *
2310  * Assumption: the caller has acquired the lock to the profile list
2311  * and the software VSI handle has been validated
2312  */
2313 static enum ice_status
2314 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2315                        struct ice_flow_prof *prof, u16 vsi_handle)
2316 {
2317         enum ice_status status = ICE_SUCCESS;
2318
2319         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2320                 status = ice_rem_prof_id_flow(hw, blk,
2321                                               ice_get_hw_vsi_num(hw,
2322                                                                  vsi_handle),
2323                                               prof->id);
2324                 if (!status)
2325                         ice_clear_bit(vsi_handle, prof->vsis);
2326                 else
2327                         ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2328                                   status);
2329         }
2330
2331         return status;
2332 }
2333
2334 /**
2335  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2336  * @hw: pointer to the HW struct
2337  * @blk: classification stage
2338  * @dir: flow direction
2339  * @prof_id: unique ID to identify this flow profile
2340  * @segs: array of one or more packet segments that describe the flow
2341  * @segs_cnt: number of packet segments provided
2342  * @acts: array of default actions
2343  * @acts_cnt: number of default actions
2344  * @prof: stores the returned flow profile added
2345  */
2346 enum ice_status
2347 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2348                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2349                   struct ice_flow_action *acts, u8 acts_cnt,
2350                   struct ice_flow_prof **prof)
2351 {
2352         enum ice_status status;
2353
2354         if (segs_cnt > ICE_FLOW_SEG_MAX)
2355                 return ICE_ERR_MAX_LIMIT;
2356
2357         if (!segs_cnt)
2358                 return ICE_ERR_PARAM;
2359
2360         if (!segs)
2361                 return ICE_ERR_BAD_PTR;
2362
2363         status = ice_flow_val_hdrs(segs, segs_cnt);
2364         if (status)
2365                 return status;
2366
2367         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2368
2369         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2370                                         acts, acts_cnt, prof);
2371         if (!status)
2372                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2373
2374         ice_release_lock(&hw->fl_profs_locks[blk]);
2375
2376         return status;
2377 }
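
/* Usage sketch (illustrative, assuming the ICE_FLOW_SET_HDRS() and
 * ice_flow_set_fld() helpers declared in ice_flow.h): build a single
 * IPv4/UDP segment that matches on the UDP destination port and register it
 * as an RSS profile. The hw pointer and prof_id are caller-provided.
 *
 *      struct ice_flow_seg_info segs[1] = { 0 };
 *      struct ice_flow_prof *prof;
 *      enum ice_status status;
 *
 *      ICE_FLOW_SET_HDRS(&segs[0], ICE_FLOW_SEG_HDR_IPV4 |
 *                                  ICE_FLOW_SEG_HDR_UDP);
 *      ice_flow_set_fld(&segs[0], ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
 *                       ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *                       ICE_FLOW_FLD_OFF_INVAL, false);
 *      status = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, prof_id,
 *                                 segs, 1, NULL, 0, &prof);
 */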
2378
2379 /**
2380  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2381  * @hw: pointer to the HW struct
2382  * @blk: the block for which the flow profile is to be removed
2383  * @prof_id: unique ID of the flow profile to be removed
2384  */
2385 enum ice_status
2386 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2387 {
2388         struct ice_flow_prof *prof;
2389         enum ice_status status;
2390
2391         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2392
2393         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2394         if (!prof) {
2395                 status = ICE_ERR_DOES_NOT_EXIST;
2396                 goto out;
2397         }
2398
2399         /* prof becomes invalid after the call */
2400         status = ice_flow_rem_prof_sync(hw, blk, prof);
2401
2402 out:
2403         ice_release_lock(&hw->fl_profs_locks[blk]);
2404
2405         return status;
2406 }
2407
2408 /**
2409  * ice_flow_find_entry - look for a flow entry using its unique ID
2410  * @hw: pointer to the HW struct
2411  * @blk: classification stage
2412  * @entry_id: unique ID to identify this flow entry
2413  *
2414  * This function looks for the flow entry with the specified unique ID in all
2415  * flow profiles of the specified classification stage. If the entry is found,
2416  * it returns the handle to the flow entry. Otherwise, it returns
2417  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2418  */
2419 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2420 {
2421         struct ice_flow_entry *found = NULL;
2422         struct ice_flow_prof *p;
2423
2424         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2425
2426         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2427                 struct ice_flow_entry *e;
2428
2429                 ice_acquire_lock(&p->entries_lock);
2430                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2431                         if (e->id == entry_id) {
2432                                 found = e;
2433                                 break;
2434                         }
2435                 ice_release_lock(&p->entries_lock);
2436
2437                 if (found)
2438                         break;
2439         }
2440
2441         ice_release_lock(&hw->fl_profs_locks[blk]);
2442
2443         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2444 }
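
/* Illustrative sketch (not driver code): looking up an entry by the caller
 * assigned ID and removing it if present. Assumes @hw is valid and @entry_id
 * was supplied earlier to ice_flow_add_entry().
 *
 *	u64 hdl = ice_flow_find_entry(hw, ICE_BLK_ACL, entry_id);
 *
 *	if (hdl != ICE_FLOW_ENTRY_HANDLE_INVAL)
 *		ice_flow_rem_entry(hw, ICE_BLK_ACL, hdl);
 */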
2445
2446 /**
2447  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2448  * @hw: pointer to the hardware structure
2449  * @acts: array of actions to be performed on a match
2450  * @acts_cnt: number of actions
2451  * @cnt_alloc: indicates whether an ACL counter has been allocated
2452  */
2453 static enum ice_status
2454 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2455                            u8 acts_cnt, bool *cnt_alloc)
2456 {
2457         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2458         int i;
2459
2460         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2461         *cnt_alloc = false;
2462
2463         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2464                 return ICE_ERR_OUT_OF_RANGE;
2465
2466         for (i = 0; i < acts_cnt; i++) {
2467                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2468                     acts[i].type != ICE_FLOW_ACT_DROP &&
2469                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2470                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2471                         return ICE_ERR_CFG;
2472
2473                 /* If the caller wants to add two actions of the same type,
2474                  * it is considered an invalid configuration.
2475                  */
2476                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2477                         return ICE_ERR_PARAM;
2478         }
2479
2480         /* Checks if ACL counters are needed. */
2481         for (i = 0; i < acts_cnt; i++) {
2482                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2483                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2484                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2485                         struct ice_acl_cntrs cntrs = { 0 };
2486                         enum ice_status status;
2487
2488                         cntrs.amount = 1;
2489                         cntrs.bank = 0; /* Only bank0 for the moment */
2490
2491                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2492                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2493                         else
2494                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2495
2496                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2497                         if (status)
2498                                 return status;
2499                         /* Counter index within the bank */
2500                         acts[i].data.acl_act.value =
2501                                                 CPU_TO_LE16(cntrs.first_cntr);
2502                         *cnt_alloc = true;
2503                 }
2504         }
2505
2506         return ICE_SUCCESS;
2507 }
2508
2509 /**
2510  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2511  * @fld: number of the given field
2512  * @info: info about field
2513  * @range_buf: range checker configuration buffer
2514  * @data: pointer to a data buffer containing flow entry's match values/masks
2515  * @range: Input/output param indicating which range checkers are being used
2516  */
2517 static void
2518 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2519                               struct ice_aqc_acl_profile_ranges *range_buf,
2520                               u8 *data, u8 *range)
2521 {
2522         u16 new_mask;
2523
2524         /* If not specified, default mask is all bits in field */
2525         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2526                     BIT(ice_flds_info[fld].size) - 1 :
2527                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2528
2529         /* If the mask is 0, then we don't need to worry about this input
2530          * range checker value.
2531          */
2532         if (new_mask) {
2533                 u16 new_high =
2534                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2535                 u16 new_low =
2536                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2537                 u8 range_idx = info->entry.val;
2538
2539                 range_buf->checker_cfg[range_idx].low_boundary =
2540                         CPU_TO_BE16(new_low);
2541                 range_buf->checker_cfg[range_idx].high_boundary =
2542                         CPU_TO_BE16(new_high);
2543                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2544
2545                 /* Indicate which range checker is being used */
2546                 *range |= BIT(range_idx);
2547         }
2548 }
2549
2550 /**
2551  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2552  * @fld: number of the given field
2553  * @info: info about the field
2554  * @buf: buffer containing the entry
2555  * @dontcare: buffer containing don't care mask for entry
2556  * @data: pointer to a data buffer containing flow entry's match values/masks
2557  */
2558 static void
2559 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2560                             u8 *dontcare, u8 *data)
2561 {
2562         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2563         bool use_mask = false;
2564         u8 disp;
2565
2566         src = info->src.val;
2567         mask = info->src.mask;
2568         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2569         disp = info->xtrct.disp % BITS_PER_BYTE;
2570
2571         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2572                 use_mask = true;
2573
2574         for (k = 0; k < info->entry.last; k++, dst++) {
2575                 /* Add overflow bits from previous byte */
2576                 buf[dst] = (tmp_s & 0xff00) >> 8;
2577
2578                 /* If the mask is not valid, tmp_m is always zero, so this just
2579                  * sets dontcare to 0 (no masked bits). If the mask is valid, this
2580                  * pulls in the overflow bits of the mask from the previous byte.
2581                  */
2582                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2583
2584                 /* If there is displacement, the last byte will only contain
2585                  * displaced data and there is no more data to read from the user
2586                  * buffer, so skip it to avoid reading beyond the end of the user
2587                  * buffer.
2588                  */
2589                 if (!disp || k < info->entry.last - 1) {
2590                         /* Store shifted data to use in next byte */
2591                         tmp_s = data[src++] << disp;
2592
2593                         /* Add current (shifted) byte */
2594                         buf[dst] |= tmp_s & 0xff;
2595
2596                         /* Handle mask if valid */
2597                         if (use_mask) {
2598                                 tmp_m = (~data[mask++] & 0xff) << disp;
2599                                 dontcare[dst] |= tmp_m & 0xff;
2600                         }
2601                 }
2602         }
2603
2604         /* Fill in don't care bits at beginning of field */
2605         if (disp) {
2606                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2607                 for (k = 0; k < disp; k++)
2608                         dontcare[dst] |= BIT(k);
2609         }
2610
2611         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2612
2613         /* Fill in don't care bits at end of field */
2614         if (end_disp) {
2615                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2616                       info->entry.last - 1;
2617                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2618                         dontcare[dst] |= BIT(k);
2619         }
2620 }
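
/* Worked example (illustrative): assume a 16-bit field whose extraction
 * displacement is 3 bits (disp = 3), source value bytes 0xAB 0xCD, and
 * info->entry.last = 3 destination bytes.
 *   k = 0: no previous overflow; tmp_s = 0xAB << 3 = 0x558, buf[dst]   = 0x58
 *   k = 1: overflow 0x05;        tmp_s = 0xCD << 3 = 0x668, buf[dst+1] = 0x6D
 *   k = 2: overflow 0x06 only; disp != 0 and k == last - 1, so nothing more
 *          is read from the user buffer,                    buf[dst+2] = 0x06
 * end_disp = (3 + 16) % 8 = 3, so dontcare bits 0-2 of the first byte and
 * bits 3-7 of the last byte are set to mark the displaced/unused bit positions.
 */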
2621
2622 /**
2623  * ice_flow_acl_frmt_entry - Format ACL entry
2624  * @hw: pointer to the hardware structure
2625  * @prof: pointer to flow profile
2626  * @e: pointer to the flow entry
2627  * @data: pointer to a data buffer containing flow entry's match values/masks
2628  * @acts: array of actions to be performed on a match
2629  * @acts_cnt: number of actions
2630  *
2631  * Formats the key (and key_inverse) to be matched from the data passed in,
2632  * along with data from the flow profile. This key/key_inverse pair makes up
2633  * the 'entry' for an ACL flow entry.
2634  */
2635 static enum ice_status
2636 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2637                         struct ice_flow_entry *e, u8 *data,
2638                         struct ice_flow_action *acts, u8 acts_cnt)
2639 {
2640         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2641         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2642         enum ice_status status;
2643         bool cnt_alloc;
2644         u8 prof_id = 0;
2645         u16 i, buf_sz;
2646
2647         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2648         if (status)
2649                 return status;
2650
2651         /* Format the result action */
2652
2653         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2654         if (status)
2655                 return status;
2656
2657         status = ICE_ERR_NO_MEMORY;
2658
2659         e->acts = (struct ice_flow_action *)
2660                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2661                            ICE_NONDMA_TO_NONDMA);
2662         if (!e->acts)
2663                 goto out;
2664
2665         e->acts_cnt = acts_cnt;
2666
2667         /* Format the matching data */
2668         buf_sz = prof->cfg.scen->width;
2669         buf = (u8 *)ice_malloc(hw, buf_sz);
2670         if (!buf)
2671                 goto out;
2672
2673         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2674         if (!dontcare)
2675                 goto out;
2676
2677         /* 'key' buffer will store both key and key_inverse, so must be twice
2678          * size of buf
2679          */
2680         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2681         if (!key)
2682                 goto out;
2683
2684         range_buf = (struct ice_aqc_acl_profile_ranges *)
2685                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2686         if (!range_buf)
2687                 goto out;
2688
2689         /* Set don't care mask to all 1's to start, will zero out used bytes */
2690         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2691
2692         for (i = 0; i < prof->segs_cnt; i++) {
2693                 struct ice_flow_seg_info *seg = &prof->segs[i];
2694                 u8 j;
2695
2696                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2697                                      ICE_FLOW_FIELD_IDX_MAX) {
2698                         struct ice_flow_fld_info *info = &seg->fields[j];
2699
2700                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2701                                 ice_flow_acl_frmt_entry_range(j, info,
2702                                                               range_buf, data,
2703                                                               &range);
2704                         else
2705                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2706                                                             dontcare, data);
2707                 }
2708
2709                 for (j = 0; j < seg->raws_cnt; j++) {
2710                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2711                         u16 dst, src, mask, k;
2712                         bool use_mask = false;
2713
2714                         src = info->src.val;
2715                         dst = info->entry.val -
2716                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2717                         mask = info->src.mask;
2718
2719                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2720                                 use_mask = true;
2721
2722                         for (k = 0; k < info->entry.last; k++, dst++) {
2723                                 buf[dst] = data[src++];
2724                                 if (use_mask)
2725                                         dontcare[dst] = ~data[mask++];
2726                                 else
2727                                         dontcare[dst] = 0;
2728                         }
2729                 }
2730         }
2731
2732         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2733         dontcare[prof->cfg.scen->pid_idx] = 0;
2734
2735         /* Format the buffer for direction flags */
2736         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2737
2738         if (prof->dir == ICE_FLOW_RX)
2739                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2740
2741         if (range) {
2742                 buf[prof->cfg.scen->rng_chk_idx] = range;
2743                 /* Mark any unused range checkers as don't care */
2744                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2745                 e->range_buf = range_buf;
2746         } else {
2747                 ice_free(hw, range_buf);
2748         }
2749
2750         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2751                              buf_sz);
2752         if (status)
2753                 goto out;
2754
2755         e->entry = key;
2756         e->entry_sz = buf_sz * 2;
2757
2758 out:
2759         if (buf)
2760                 ice_free(hw, buf);
2761
2762         if (dontcare)
2763                 ice_free(hw, dontcare);
2764
2765         if (status && key)
2766                 ice_free(hw, key);
2767
2768         if (status && range_buf) {
2769                 ice_free(hw, range_buf);
2770                 e->range_buf = NULL;
2771         }
2772
2773         if (status && e->acts) {
2774                 ice_free(hw, e->acts);
2775                 e->acts = NULL;
2776                 e->acts_cnt = 0;
2777         }
2778
2779         if (status && cnt_alloc)
2780                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2781
2782         return status;
2783 }
2784
2785 /**
2786  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2787  *                                     the compared data.
2788  * @prof: pointer to flow profile
2789  * @e: pointer to the comparing flow entry
2790  * @do_chg_action: decide if we want to change the ACL action
2791  * @do_add_entry: decide if we want to add the new ACL entry
2792  * @do_rem_entry: decide if we want to remove the current ACL entry
2793  *
2794  * Find an ACL scenario entry that matches the compared data. At the same time,
2795  * this function also figures out:
2796  * a/ If we want to change the ACL action
2797  * b/ If we want to add the new ACL entry
2798  * c/ If we want to remove the current ACL entry
2799  */
2800 static struct ice_flow_entry *
2801 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2802                                   struct ice_flow_entry *e, bool *do_chg_action,
2803                                   bool *do_add_entry, bool *do_rem_entry)
2804 {
2805         struct ice_flow_entry *p, *return_entry = NULL;
2806         u8 i, j;
2807
2808         /* Check if:
2809          * a/ There exists an entry with same matching data, but different
2810          *    priority, then we remove this existing ACL entry. Then, we
2811          *    will add the new entry to the ACL scenario.
2812          * b/ There exists an entry with same matching data, priority, and
2813          *    result action, then we do nothing.
2814          * c/ There exists an entry with same matching data and priority, but
2815          *    a different action, then we only change the entry's action.
2816          * d/ Else, we add this new entry to the ACL scenario.
2817          */
2818         *do_chg_action = false;
2819         *do_add_entry = true;
2820         *do_rem_entry = false;
2821         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2822                 if (memcmp(p->entry, e->entry, p->entry_sz))
2823                         continue;
2824
2825                 /* From this point, we have the same matching_data. */
2826                 *do_add_entry = false;
2827                 return_entry = p;
2828
2829                 if (p->priority != e->priority) {
2830                         /* matching data && !priority */
2831                         *do_add_entry = true;
2832                         *do_rem_entry = true;
2833                         break;
2834                 }
2835
2836                 /* From this point, we will have matching_data && priority */
2837                 if (p->acts_cnt != e->acts_cnt)
2838                         *do_chg_action = true;
2839                 for (i = 0; i < p->acts_cnt; i++) {
2840                         bool found_not_match = false;
2841
2842                         for (j = 0; j < e->acts_cnt; j++)
2843                                 if (memcmp(&p->acts[i], &e->acts[j],
2844                                            sizeof(struct ice_flow_action))) {
2845                                         found_not_match = true;
2846                                         break;
2847                                 }
2848
2849                         if (found_not_match) {
2850                                 *do_chg_action = true;
2851                                 break;
2852                         }
2853                 }
2854
2855                 /* (do_chg_action = true) means :
2856                  *    matching_data && priority && !result_action
2857                  * (do_chg_action = false) means :
2858                  *    matching_data && priority && result_action
2859                  */
2860                 break;
2861         }
2862
2863         return return_entry;
2864 }
2865
2866 /**
2867  * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
2868  * @p: flow priority
2869  */
2870 static enum ice_acl_entry_prio
2871 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
2872 {
2873         enum ice_acl_entry_prio acl_prio;
2874
2875         switch (p) {
2876         case ICE_FLOW_PRIO_LOW:
2877                 acl_prio = ICE_ACL_PRIO_LOW;
2878                 break;
2879         case ICE_FLOW_PRIO_NORMAL:
2880                 acl_prio = ICE_ACL_PRIO_NORMAL;
2881                 break;
2882         case ICE_FLOW_PRIO_HIGH:
2883                 acl_prio = ICE_ACL_PRIO_HIGH;
2884                 break;
2885         default:
2886                 acl_prio = ICE_ACL_PRIO_NORMAL;
2887                 break;
2888         }
2889
2890         return acl_prio;
2891 }
2892
2893 /**
2894  * ice_flow_acl_union_rng_chk - Perform union operation between two
2895  *                              range checker buffers
2896  * @dst_buf: pointer to destination range checker buffer
2897  * @src_buf: pointer to source range checker buffer
2898  *
2899  * This function performs the union of the dst_buf and src_buf range
2900  * checker buffers and saves the result back to dst_buf.
2901  */
2902 static enum ice_status
2903 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2904                            struct ice_aqc_acl_profile_ranges *src_buf)
2905 {
2906         u8 i, j;
2907
2908         if (!dst_buf || !src_buf)
2909                 return ICE_ERR_BAD_PTR;
2910
2911         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2912                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2913                 bool will_populate = false;
2914
2915                 in_data = &src_buf->checker_cfg[i];
2916
2917                 if (!in_data->mask)
2918                         break;
2919
2920                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2921                         cfg_data = &dst_buf->checker_cfg[j];
2922
2923                         if (!cfg_data->mask ||
2924                             !memcmp(cfg_data, in_data,
2925                                     sizeof(struct ice_acl_rng_data))) {
2926                                 will_populate = true;
2927                                 break;
2928                         }
2929                 }
2930
2931                 if (will_populate) {
2932                         ice_memcpy(cfg_data, in_data,
2933                                    sizeof(struct ice_acl_rng_data),
2934                                    ICE_NONDMA_TO_NONDMA);
2935                 } else {
2936                         /* No available slot left to program range checker */
2937                         return ICE_ERR_MAX_LIMIT;
2938                 }
2939         }
2940
2941         return ICE_SUCCESS;
2942 }
2943
2944 /**
2945  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2946  * @hw: pointer to the hardware structure
2947  * @prof: pointer to flow profile
2948  * @entry: double pointer to the flow entry
2949  *
2950  * This function looks at the entries currently added to the corresponding
2951  * ACL scenario, then performs matching logic to decide whether to add,
2952  * modify, or do nothing with this new entry.
2953  */
2954 static enum ice_status
2955 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2956                                  struct ice_flow_entry **entry)
2957 {
2958         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2959         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2960         struct ice_acl_act_entry *acts = NULL;
2961         struct ice_flow_entry *exist;
2962         enum ice_status status = ICE_SUCCESS;
2963         struct ice_flow_entry *e;
2964         u8 i;
2965
2966         if (!entry || !(*entry) || !prof)
2967                 return ICE_ERR_BAD_PTR;
2968
2969         e = *entry;
2970
2971         do_chg_rng_chk = false;
2972         if (e->range_buf) {
2973                 u8 prof_id = 0;
2974
2975                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2976                                               &prof_id);
2977                 if (status)
2978                         return status;
2979
2980                 /* Query the current range-checker value in FW */
2981                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2982                                                    NULL);
2983                 if (status)
2984                         return status;
2985                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2986                            sizeof(struct ice_aqc_acl_profile_ranges),
2987                            ICE_NONDMA_TO_NONDMA);
2988
2989                 /* Generate the new range-checker value */
2990                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2991                 if (status)
2992                         return status;
2993
2994                 /* Reconfigure the range check if the buffer has changed. */
2995                 do_chg_rng_chk = false;
2996                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2997                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2998                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2999                                                           &cfg_rng_buf, NULL);
3000                         if (status)
3001                                 return status;
3002
3003                         do_chg_rng_chk = true;
3004                 }
3005         }
3006
3007         /* Figure out if we want to change the ACL action, add the new ACL
3008          * entry, and/or remove the current ACL entry.
3009          */
3010         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
3011                                                   &do_add_entry, &do_rem_entry);
3012         if (do_rem_entry) {
3013                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
3014                 if (status)
3015                         return status;
3016         }
3017
3018         /* Prepare the result action buffer */
3019         acts = (struct ice_acl_act_entry *)
3020                 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
3021         if (!acts)
3022                 return ICE_ERR_NO_MEMORY;
3023
3024         for (i = 0; i < e->acts_cnt; i++)
3025                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
3026                            sizeof(struct ice_acl_act_entry),
3027                            ICE_NONDMA_TO_NONDMA);
3028
3029         if (do_add_entry) {
3030                 enum ice_acl_entry_prio prio;
3031                 u8 *keys, *inverts;
3032                 u16 entry_idx;
3033
3034                 keys = (u8 *)e->entry;
3035                 inverts = keys + (e->entry_sz / 2);
3036                 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
3037
3038                 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
3039                                            inverts, acts, e->acts_cnt,
3040                                            &entry_idx);
3041                 if (status)
3042                         goto out;
3043
3044                 e->scen_entry_idx = entry_idx;
3045                 LIST_ADD(&e->l_entry, &prof->entries);
3046         } else {
3047                 if (do_chg_action) {
3048                         /* Update the SW copy of the existing entry with e's
3049                          * action memory info.
3050                          */
3051                         ice_free(hw, exist->acts);
3052                         exist->acts_cnt = e->acts_cnt;
3053                         exist->acts = (struct ice_flow_action *)
3054                                 ice_calloc(hw, exist->acts_cnt,
3055                                            sizeof(struct ice_flow_action));
3056                         if (!exist->acts) {
3057                                 status = ICE_ERR_NO_MEMORY;
3058                                 goto out;
3059                         }
3060
3061                         ice_memcpy(exist->acts, e->acts,
3062                                    sizeof(struct ice_flow_action) * e->acts_cnt,
3063                                    ICE_NONDMA_TO_NONDMA);
3064
3065                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3066                                                   e->acts_cnt,
3067                                                   exist->scen_entry_idx);
3068                         if (status)
3069                                 goto out;
3070                 }
3071
3072                 if (do_chg_rng_chk) {
3073                         /* In this case, we want to update the range checker
3074                          * information of the existing entry.
3075                          */
3076                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
3077                                                             e->range_buf);
3078                         if (status)
3079                                 goto out;
3080                 }
3081
3082                 /* As we don't add the new entry to our SW DB, deallocate its
3083                  * memory and return the existing entry to the caller.
3084                  */
3085                 ice_dealloc_flow_entry(hw, e);
3086                 *(entry) = exist;
3087         }
3088 out:
3089         ice_free(hw, acts);
3090
3091         return status;
3092 }
3093
3094 /**
3095  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3096  * @hw: pointer to the hardware structure
3097  * @prof: pointer to flow profile
3098  * @e: double pointer to the flow entry
3099  */
3100 static enum ice_status
3101 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3102                             struct ice_flow_entry **e)
3103 {
3104         enum ice_status status;
3105
3106         ice_acquire_lock(&prof->entries_lock);
3107         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3108         ice_release_lock(&prof->entries_lock);
3109
3110         return status;
3111 }
3112
3113 /**
3114  * ice_flow_add_entry - Add a flow entry
3115  * @hw: pointer to the HW struct
3116  * @blk: classification stage
3117  * @prof_id: ID of the profile to add a new flow entry to
3118  * @entry_id: unique ID to identify this flow entry
3119  * @vsi_handle: software VSI handle for the flow entry
3120  * @prio: priority of the flow entry
3121  * @data: pointer to a data buffer containing flow entry's match values/masks
3122  * @acts: array of actions to be performed on a match
3123  * @acts_cnt: number of actions
3124  * @entry_h: pointer to buffer that receives the new flow entry's handle
3125  */
3126 enum ice_status
3127 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3128                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3129                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
3130                    u64 *entry_h)
3131 {
3132         struct ice_flow_entry *e = NULL;
3133         struct ice_flow_prof *prof;
3134         enum ice_status status = ICE_SUCCESS;
3135
3136         /* ACL entries must indicate an action */
3137         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3138                 return ICE_ERR_PARAM;
3139
3140         /* No flow entry data is expected for RSS */
3141         if (!entry_h || (!data && blk != ICE_BLK_RSS))
3142                 return ICE_ERR_BAD_PTR;
3143
3144         if (!ice_is_vsi_valid(hw, vsi_handle))
3145                 return ICE_ERR_PARAM;
3146
3147         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3148
3149         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3150         if (!prof) {
3151                 status = ICE_ERR_DOES_NOT_EXIST;
3152         } else {
3153                 /* Allocate memory for the entry being added and associate
3154                  * the VSI to the found flow profile
3155                  */
3156                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3157                 if (!e)
3158                         status = ICE_ERR_NO_MEMORY;
3159                 else
3160                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3161         }
3162
3163         ice_release_lock(&hw->fl_profs_locks[blk]);
3164         if (status)
3165                 goto out;
3166
3167         e->id = entry_id;
3168         e->vsi_handle = vsi_handle;
3169         e->prof = prof;
3170         e->priority = prio;
3171
3172         switch (blk) {
3173         case ICE_BLK_FD:
3174         case ICE_BLK_RSS:
3175                 break;
3176         case ICE_BLK_ACL:
3177                 /* ACL will handle the entry management */
3178                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3179                                                  acts_cnt);
3180                 if (status)
3181                         goto out;
3182
3183                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3184                 if (status)
3185                         goto out;
3186
3187                 break;
3188         default:
3189                 status = ICE_ERR_NOT_IMPL;
3190                 goto out;
3191         }
3192
3193         if (blk != ICE_BLK_ACL) {
3194                 /* ACL will handle the entry management */
3195                 ice_acquire_lock(&prof->entries_lock);
3196                 LIST_ADD(&e->l_entry, &prof->entries);
3197                 ice_release_lock(&prof->entries_lock);
3198         }
3199
3200         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3201
3202 out:
3203         if (status && e) {
3204                 if (e->entry)
3205                         ice_free(hw, e->entry);
3206                 ice_free(hw, e);
3207         }
3208
3209         return status;
3210 }
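
/* Illustrative sketch (not driver code): adding a Flow Director entry to a
 * previously added profile. prof_id, entry_id, vsi_handle and match_buf are
 * assumed to be provided by the caller; match_buf must be laid out according
 * to the byte offsets programmed via ice_flow_set_fld() on the profile's
 * segments.
 *
 *	u64 entry_h;
 *	enum ice_status err;
 *
 *	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, entry_id, vsi_handle,
 *				 ICE_FLOW_PRIO_NORMAL, match_buf, NULL, 0,
 *				 &entry_h);
 *
 * On success the returned entry_h can later be passed to
 * ice_flow_rem_entry(hw, ICE_BLK_FD, entry_h) to undo the addition.
 */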
3211
3212 /**
3213  * ice_flow_rem_entry - Remove a flow entry
3214  * @hw: pointer to the HW struct
3215  * @blk: classification stage
3216  * @entry_h: handle to the flow entry to be removed
3217  */
3218 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3219                                    u64 entry_h)
3220 {
3221         struct ice_flow_entry *entry;
3222         struct ice_flow_prof *prof;
3223         enum ice_status status = ICE_SUCCESS;
3224
3225         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3226                 return ICE_ERR_PARAM;
3227
3228         entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
3229
3230         /* Retain the pointer to the flow profile as the entry will be freed */
3231         prof = entry->prof;
3232
3233         if (prof) {
3234                 ice_acquire_lock(&prof->entries_lock);
3235                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3236                 ice_release_lock(&prof->entries_lock);
3237         }
3238
3239         return status;
3240 }
3241
3242 /**
3243  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3244  * @seg: packet segment the field being set belongs to
3245  * @fld: field to be set
3246  * @field_type: type of the field
3247  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3248  *           entry's input buffer
3249  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3250  *            input buffer
3251  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3252  *            entry's input buffer
3253  *
3254  * This helper function stores information about a field being matched, including
3255  * the type of the field and the locations of the value to match, the mask, and
3256  * the upper-bound value as byte offsets from the start of a flow entry's input buffer.
3257  * This function should only be used for fixed-size data structures.
3258  *
3259  * This function also opportunistically determines the protocol headers to be
3260  * present based on the fields being set. Some fields cannot be used alone to
3261  * determine the protocol headers present. Sometimes, fields for particular
3262  * protocol headers are not matched. In those cases, the protocol headers
3263  * must be explicitly set.
3264  */
3265 static void
3266 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3267                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3268                      u16 mask_loc, u16 last_loc)
3269 {
3270         u64 bit = BIT_ULL(fld);
3271
3272         seg->match |= bit;
3273         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3274                 seg->range |= bit;
3275
3276         seg->fields[fld].type = field_type;
3277         seg->fields[fld].src.val = val_loc;
3278         seg->fields[fld].src.mask = mask_loc;
3279         seg->fields[fld].src.last = last_loc;
3280
3281         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3282 }
3283
3284 /**
3285  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3286  * @seg: packet segment the field being set belongs to
3287  * @fld: field to be set
3288  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3289  *           entry's input buffer
3290  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3291  *            input buffer
3292  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3293  *            entry's input buffer
3294  * @range: indicate if field being matched is to be in a range
3295  *
3296  * This function specifies the locations, in the form of byte offsets from the
3297  * start of the input buffer for a flow entry, from where the value to match,
3298  * the mask value, and upper value can be extracted. These locations are then
3299  * stored in the flow profile. When adding a flow entry associated with the
3300  * flow profile, these locations will be used to quickly extract the values and
3301  * create the content of a match entry. This function should only be used for
3302  * fixed-size data structures.
3303  */
3304 void
3305 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3306                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3307 {
3308         enum ice_flow_fld_match_type t = range ?
3309                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3310
3311         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3312 }
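
/* Illustrative sketch (not driver code): a hypothetical caller-side input
 * buffer holding an IPv4 source address value and mask, with the byte offsets
 * of those members handed to ice_flow_set_fld(). The struct name and layout
 * are assumptions for the example only; seg points at the packet segment
 * being configured.
 *
 *	struct ipv4_sa_match {
 *		u32 sa_val;	(value to match)
 *		u32 sa_msk;	(mask applied to sa_val)
 *	};
 *
 *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *			 offsetof(struct ipv4_sa_match, sa_val),
 *			 offsetof(struct ipv4_sa_match, sa_msk),
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 */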
3313
3314 /**
3315  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3316  * @seg: packet segment the field being set belongs to
3317  * @fld: field to be set
3318  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3319  *           entry's input buffer
3320  * @pref_loc: location of prefix value from entry's input buffer
3321  * @pref_sz: size of the location holding the prefix value
3322  *
3323  * This function specifies the locations, in the form of byte offsets from the
3324  * start of the input buffer for a flow entry, from where the value to match
3325  * and the IPv4 prefix value can be extracted. These locations are then stored
3326  * in the flow profile. When adding flow entries to the associated flow profile,
3327  * these locations can be used to quickly extract the values to create the
3328  * content of a match entry. This function should only be used for fixed-size
3329  * data structures.
3330  */
3331 void
3332 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3333                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3334 {
3335         /* For this type of field, the "mask" location is for the prefix value's
3336          * location and the "last" location is for the size of the location of
3337          * the prefix value.
3338          */
3339         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3340                              pref_loc, (u16)pref_sz);
3341 }
3342
3343 /**
3344  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3345  * @seg: packet segment the field being set belongs to
3346  * @off: offset of the raw field from the beginning of the segment in bytes
3347  * @len: length of the raw pattern to be matched
3348  * @val_loc: location of the value to match from entry's input buffer
3349  * @mask_loc: location of mask value from entry's input buffer
3350  *
3351  * This function specifies the offset of the raw field to be matched from the
3352  * beginning of the specified packet segment, and the locations, in the form of
3353  * byte offsets from the start of the input buffer for a flow entry, from where
3354  * the value to match and the mask value can be extracted. These locations are
3355  * then stored in the flow profile. When adding flow entries to the associated
3356  * flow profile, these locations can be used to quickly extract the values to
3357  * create the content of a match entry. This function should only be used for
3358  * fixed-size data structures.
3359  */
3360 void
3361 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3362                      u16 val_loc, u16 mask_loc)
3363 {
3364         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3365                 seg->raws[seg->raws_cnt].off = off;
3366                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3367                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3368                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3369                 /* The "last" field is used to store the length of the field */
3370                 seg->raws[seg->raws_cnt].info.src.last = len;
3371         }
3372
3373         /* Overflows of "raws" will be handled as an error condition later in
3374          * the flow when this information is processed.
3375          */
3376         seg->raws_cnt++;
3377 }
3378
3379 /**
3380  * ice_flow_rem_vsi_prof - remove VSI from flow profile
3381  * @hw: pointer to the hardware structure
3382  * @blk: classification stage
3383  * @vsi_handle: software VSI handle
3384  * @prof_id: unique ID to identify this flow profile
3385  *
3386  * This function removes the flow entries associated with the input
3387  * VSI handle and disassociates the VSI from the flow profile.
3388  */
3389 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3390                                       u64 prof_id)
3391 {
3392         struct ice_flow_prof *prof = NULL;
3393         enum ice_status status = ICE_SUCCESS;
3394
3395         if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3396                 return ICE_ERR_PARAM;
3397
3398         /* find flow profile pointer with input package block and profile id */
3399         prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
3400         if (!prof) {
3401                 ice_debug(hw, ICE_DBG_PKG,
3402                           "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3403                 return ICE_ERR_DOES_NOT_EXIST;
3404         }
3405
3406         /* Remove all remaining flow entries before removing the flow profile */
3407         if (!LIST_EMPTY(&prof->entries)) {
3408                 struct ice_flow_entry *e, *t;
3409
3410                 ice_acquire_lock(&prof->entries_lock);
3411                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
3412                                          l_entry) {
3413                         if (e->vsi_handle != vsi_handle)
3414                                 continue;
3415
3416                         status = ice_flow_rem_entry_sync(hw, blk, e);
3417                         if (status)
3418                                 break;
3419                 }
3420                 ice_release_lock(&prof->entries_lock);
3421         }
3422         if (status)
3423                 return status;
3424
3425         /* disassociate the flow profile from sw vsi handle */
3426         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3427         if (status)
3428                 ice_debug(hw, ICE_DBG_PKG,
3429                           "ice_flow_disassoc_prof() failed with status=%d\n",
3430                           status);
3431         return status;
3432 }
3433
3434 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3435 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_ETH_NON_IP | ICE_FLOW_SEG_HDR_VLAN)
3436
3437 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3438         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3439
3440 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3441         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3442
3443 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3444         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3445          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3446          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3447
3448 /**
3449  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3450  * @segs: pointer to the flow field segment(s)
3451  * @seg_cnt: segment count
3452  * @cfg: configure parameters
3453  *
3454  * Helper function to extract fields from the hash bitmap and use the flow
3455  * header value to set up the flow field segment(s) for later use in flow
3456  * profile entry addition or removal.
3457  */
3458 static enum ice_status
3459 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3460                           const struct ice_rss_hash_cfg *cfg)
3461 {
3462         struct ice_flow_seg_info *seg;
3463         u64 val;
3464         u8 i;
3465
3466         /* set innermost segment */
3467         seg = &segs[seg_cnt - 1];
3468
3469         ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3470                              ICE_FLOW_FIELD_IDX_MAX)
3471                 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3472                                  ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3473                                  ICE_FLOW_FLD_OFF_INVAL, false);
3474
3475         ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3476
3477         /* set outermost header */
3478         if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3479                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3480                                                    ICE_FLOW_SEG_HDR_IPV_FRAG |
3481                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3482         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3483                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3484                                                    ICE_FLOW_SEG_HDR_IPV_FRAG |
3485                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3486
3487         if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3488             ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
3489             ~ICE_FLOW_SEG_HDR_IPV_FRAG)
3490                 return ICE_ERR_PARAM;
3491
3492         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3493         if (val && !ice_is_pow2(val))
3494                 return ICE_ERR_CFG;
3495
3496         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3497         if (val && !ice_is_pow2(val))
3498                 return ICE_ERR_CFG;
3499
3500         return ICE_SUCCESS;
3501 }
3502
3503 /**
3504  * ice_rem_vsi_rss_list - remove VSI from RSS list
3505  * @hw: pointer to the hardware structure
3506  * @vsi_handle: software VSI handle
3507  *
3508  * Remove the VSI from all RSS configurations in the list.
3509  */
3510 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3511 {
3512         struct ice_rss_cfg *r, *tmp;
3513
3514         if (LIST_EMPTY(&hw->rss_list_head))
3515                 return;
3516
3517         ice_acquire_lock(&hw->rss_locks);
3518         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3519                                  ice_rss_cfg, l_entry)
3520                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3521                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3522                                 LIST_DEL(&r->l_entry);
3523                                 ice_free(hw, r);
3524                         }
3525         ice_release_lock(&hw->rss_locks);
3526 }
3527
3528 /**
3529  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3530  * @hw: pointer to the hardware structure
3531  * @vsi_handle: software VSI handle
3532  *
3533  * This function will iterate through all flow profiles and disassociate
3534  * the VSI from that profile. If the flow profile has no VSIs it will
3535  * be removed.
3536  */
3537 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3538 {
3539         const enum ice_block blk = ICE_BLK_RSS;
3540         struct ice_flow_prof *p, *t;
3541         enum ice_status status = ICE_SUCCESS;
3542
3543         if (!ice_is_vsi_valid(hw, vsi_handle))
3544                 return ICE_ERR_PARAM;
3545
3546         if (LIST_EMPTY(&hw->fl_profs[blk]))
3547                 return ICE_SUCCESS;
3548
3549         ice_acquire_lock(&hw->rss_locks);
3550         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3551                                  l_entry)
3552                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3553                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3554                         if (status)
3555                                 break;
3556
3557                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3558                                 status = ice_flow_rem_prof(hw, blk, p->id);
3559                                 if (status)
3560                                         break;
3561                         }
3562                 }
3563         ice_release_lock(&hw->rss_locks);
3564
3565         return status;
3566 }
3567
3568 /**
3569  * ice_get_rss_hdr_type - get an RSS profile's header type
3570  * @prof: RSS flow profile
3571  */
3572 static enum ice_rss_cfg_hdr_type
3573 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3574 {
3575         enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3576
3577         if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3578                 hdr_type = ICE_RSS_OUTER_HEADERS;
3579         } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3580                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3581                         hdr_type = ICE_RSS_INNER_HEADERS;
3582                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3583                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3584                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3585                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3586         }
3587
3588         return hdr_type;
3589 }
3590
3591 /**
3592  * ice_rem_rss_list - remove RSS configuration from list
3593  * @hw: pointer to the hardware structure
3594  * @vsi_handle: software VSI handle
3595  * @prof: pointer to flow profile
3596  *
3597  * Assumption: lock has already been acquired for RSS list
3598  */
3599 static void
3600 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3601 {
3602         enum ice_rss_cfg_hdr_type hdr_type;
3603         struct ice_rss_cfg *r, *tmp;
3604
3605         /* Search for RSS hash fields associated with the VSI that match the
3606          * hash configurations associated with the flow profile. If found,
3607          * remove it from the VSI context's RSS entry list and delete the entry.
3608          */
3609         hdr_type = ice_get_rss_hdr_type(prof);
3610         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3611                                  ice_rss_cfg, l_entry)
3612                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3613                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3614                     r->hash.hdr_type == hdr_type) {
3615                         ice_clear_bit(vsi_handle, r->vsis);
3616                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3617                                 LIST_DEL(&r->l_entry);
3618                                 ice_free(hw, r);
3619                         }
3620                         return;
3621                 }
3622 }
3623
3624 /**
3625  * ice_add_rss_list - add RSS configuration to list
3626  * @hw: pointer to the hardware structure
3627  * @vsi_handle: software VSI handle
3628  * @prof: pointer to flow profile
3629  *
3630  * Assumption: lock has already been acquired for RSS list
3631  */
3632 static enum ice_status
3633 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3634 {
3635         enum ice_rss_cfg_hdr_type hdr_type;
3636         struct ice_rss_cfg *r, *rss_cfg;
3637
3638         hdr_type = ice_get_rss_hdr_type(prof);
3639         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3640                             ice_rss_cfg, l_entry)
3641                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3642                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3643                     r->hash.hdr_type == hdr_type) {
3644                         ice_set_bit(vsi_handle, r->vsis);
3645                         return ICE_SUCCESS;
3646                 }
3647
3648         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3649         if (!rss_cfg)
3650                 return ICE_ERR_NO_MEMORY;
3651
3652         rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3653         rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3654         rss_cfg->hash.hdr_type = hdr_type;
3655         rss_cfg->hash.symm = prof->cfg.symm;
3656         ice_set_bit(vsi_handle, rss_cfg->vsis);
3657
3658         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3659
3660         return ICE_SUCCESS;
3661 }
3662
3663 #define ICE_FLOW_PROF_HASH_S    0
3664 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3665 #define ICE_FLOW_PROF_HDR_S     32
3666 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3667 #define ICE_FLOW_PROF_ENCAP_S   62
3668 #define ICE_FLOW_PROF_ENCAP_M   (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3669
3670 /* Flow profile ID format:
3671  * [0:31] - Packet match fields
3672  * [32:61] - Protocol header
3673  * [62:63] - Encapsulation flag:
3674  *           0 if non-tunneled
3675  *           1 if tunneled
3676  *           2 if tunneled with outer IPv4
3677  *           3 if tunneled with outer IPv6
3678  */
3679 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3680         ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3681                (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3682                (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
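
/* Worked example (illustrative): ICE_FLOW_GEN_PROFID(0x3, 0xC, 1) yields
 *	  0x0000000000000003  (hash field bits 0:31)
 *	| 0x0000000C00000000  (protocol header bits 32:61)
 *	| 0x4000000000000000  (encapsulation flag bits 62:63)
 *	= 0x4000000C00000003
 */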
3683
3684 static void
3685 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3686 {
3687         u32 s = ((src % 4) << 3); /* byte shift */
3688         u32 v = dst | 0x80; /* value to program */
3689         u8 i = src / 4; /* register index */
3690         u32 reg;
3691
3692         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3693         reg = (reg & ~(0xff << s)) | (v << s);
3694         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3695 }
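
/* Illustrative example: for src = 5, the word lands in GLQF_HSYMM register
 * index i = 5 / 4 = 1 at byte shift s = (5 % 4) * 8 = 8, so byte 1 of
 * GLQF_HSYMM(prof_id, 1) is rewritten with the value (dst | 0x80); the 0x80
 * bit presumably enables the symmetric substitution for that word.
 */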
3696
3697 static void
3698 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3699 {
3700         int fv_last_word =
3701                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3702         int i;
3703
3704         for (i = 0; i < len; i++) {
3705                 ice_rss_config_xor_word(hw, prof_id,
3706                                         /* Yes, field vector in GLQF_HSYMM and
3707                                          * GLQF_HINSET is reversed!
3708                                          */
3709                                         fv_last_word - (src + i),
3710                                         fv_last_word - (dst + i));
3711                 ice_rss_config_xor_word(hw, prof_id,
3712                                         fv_last_word - (dst + i),
3713                                         fv_last_word - (src + i));
3714         }
3715 }
3716
3717 static void
3718 ice_rss_update_symm(struct ice_hw *hw,
3719                     struct ice_flow_prof *prof)
3720 {
3721         struct ice_prof_map *map;
3722         u8 prof_id, m;
3723
3724         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3725         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3726         if (map)
3727                 prof_id = map->prof_id;
3728         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3729         if (!map)
3730                 return;
3731         /* clear to default */
3732         for (m = 0; m < 6; m++)
3733                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3734         if (prof->cfg.symm) {
3735                 struct ice_flow_seg_info *seg =
3736                         &prof->segs[prof->segs_cnt - 1];
3737
3738                 struct ice_flow_seg_xtrct *ipv4_src =
3739                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3740                 struct ice_flow_seg_xtrct *ipv4_dst =
3741                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3742                 struct ice_flow_seg_xtrct *ipv6_src =
3743                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3744                 struct ice_flow_seg_xtrct *ipv6_dst =
3745                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3746
3747                 struct ice_flow_seg_xtrct *tcp_src =
3748                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3749                 struct ice_flow_seg_xtrct *tcp_dst =
3750                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3751
3752                 struct ice_flow_seg_xtrct *udp_src =
3753                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3754                 struct ice_flow_seg_xtrct *udp_dst =
3755                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3756
3757                 struct ice_flow_seg_xtrct *sctp_src =
3758                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3759                 struct ice_flow_seg_xtrct *sctp_dst =
3760                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3761
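                /* Lengths below are in field vector words (presumably
                 * ICE_FLOW_FV_EXTRACT_SZ bytes each): 2 for an IPv4 address,
                 * 8 for an IPv6 address, and 1 for an L4 port.
                 */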
3762                 /* xor IPv4 */
3763                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3764                         ice_rss_config_xor(hw, prof_id,
3765                                            ipv4_src->idx, ipv4_dst->idx, 2);
3766
3767                 /* xor IPv6 */
3768                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3769                         ice_rss_config_xor(hw, prof_id,
3770                                            ipv6_src->idx, ipv6_dst->idx, 8);
3771
3772                 /* xor TCP */
3773                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3774                         ice_rss_config_xor(hw, prof_id,
3775                                            tcp_src->idx, tcp_dst->idx, 1);
3776
3777                 /* xor UDP */
3778                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3779                         ice_rss_config_xor(hw, prof_id,
3780                                            udp_src->idx, udp_dst->idx, 1);
3781
3782                 /* xor SCTP */
3783                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3784                         ice_rss_config_xor(hw, prof_id,
3785                                            sctp_src->idx, sctp_dst->idx, 1);
3786         }
3787 }
3788
3789 /**
3790  * ice_add_rss_cfg_sync - add an RSS configuration
3791  * @hw: pointer to the hardware structure
3792  * @vsi_handle: software VSI handle
3793  * @cfg: configuration parameters
3794  *
3795  * Assumption: lock has already been acquired for RSS list
3796  */
3797 static enum ice_status
3798 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3799                      const struct ice_rss_hash_cfg *cfg)
3800 {
3801         const enum ice_block blk = ICE_BLK_RSS;
3802         struct ice_flow_prof *prof = NULL;
3803         struct ice_flow_seg_info *segs;
3804         enum ice_status status;
3805         u8 segs_cnt;
3806
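        /* Outer-header configurations need only a single packet segment;
         * inner-header configurations presumably describe both the tunnel
         * and the inner packet, hence ICE_FLOW_SEG_MAX segments.
         */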
3807         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3808                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3809
3810         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3811                                                       sizeof(*segs));
3812         if (!segs)
3813                 return ICE_ERR_NO_MEMORY;
3814
3815         /* Construct the packet segment info from the hashed fields */
3816         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3817         if (status)
3818                 goto exit;
3819
3820         /* Search for a flow profile that has matching headers and hash fields
3821          * and has the input VSI associated with it. If found, no further
3822          * operations are required, so exit.
3823          */
3824         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3825                                         vsi_handle,
3826                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
3827                                         ICE_FLOW_FIND_PROF_CHK_VSI);
3828         if (prof) {
3829                 if (prof->cfg.symm == cfg->symm)
3830                         goto exit;
3831                 prof->cfg.symm = cfg->symm;
3832                 goto update_symm;
3833         }
3834
3835         /* Check if a flow profile exists with the same protocol headers and
3836          * is associated with the input VSI. If so, disassociate the VSI from
3837          * this profile. The VSI will be added to a new profile created with
3838          * the protocol headers and the new hash field configuration.
3839          */
3840         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3841                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3842         if (prof) {
3843                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3844                 if (!status)
3845                         ice_rem_rss_list(hw, vsi_handle, prof);
3846                 else
3847                         goto exit;
3848
3849                 /* Remove profile if it has no VSIs associated */
3850                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3851                         status = ice_flow_rem_prof(hw, blk, prof->id);
3852                         if (status)
3853                                 goto exit;
3854                 }
3855         }
3856
3857         /* Search for a profile that matches on the fields only, regardless of
3858          * the VSI. If one exists, associate the input VSI with that profile.
3859          */
3860         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3861                                         vsi_handle,
3862                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3863         if (prof) {
3864                 if (prof->cfg.symm == cfg->symm) {
3865                         status = ice_flow_assoc_prof(hw, blk, prof,
3866                                                      vsi_handle);
3867                         if (!status)
3868                                 status = ice_add_rss_list(hw, vsi_handle,
3869                                                           prof);
3870                 } else {
3871                         /* If a profile exists but with a different symmetric
3872                          * hashing requirement, just return an error.
3873                          */
3874                         status = ICE_ERR_NOT_SUPPORTED;
3875                 }
3876                 goto exit;
3877         }
3878
3879         /* Create a new flow profile with the generated profile ID and packet
3880          * segment information.
3881          */
3882         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3883                                    ICE_FLOW_GEN_PROFID(cfg->hash_flds,
3884                                                        segs[segs_cnt - 1].hdrs,
3885                                                        cfg->hdr_type),
3886                                    segs, segs_cnt, NULL, 0, &prof);
3887         if (status)
3888                 goto exit;
3889
3890         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3891         /* If associating the VSI with the new flow profile failed, then the
3892          * profile can be removed.
3893          */
3894         if (status) {
3895                 ice_flow_rem_prof(hw, blk, prof->id);
3896                 goto exit;
3897         }
3898
3899         status = ice_add_rss_list(hw, vsi_handle, prof);
3900
3901         prof->cfg.symm = cfg->symm;
3902 update_symm:
3903         ice_rss_update_symm(hw, prof);
3904
3905 exit:
3906         ice_free(hw, segs);
3907         return status;
3908 }
3909
3910 /**
3911  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3912  * @hw: pointer to the hardware structure
3913  * @vsi_handle: software VSI handle
3914  * @cfg: configuration parameters
3915  *
3916  * This function will generate a flow profile based on the input fields to
3917  * hash on and the flow type, and will use the VSI number to add a flow
3918  * entry to the profile.
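 *
 * Illustrative usage (an editor's sketch, not taken from this file; the
 * ICE_FLOW_HASH_IPV4 macro is assumed to be defined in ice_flow.h):
 *
 *	struct ice_rss_hash_cfg cfg = { 0 };
 *
 *	cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4;
 *	cfg.hash_flds = ICE_FLOW_HASH_IPV4;
 *	cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
 *	cfg.symm = false;
 *	status = ice_add_rss_cfg(hw, vsi_handle, &cfg);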
3919  */
3920 enum ice_status
3921 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3922                 const struct ice_rss_hash_cfg *cfg)
3923 {
3924         struct ice_rss_hash_cfg local_cfg;
3925         enum ice_status status;
3926
3927         if (!ice_is_vsi_valid(hw, vsi_handle) ||
3928             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3929             cfg->hash_flds == ICE_HASH_INVALID)
3930                 return ICE_ERR_PARAM;
3931
3932         local_cfg = *cfg;
3933         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3934                 ice_acquire_lock(&hw->rss_locks);
3935                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3936                 ice_release_lock(&hw->rss_locks);
3937         } else {
3938                 ice_acquire_lock(&hw->rss_locks);
3939                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3940                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3941                 if (!status) {
3942                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3943                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3944                                                       &local_cfg);
3945                 }
3946                 ice_release_lock(&hw->rss_locks);
3947         }
3948
3949         return status;
3950 }
3951
3952 /**
3953  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3954  * @hw: pointer to the hardware structure
3955  * @vsi_handle: software VSI handle
3956  * @cfg: configuration parameters
3957  *
3958  * Assumption: lock has already been acquired for RSS list
3959  */
3960 static enum ice_status
3961 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3962                      const struct ice_rss_hash_cfg *cfg)
3963 {
3964         const enum ice_block blk = ICE_BLK_RSS;
3965         struct ice_flow_seg_info *segs;
3966         struct ice_flow_prof *prof;
3967         enum ice_status status;
3968         u8 segs_cnt;
3969
3970         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3971                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3972         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3973                                                       sizeof(*segs));
3974         if (!segs)
3975                 return ICE_ERR_NO_MEMORY;
3976
3977         /* Construct the packet segment info from the hashed fields */
3978         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3979         if (status)
3980                 goto out;
3981
3982         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3983                                         vsi_handle,
3984                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3985         if (!prof) {
3986                 status = ICE_ERR_DOES_NOT_EXIST;
3987                 goto out;
3988         }
3989
3990         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3991         if (status)
3992                 goto out;
3993
3994         /* Remove RSS configuration from VSI context before deleting
3995          * the flow profile.
3996          */
3997         ice_rem_rss_list(hw, vsi_handle, prof);
3998
3999         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
4000                 status = ice_flow_rem_prof(hw, blk, prof->id);
4001
4002 out:
4003         ice_free(hw, segs);
4004         return status;
4005 }
4006
4007 /**
4008  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
4009  * @hw: pointer to the hardware structure
4010  * @vsi_handle: software VSI handle
4011  * @cfg: configuration parameters
4012  *
4013  * This function will look up the flow profile based on the input
4014  * hash field bitmap, iterate through the entry list of that
4015  * profile, and find the entry associated with the input VSI to be
4016  * removed. Calls are made to the underlying flow APIs, which will
4017  * in turn build or update buffers for the RSS XLT1 section.
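 *
 * A configuration added with ice_add_rss_cfg() is removed by passing the
 * same struct ice_rss_hash_cfg to this function, e.g. (illustrative):
 *
 *	status = ice_rem_rss_cfg(hw, vsi_handle, &cfg);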
4018  */
4019 enum ice_status
4020 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4021                 const struct ice_rss_hash_cfg *cfg)
4022 {
4023         struct ice_rss_hash_cfg local_cfg;
4024         enum ice_status status;
4025
4026         if (!ice_is_vsi_valid(hw, vsi_handle) ||
4027             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4028             cfg->hash_flds == ICE_HASH_INVALID)
4029                 return ICE_ERR_PARAM;
4030
4031         ice_acquire_lock(&hw->rss_locks);
4032         local_cfg = *cfg;
4033         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4034                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4035         } else {
4036                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4037                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4038
4039                 if (!status) {
4040                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4041                         status = ice_rem_rss_cfg_sync(hw, vsi_handle,
4042                                                       &local_cfg);
4043                 }
4044         }
4045         ice_release_lock(&hw->rss_locks);
4046
4047         return status;
4048 }
4049
4050 /**
4051  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
4052  * @hw: pointer to the hardware structure
4053  * @vsi_handle: software VSI handle
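 *
 * Walks the RSS configuration list and re-applies, via
 * ice_add_rss_cfg_sync(), every entry whose VSI bitmap includes
 * @vsi_handle. This is typically used to restore RSS state for the VSI,
 * e.g. after a reset (editor's note on intended use).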
4054  */
4055 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
4056 {
4057         enum ice_status status = ICE_SUCCESS;
4058         struct ice_rss_cfg *r;
4059
4060         if (!ice_is_vsi_valid(hw, vsi_handle))
4061                 return ICE_ERR_PARAM;
4062
4063         ice_acquire_lock(&hw->rss_locks);
4064         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4065                             ice_rss_cfg, l_entry) {
4066                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
4067                         status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
4068                         if (status)
4069                                 break;
4070                 }
4071         }
4072         ice_release_lock(&hw->rss_locks);
4073
4074         return status;
4075 }
4076
4077 /**
4078  * ice_get_rss_cfg - returns hashed fields for the given header types
4079  * @hw: pointer to the hardware structure
4080  * @vsi_handle: software VSI handle
4081  * @hdrs: protocol header type
4082  *
4083  * This function will return the match fields of the first instance of a flow
4084  * profile having the given header types and containing the input VSI.
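 *
 * Illustrative usage (an editor's sketch):
 *
 *	u64 hash_flds = ice_get_rss_cfg(hw, vsi_handle, ICE_FLOW_SEG_HDR_IPV4);
 *
 * A return value of ICE_HASH_INVALID indicates that no matching RSS
 * configuration is associated with the VSI.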
4085  */
4086 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4087 {
4088         u64 rss_hash = ICE_HASH_INVALID;
4089         struct ice_rss_cfg *r;
4090
4091         /* verify that the protocol headers are non-zero and the VSI is valid */
4092         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4093                 return ICE_HASH_INVALID;
4094
4095         ice_acquire_lock(&hw->rss_locks);
4096         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4097                             ice_rss_cfg, l_entry)
4098                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4099                     r->hash.addl_hdrs == hdrs) {
4100                         rss_hash = r->hash.hash_flds;
4101                         break;
4102                 }
4103         ice_release_lock(&hw->rss_locks);
4104
4105         return rss_hash;
4106 }