drivers/net/ice/base/ice_flow.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2021 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
17 #define ICE_FLOW_FLD_SZ_IP_TTL          1
18 #define ICE_FLOW_FLD_SZ_IP_PROT         1
19 #define ICE_FLOW_FLD_SZ_PORT            2
20 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
21 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
22 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
23 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
24 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
25 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
26 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
27 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
28 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
29 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
30 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
31 #define ICE_FLOW_FLD_SZ_AH_SPI  4
32 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
33 #define ICE_FLOW_FLD_SZ_VXLAN_VNI       4
34 #define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
35
36 /* Describe properties of a protocol header field */
37 struct ice_flow_field_info {
38         enum ice_flow_seg_hdr hdr;
39         s16 off;        /* Offset from start of a protocol header, in bits */
40         u16 size;       /* Size of fields in bits */
41         u16 mask;       /* 16-bit mask for field */
42 };
43
44 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
45         .hdr = _hdr, \
46         .off = (_offset_bytes) * BITS_PER_BYTE, \
47         .size = (_size_bytes) * BITS_PER_BYTE, \
48         .mask = 0, \
49 }
50
51 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
52         .hdr = _hdr, \
53         .off = (_offset_bytes) * BITS_PER_BYTE, \
54         .size = (_size_bytes) * BITS_PER_BYTE, \
55         .mask = _mask, \
56 }
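
/* Example (a sketch of how the macros above expand, not an additional
 * table entry): ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12,
 * ICE_FLOW_FLD_SZ_IPV4_ADDR) describes the IPv4 source address as
 * off = 12 * BITS_PER_BYTE = 96 and size = 4 * BITS_PER_BYTE = 32,
 * i.e. the bit offset and width within the IPv4 header, with mask left
 * at 0 (no sub-field masking).
 */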
57
58 /* Table containing properties of supported protocol header fields */
59 static const
60 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
61         /* Ether */
62         /* ICE_FLOW_FIELD_IDX_ETH_DA */
63         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
64         /* ICE_FLOW_FIELD_IDX_ETH_SA */
65         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
66         /* ICE_FLOW_FIELD_IDX_S_VLAN */
67         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
68         /* ICE_FLOW_FIELD_IDX_C_VLAN */
69         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
70         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
71         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
72         /* IPv4 / IPv6 */
73         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
74         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
75                               0x00fc),
76         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
77         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
78                               0x0ff0),
79         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
80         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
81                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
82         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
83         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
84                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
85         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
86         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
87                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
88         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
89         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
90                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
91         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
92         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
94         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
95         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
96         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
99         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
100         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
101                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
102         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
103         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
104                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
105         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
106         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
107                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
108         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
109         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
110                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
111         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
112         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
113                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
114         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
115         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
116                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
117         /* Transport */
118         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
119         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
120         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
121         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
122         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
123         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
124         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
125         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
126         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
127         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
128         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
129         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
130         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
131         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
132         /* ARP */
133         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
134         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
136         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
137         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
138         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
139         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
140         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
141         /* ICE_FLOW_FIELD_IDX_ARP_OP */
142         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
143         /* ICMP */
144         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
145         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
146         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
147         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
148         /* GRE */
149         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
150         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
151         /* GTP */
152         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
153         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
154                           ICE_FLOW_FLD_SZ_GTP_TEID),
155         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
156         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
157                           ICE_FLOW_FLD_SZ_GTP_TEID),
158         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
159         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
160                           ICE_FLOW_FLD_SZ_GTP_TEID),
161         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
162         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
163                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
164         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
165         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
166                           ICE_FLOW_FLD_SZ_GTP_TEID),
167         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
168         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
169                           ICE_FLOW_FLD_SZ_GTP_TEID),
170         /* PPPOE */
171         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
172         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
173                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
174         /* PFCP */
175         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
176         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
177                           ICE_FLOW_FLD_SZ_PFCP_SEID),
178         /* L2TPV3 */
179         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
180         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
181                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
182         /* ESP */
183         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
184         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
185                           ICE_FLOW_FLD_SZ_ESP_SPI),
186         /* AH */
187         /* ICE_FLOW_FIELD_IDX_AH_SPI */
188         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
189                           ICE_FLOW_FLD_SZ_AH_SPI),
190         /* NAT_T_ESP */
191         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
192         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
193                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
194         /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
195         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
196                           ICE_FLOW_FLD_SZ_VXLAN_VNI),
197         /* ECPRI_TP0 */
198         /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
199         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
200                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
201         /* UDP_ECPRI_TP0 */
202         /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
203         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
204                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
205 };
206
207 /* Bitmaps indicating relevant packet types for a particular protocol header
208  *
209  * Packet types for packets with an Outer/First/Single MAC header
210  */
211 static const u32 ice_ptypes_mac_ofos[] = {
212         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
213         0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
214         0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
215         0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
216         0x00000000, 0x00000000, 0x00000000, 0x00000000,
217         0x00000000, 0x00000000, 0x00000000, 0x00000000,
218         0x00000000, 0x00000000, 0x00000000, 0x00000000,
219         0x00000000, 0x00000000, 0x00000000, 0x00000000,
220 };
221
222 /* Packet types for packets with an Innermost/Last MAC VLAN header */
223 static const u32 ice_ptypes_macvlan_il[] = {
224         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
225         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
226         0x00000000, 0x00000000, 0x00000000, 0x00000000,
227         0x00000000, 0x00000000, 0x00000000, 0x00000000,
228         0x00000000, 0x00000000, 0x00000000, 0x00000000,
229         0x00000000, 0x00000000, 0x00000000, 0x00000000,
230         0x00000000, 0x00000000, 0x00000000, 0x00000000,
231         0x00000000, 0x00000000, 0x00000000, 0x00000000,
232 };
233
234 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
235  * include IPV4 other PTYPEs
236  */
237 static const u32 ice_ptypes_ipv4_ofos[] = {
238         0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
239         0x00000000, 0x00000155, 0x00000000, 0x00000000,
240         0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
241         0x00001500, 0x00000000, 0x00000000, 0x00000000,
242         0x00000000, 0x00000000, 0x00000000, 0x00000000,
243         0x00000000, 0x00000000, 0x00000000, 0x00000000,
244         0x00000000, 0x00000000, 0x00000000, 0x00000000,
245         0x00000000, 0x00000000, 0x00000000, 0x00000000,
246 };
247
248 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
249  * IPV4 other PTYPEs
250  */
251 static const u32 ice_ptypes_ipv4_ofos_all[] = {
252         0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
253         0x00000000, 0x00000155, 0x00000000, 0x00000000,
254         0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
255         0x03FFD500, 0x00000000, 0x00000000, 0x00000000,
256         0x00000000, 0x00000000, 0x00000000, 0x00000000,
257         0x00000000, 0x00000000, 0x00000000, 0x00000000,
258         0x00000000, 0x00000000, 0x00000000, 0x00000000,
259         0x00000000, 0x00000000, 0x00000000, 0x00000000,
260 };
261
262 /* Packet types for packets with an Innermost/Last IPv4 header */
263 static const u32 ice_ptypes_ipv4_il[] = {
264         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
265         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
266         0x00000000, 0x00000000, 0x001FF800, 0x00100000,
267         0xFC0FC000, 0x00000000, 0x00000000, 0x00000000,
268         0x00000000, 0x00000000, 0x00000000, 0x00000000,
269         0x00000000, 0x00000000, 0x00000000, 0x00000000,
270         0x00000000, 0x00000000, 0x00000000, 0x00000000,
271         0x00000000, 0x00000000, 0x00000000, 0x00000000,
272 };
273
274 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
275  * include IPV6 other PTYPEs
276  */
277 static const u32 ice_ptypes_ipv6_ofos[] = {
278         0x00000000, 0x00000000, 0x77000000, 0x10002000,
279         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
280         0x00000000, 0x03F00000, 0x00000540, 0x00000000,
281         0x00002A00, 0x00000000, 0x00000000, 0x00000000,
282         0x00000000, 0x00000000, 0x00000000, 0x00000000,
283         0x00000000, 0x00000000, 0x00000000, 0x00000000,
284         0x00000000, 0x00000000, 0x00000000, 0x00000000,
285         0x00000000, 0x00000000, 0x00000000, 0x00000000,
286 };
287
288 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
289  * IPV6 other PTYPEs
290  */
291 static const u32 ice_ptypes_ipv6_ofos_all[] = {
292         0x00000000, 0x00000000, 0x77000000, 0x10002000,
293         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
294         0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
295         0xFC002A00, 0x0000003F, 0x00000000, 0x00000000,
296         0x00000000, 0x00000000, 0x00000000, 0x00000000,
297         0x00000000, 0x00000000, 0x00000000, 0x00000000,
298         0x00000000, 0x00000000, 0x00000000, 0x00000000,
299         0x00000000, 0x00000000, 0x00000000, 0x00000000,
300 };
301
302 /* Packet types for packets with an Innermost/Last IPv6 header */
303 static const u32 ice_ptypes_ipv6_il[] = {
304         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
305         0x00000770, 0x00000000, 0x00000000, 0x00000000,
306         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
307         0x03F00000, 0x0000003F, 0x00000000, 0x00000000,
308         0x00000000, 0x00000000, 0x00000000, 0x00000000,
309         0x00000000, 0x00000000, 0x00000000, 0x00000000,
310         0x00000000, 0x00000000, 0x00000000, 0x00000000,
311         0x00000000, 0x00000000, 0x00000000, 0x00000000,
312 };
313
314 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
315 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
316         0x10C00000, 0x04000800, 0x00000000, 0x00000000,
317         0x00000000, 0x00000000, 0x00000000, 0x00000000,
318         0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
319         0x00001500, 0x00000000, 0x00000000, 0x00000000,
320         0x00000000, 0x00000000, 0x00000000, 0x00000000,
321         0x00000000, 0x00000000, 0x00000000, 0x00000000,
322         0x00000000, 0x00000000, 0x00000000, 0x00000000,
323         0x00000000, 0x00000000, 0x00000000, 0x00000000,
324 };
325
326 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
327 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
328         0x60000000, 0x18043008, 0x80000002, 0x6010c021,
329         0x00000008, 0x00000000, 0x00000000, 0x00000000,
330         0x00000000, 0x00000000, 0x00139800, 0x00000000,
331         0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
332         0x00000000, 0x00000000, 0x00000000, 0x00000000,
333         0x00000000, 0x00000000, 0x00000000, 0x00000000,
334         0x00000000, 0x00000000, 0x00000000, 0x00000000,
335         0x00000000, 0x00000000, 0x00000000, 0x00000000,
336 };
337
338 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
339 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
340         0x00000000, 0x00000000, 0x43000000, 0x10002000,
341         0x00000000, 0x00000000, 0x00000000, 0x00000000,
342         0x00000000, 0x02300000, 0x00000540, 0x00000000,
343         0x00002A00, 0x00000000, 0x00000000, 0x00000000,
344         0x00000000, 0x00000000, 0x00000000, 0x00000000,
345         0x00000000, 0x00000000, 0x00000000, 0x00000000,
346         0x00000000, 0x00000000, 0x00000000, 0x00000000,
347         0x00000000, 0x00000000, 0x00000000, 0x00000000,
348 };
349
350 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
351 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
352         0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
353         0x00000430, 0x00000000, 0x00000000, 0x00000000,
354         0x00000000, 0x00000000, 0x4e600000, 0x00000000,
355         0x02300000, 0x00000023, 0x00000000, 0x00000000,
356         0x00000000, 0x00000000, 0x00000000, 0x00000000,
357         0x00000000, 0x00000000, 0x00000000, 0x00000000,
358         0x00000000, 0x00000000, 0x00000000, 0x00000000,
359         0x00000000, 0x00000000, 0x00000000, 0x00000000,
360 };
361
362 /* Packet types for packets with an Outermost/First ARP header */
363 static const u32 ice_ptypes_arp_of[] = {
364         0x00000800, 0x00000000, 0x00000000, 0x00000000,
365         0x00000000, 0x00000000, 0x00000000, 0x00000000,
366         0x00000000, 0x00000000, 0x00000000, 0x00000000,
367         0x00000000, 0x00000000, 0x00000000, 0x00000000,
368         0x00000000, 0x00000000, 0x00000000, 0x00000000,
369         0x00000000, 0x00000000, 0x00000000, 0x00000000,
370         0x00000000, 0x00000000, 0x00000000, 0x00000000,
371         0x00000000, 0x00000000, 0x00000000, 0x00000000,
372 };
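
/* Note on how these bitmaps are read (worked example, assuming the
 * standard ice PTYPE numbering): each u32 covers 32 packet types, so
 * bit n of word w marks PTYPE (w * 32 + n) as relevant. In
 * ice_ptypes_arp_of above, the only bit set is 0x00000800 in word 0,
 * i.e. bit 11, the L2 ARP packet type.
 */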
373
374 /* UDP Packet types for non-tunneled packets or tunneled
375  * packets with inner UDP.
376  */
377 static const u32 ice_ptypes_udp_il[] = {
378         0x81000000, 0x20204040, 0x04000010, 0x80810102,
379         0x00000040, 0x00000000, 0x00000000, 0x00000000,
380         0x00000000, 0x00410000, 0x908427E0, 0x00100007,
381         0x10410000, 0x00000004, 0x00000000, 0x00000000,
382         0x00000000, 0x00000000, 0x00000000, 0x00000000,
383         0x00000000, 0x00000000, 0x00000000, 0x00000000,
384         0x00000000, 0x00000000, 0x00000000, 0x00000000,
385         0x00000000, 0x00000000, 0x00000000, 0x00000000,
386 };
387
388 /* Packet types for packets with an Innermost/Last TCP header */
389 static const u32 ice_ptypes_tcp_il[] = {
390         0x04000000, 0x80810102, 0x10000040, 0x02040408,
391         0x00000102, 0x00000000, 0x00000000, 0x00000000,
392         0x00000000, 0x00820000, 0x21084000, 0x00000000,
393         0x20820000, 0x00000008, 0x00000000, 0x00000000,
394         0x00000000, 0x00000000, 0x00000000, 0x00000000,
395         0x00000000, 0x00000000, 0x00000000, 0x00000000,
396         0x00000000, 0x00000000, 0x00000000, 0x00000000,
397         0x00000000, 0x00000000, 0x00000000, 0x00000000,
398 };
399
400 /* Packet types for packets with an Innermost/Last SCTP header */
401 static const u32 ice_ptypes_sctp_il[] = {
402         0x08000000, 0x01020204, 0x20000081, 0x04080810,
403         0x00000204, 0x00000000, 0x00000000, 0x00000000,
404         0x00000000, 0x01040000, 0x00000000, 0x00000000,
405         0x41040000, 0x00000010, 0x00000000, 0x00000000,
406         0x00000000, 0x00000000, 0x00000000, 0x00000000,
407         0x00000000, 0x00000000, 0x00000000, 0x00000000,
408         0x00000000, 0x00000000, 0x00000000, 0x00000000,
409         0x00000000, 0x00000000, 0x00000000, 0x00000000,
410 };
411
412 /* Packet types for packets with an Outermost/First ICMP header */
413 static const u32 ice_ptypes_icmp_of[] = {
414         0x10000000, 0x00000000, 0x00000000, 0x00000000,
415         0x00000000, 0x00000000, 0x00000000, 0x00000000,
416         0x00000000, 0x00000000, 0x00000000, 0x00000000,
417         0x00000000, 0x00000000, 0x00000000, 0x00000000,
418         0x00000000, 0x00000000, 0x00000000, 0x00000000,
419         0x00000000, 0x00000000, 0x00000000, 0x00000000,
420         0x00000000, 0x00000000, 0x00000000, 0x00000000,
421         0x00000000, 0x00000000, 0x00000000, 0x00000000,
422 };
423
424 /* Packet types for packets with an Innermost/Last ICMP header */
425 static const u32 ice_ptypes_icmp_il[] = {
426         0x00000000, 0x02040408, 0x40000102, 0x08101020,
427         0x00000408, 0x00000000, 0x00000000, 0x00000000,
428         0x00000000, 0x00000000, 0x42108000, 0x00000000,
429         0x82080000, 0x00000020, 0x00000000, 0x00000000,
430         0x00000000, 0x00000000, 0x00000000, 0x00000000,
431         0x00000000, 0x00000000, 0x00000000, 0x00000000,
432         0x00000000, 0x00000000, 0x00000000, 0x00000000,
433         0x00000000, 0x00000000, 0x00000000, 0x00000000,
434 };
435
436 /* Packet types for packets with an Outermost/First GRE header */
437 static const u32 ice_ptypes_gre_of[] = {
438         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
439         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
440         0x00000000, 0x00000000, 0x00000000, 0x00000000,
441         0x00000000, 0x00000000, 0x00000000, 0x00000000,
442         0x00000000, 0x00000000, 0x00000000, 0x00000000,
443         0x00000000, 0x00000000, 0x00000000, 0x00000000,
444         0x00000000, 0x00000000, 0x00000000, 0x00000000,
445         0x00000000, 0x00000000, 0x00000000, 0x00000000,
446 };
447
448 /* Packet types for packets with an Innermost/Last MAC header */
449 static const u32 ice_ptypes_mac_il[] = {
450         0x00000000, 0x20000000, 0x00000000, 0x00000000,
451         0x00000000, 0x00000000, 0x00000000, 0x00000000,
452         0x00000000, 0x00000000, 0x00000000, 0x00000000,
453         0x00000000, 0x00000000, 0x00000000, 0x00000000,
454         0x00000000, 0x00000000, 0x00000000, 0x00000000,
455         0x00000000, 0x00000000, 0x00000000, 0x00000000,
456         0x00000000, 0x00000000, 0x00000000, 0x00000000,
457         0x00000000, 0x00000000, 0x00000000, 0x00000000,
458 };
459
460 /* Packet types for GTPC */
461 static const u32 ice_ptypes_gtpc[] = {
462         0x00000000, 0x00000000, 0x00000000, 0x00000000,
463         0x00000000, 0x00000000, 0x00000000, 0x00000000,
464         0x00000000, 0x00000000, 0x000001E0, 0x00000000,
465         0x00000000, 0x00000000, 0x00000000, 0x00000000,
466         0x00000000, 0x00000000, 0x00000000, 0x00000000,
467         0x00000000, 0x00000000, 0x00000000, 0x00000000,
468         0x00000000, 0x00000000, 0x00000000, 0x00000000,
469         0x00000000, 0x00000000, 0x00000000, 0x00000000,
470 };
471
472 /* Packet types for VXLAN with VNI */
473 static const u32 ice_ptypes_vxlan_vni[] = {
474         0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
475         0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
476         0x00000000, 0x00000000, 0x00000000, 0x00000000,
477         0x00000000, 0x00000000, 0x00000000, 0x00000000,
478         0x00000000, 0x00000000, 0x00000000, 0x00000000,
479         0x00000000, 0x00000000, 0x00000000, 0x00000000,
480         0x00000000, 0x00000000, 0x00000000, 0x00000000,
481         0x00000000, 0x00000000, 0x00000000, 0x00000000,
482 };
483
484 /* Packet types for GTPC with TEID */
485 static const u32 ice_ptypes_gtpc_tid[] = {
486         0x00000000, 0x00000000, 0x00000000, 0x00000000,
487         0x00000000, 0x00000000, 0x00000000, 0x00000000,
488         0x00000000, 0x00000000, 0x00000060, 0x00000000,
489         0x00000000, 0x00000000, 0x00000000, 0x00000000,
490         0x00000000, 0x00000000, 0x00000000, 0x00000000,
491         0x00000000, 0x00000000, 0x00000000, 0x00000000,
492         0x00000000, 0x00000000, 0x00000000, 0x00000000,
493         0x00000000, 0x00000000, 0x00000000, 0x00000000,
494 };
495
496 /* Packet types for GTPU */
497 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
498         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
499         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
500         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
501         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
502         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
503         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
504         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
505         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
506         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
507         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
508         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
509         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
510         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
511         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
512         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
513         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
514         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
515         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
516         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
517         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
518 };
519
520 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
521         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
522         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
523         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
524         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
525         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
526         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
527         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
528         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
529         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
530         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
531         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
532         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
533         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
534         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
535         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
536         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
537         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
538         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
539         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
540         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
541 };
542
543 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
544         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
545         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
546         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
547         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
548         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
549         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
550         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
551         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
552         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
553         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
554         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
555         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
556         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
557         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
558         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
559         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
560         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
561         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
562         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
563         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
564 };
565
566 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
567         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
568         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
569         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
570         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
571         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
572         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
573         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
574         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
575         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
576         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
577         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
578         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
579         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
580         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
581         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
582         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
583         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
584         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
585         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
586         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
587 };
588
589 static const u32 ice_ptypes_gtpu[] = {
590         0x00000000, 0x00000000, 0x00000000, 0x00000000,
591         0x00000000, 0x00000000, 0x00000000, 0x00000000,
592         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
593         0x00000000, 0x00000000, 0x00000000, 0x00000000,
594         0x00000000, 0x00000000, 0x00000000, 0x00000000,
595         0x00000000, 0x00000000, 0x00000000, 0x00000000,
596         0x00000000, 0x00000000, 0x00000000, 0x00000000,
597         0x00000000, 0x00000000, 0x00000000, 0x00000000,
598 };
599
600 /* Packet types for pppoe */
601 static const u32 ice_ptypes_pppoe[] = {
602         0x00000000, 0x00000000, 0x00000000, 0x00000000,
603         0x00000000, 0x00000000, 0x00000000, 0x00000000,
604         0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
605         0x00000000, 0x00000000, 0x00000000, 0x00000000,
606         0x00000000, 0x00000000, 0x00000000, 0x00000000,
607         0x00000000, 0x00000000, 0x00000000, 0x00000000,
608         0x00000000, 0x00000000, 0x00000000, 0x00000000,
609         0x00000000, 0x00000000, 0x00000000, 0x00000000,
610 };
611
612 /* Packet types for packets with PFCP NODE header */
613 static const u32 ice_ptypes_pfcp_node[] = {
614         0x00000000, 0x00000000, 0x00000000, 0x00000000,
615         0x00000000, 0x00000000, 0x00000000, 0x00000000,
616         0x00000000, 0x00000000, 0x80000000, 0x00000002,
617         0x00000000, 0x00000000, 0x00000000, 0x00000000,
618         0x00000000, 0x00000000, 0x00000000, 0x00000000,
619         0x00000000, 0x00000000, 0x00000000, 0x00000000,
620         0x00000000, 0x00000000, 0x00000000, 0x00000000,
621         0x00000000, 0x00000000, 0x00000000, 0x00000000,
622 };
623
624 /* Packet types for packets with PFCP SESSION header */
625 static const u32 ice_ptypes_pfcp_session[] = {
626         0x00000000, 0x00000000, 0x00000000, 0x00000000,
627         0x00000000, 0x00000000, 0x00000000, 0x00000000,
628         0x00000000, 0x00000000, 0x00000000, 0x00000005,
629         0x00000000, 0x00000000, 0x00000000, 0x00000000,
630         0x00000000, 0x00000000, 0x00000000, 0x00000000,
631         0x00000000, 0x00000000, 0x00000000, 0x00000000,
632         0x00000000, 0x00000000, 0x00000000, 0x00000000,
633         0x00000000, 0x00000000, 0x00000000, 0x00000000,
634 };
635
636 /* Packet types for l2tpv3 */
637 static const u32 ice_ptypes_l2tpv3[] = {
638         0x00000000, 0x00000000, 0x00000000, 0x00000000,
639         0x00000000, 0x00000000, 0x00000000, 0x00000000,
640         0x00000000, 0x00000000, 0x00000000, 0x00000300,
641         0x00000000, 0x00000000, 0x00000000, 0x00000000,
642         0x00000000, 0x00000000, 0x00000000, 0x00000000,
643         0x00000000, 0x00000000, 0x00000000, 0x00000000,
644         0x00000000, 0x00000000, 0x00000000, 0x00000000,
645         0x00000000, 0x00000000, 0x00000000, 0x00000000,
646 };
647
648 /* Packet types for esp */
649 static const u32 ice_ptypes_esp[] = {
650         0x00000000, 0x00000000, 0x00000000, 0x00000000,
651         0x00000000, 0x00000003, 0x00000000, 0x00000000,
652         0x00000000, 0x00000000, 0x00000000, 0x00000000,
653         0x00000000, 0x00000000, 0x00000000, 0x00000000,
654         0x00000000, 0x00000000, 0x00000000, 0x00000000,
655         0x00000000, 0x00000000, 0x00000000, 0x00000000,
656         0x00000000, 0x00000000, 0x00000000, 0x00000000,
657         0x00000000, 0x00000000, 0x00000000, 0x00000000,
658 };
659
660 /* Packet types for ah */
661 static const u32 ice_ptypes_ah[] = {
662         0x00000000, 0x00000000, 0x00000000, 0x00000000,
663         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
664         0x00000000, 0x00000000, 0x00000000, 0x00000000,
665         0x00000000, 0x00000000, 0x00000000, 0x00000000,
666         0x00000000, 0x00000000, 0x00000000, 0x00000000,
667         0x00000000, 0x00000000, 0x00000000, 0x00000000,
668         0x00000000, 0x00000000, 0x00000000, 0x00000000,
669         0x00000000, 0x00000000, 0x00000000, 0x00000000,
670 };
671
672 /* Packet types for packets with NAT_T ESP header */
673 static const u32 ice_ptypes_nat_t_esp[] = {
674         0x00000000, 0x00000000, 0x00000000, 0x00000000,
675         0x00000000, 0x00000030, 0x00000000, 0x00000000,
676         0x00000000, 0x00000000, 0x00000000, 0x00000000,
677         0x00000000, 0x00000000, 0x00000000, 0x00000000,
678         0x00000000, 0x00000000, 0x00000000, 0x00000000,
679         0x00000000, 0x00000000, 0x00000000, 0x00000000,
680         0x00000000, 0x00000000, 0x00000000, 0x00000000,
681         0x00000000, 0x00000000, 0x00000000, 0x00000000,
682 };
683
684 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
685         0x00000846, 0x00000000, 0x00000000, 0x00000000,
686         0x00000000, 0x00000000, 0x00000000, 0x00000000,
687         0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
688         0x00000000, 0x00000000, 0x00000000, 0x00000000,
689         0x00000000, 0x00000000, 0x00000000, 0x00000000,
690         0x00000000, 0x00000000, 0x00000000, 0x00000000,
691         0x00000000, 0x00000000, 0x00000000, 0x00000000,
692         0x00000000, 0x00000000, 0x00000000, 0x00000000,
693 };
694
695 static const u32 ice_ptypes_gtpu_no_ip[] = {
696         0x00000000, 0x00000000, 0x00000000, 0x00000000,
697         0x00000000, 0x00000000, 0x00000000, 0x00000000,
698         0x00000000, 0x00000000, 0x00000600, 0x00000000,
699         0x00000000, 0x00000000, 0x00000000, 0x00000000,
700         0x00000000, 0x00000000, 0x00000000, 0x00000000,
701         0x00000000, 0x00000000, 0x00000000, 0x00000000,
702         0x00000000, 0x00000000, 0x00000000, 0x00000000,
703         0x00000000, 0x00000000, 0x00000000, 0x00000000,
704 };
705
706 static const u32 ice_ptypes_ecpri_tp0[] = {
707         0x00000000, 0x00000000, 0x00000000, 0x00000000,
708         0x00000000, 0x00000000, 0x00000000, 0x00000000,
709         0x00000000, 0x00000000, 0x00000000, 0x00000400,
710         0x00000000, 0x00000000, 0x00000000, 0x00000000,
711         0x00000000, 0x00000000, 0x00000000, 0x00000000,
712         0x00000000, 0x00000000, 0x00000000, 0x00000000,
713         0x00000000, 0x00000000, 0x00000000, 0x00000000,
714         0x00000000, 0x00000000, 0x00000000, 0x00000000,
715 };
716
717 static const u32 ice_ptypes_udp_ecpri_tp0[] = {
718         0x00000000, 0x00000000, 0x00000000, 0x00000000,
719         0x00000000, 0x00000000, 0x00000000, 0x00000000,
720         0x00000000, 0x00000000, 0x00000000, 0x00100000,
721         0x00000000, 0x00000000, 0x00000000, 0x00000000,
722         0x00000000, 0x00000000, 0x00000000, 0x00000000,
723         0x00000000, 0x00000000, 0x00000000, 0x00000000,
724         0x00000000, 0x00000000, 0x00000000, 0x00000000,
725         0x00000000, 0x00000000, 0x00000000, 0x00000000,
726 };
727
728 static const u32 ice_ptypes_l2tpv2[] = {
729         0x00000000, 0x00000000, 0x00000000, 0x00000000,
730         0x00000000, 0x00000000, 0x00000000, 0x00000000,
731         0x00000000, 0x00000000, 0x00000000, 0x00000000,
732         0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
733         0x00000000, 0x00000000, 0x00000000, 0x00000000,
734         0x00000000, 0x00000000, 0x00000000, 0x00000000,
735         0x00000000, 0x00000000, 0x00000000, 0x00000000,
736         0x00000000, 0x00000000, 0x00000000, 0x00000000,
737 };
738
739 static const u32 ice_ptypes_ppp[] = {
740         0x00000000, 0x00000000, 0x00000000, 0x00000000,
741         0x00000000, 0x00000000, 0x00000000, 0x00000000,
742         0x00000000, 0x00000000, 0x00000000, 0x00000000,
743         0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
744         0x00000000, 0x00000000, 0x00000000, 0x00000000,
745         0x00000000, 0x00000000, 0x00000000, 0x00000000,
746         0x00000000, 0x00000000, 0x00000000, 0x00000000,
747         0x00000000, 0x00000000, 0x00000000, 0x00000000,
748 };
749
750 /* Manage parameters and info used during the creation of a flow profile */
751 struct ice_flow_prof_params {
752         enum ice_block blk;
753         u16 entry_length; /* # of bytes formatted entry will require */
754         u8 es_cnt;
755         struct ice_flow_prof *prof;
756
757         /* For ACL, es[0] will hold the data of ICE_RX_MDID_PKT_FLAGS_15_0,
758          * which gives us the direction flags.
759          */
760         struct ice_fv_word es[ICE_MAX_FV_WORDS];
761         /* attributes can be used to add attributes to a particular PTYPE */
762         const struct ice_ptype_attributes *attr;
763         u16 attr_cnt;
764
765         u16 mask[ICE_MAX_FV_WORDS];
766         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
767 };
768
769 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
770         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
771         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
772         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
773         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
774         ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
775         ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
776         ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP)
777
778 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
779         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
780 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
781         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
782          ICE_FLOW_SEG_HDR_ARP)
783 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
784         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
785          ICE_FLOW_SEG_HDR_SCTP)
786 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
787 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
788         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
789
790 /**
791  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
792  * @segs: array of one or more packet segments that describe the flow
793  * @segs_cnt: number of packet segments provided
794  */
795 static enum ice_status
796 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
797 {
798         u8 i;
799
800         for (i = 0; i < segs_cnt; i++) {
801                 /* Multiple L3 headers */
802                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
803                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
804                         return ICE_ERR_PARAM;
805
806                 /* Multiple L4 headers */
807                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
808                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
809                         return ICE_ERR_PARAM;
810         }
811
812         return ICE_SUCCESS;
813 }
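
/* For instance, a segment with both ICE_FLOW_SEG_HDR_IPV4 and
 * ICE_FLOW_SEG_HDR_IPV6 set leaves two bits in
 * (hdrs & ICE_FLOW_SEG_HDRS_L3_MASK), which is not a power of two, so
 * ice_flow_val_hdrs() returns ICE_ERR_PARAM; a single L3 plus a single
 * L4 header (e.g. IPV4 | TCP) passes both checks.
 */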
814
815 /* Sizes of fixed known protocol headers without header options */
816 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
817 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
818 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
819 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
820 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
821 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
822 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
823 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
824 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
825
826 /**
827  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
828  * @params: information about the flow to be processed
829  * @seg: index of packet segment whose header size is to be determined
830  */
831 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
832 {
833         u16 sz;
834
835         /* L2 headers */
836         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
837                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
838
839         /* L3 headers */
840         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
841                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
842         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
843                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
844         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
845                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
846         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
847                 /* An L3 header is required if an L4 header is specified */
848                 return 0;
849
850         /* L4 headers */
851         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
852                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
853         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
854                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
855         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
856                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
857         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
858                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
859
860         return sz;
861 }
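
/* Worked example: a segment with ICE_FLOW_SEG_HDR_IPV4 and
 * ICE_FLOW_SEG_HDR_TCP (no VLAN) sizes to ICE_FLOW_PROT_HDR_SZ_MAC +
 * ICE_FLOW_PROT_HDR_SZ_IPV4 + ICE_FLOW_PROT_HDR_SZ_TCP =
 * 14 + 20 + 20 = 54 bytes, while an L4 header without any L3 header
 * yields 0 to flag the invalid combination.
 */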
862
863 /**
864  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
865  * @params: information about the flow to be processed
866  *
867  * This function identifies the packet types associated with the protocol
868  * headers present in the packet segments of the specified flow profile.
869  */
870 static enum ice_status
871 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
872 {
873         struct ice_flow_prof *prof;
874         u8 i;
875
876         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
877                    ICE_NONDMA_MEM);
878
879         prof = params->prof;
880
881         for (i = 0; i < params->prof->segs_cnt; i++) {
882                 const ice_bitmap_t *src;
883                 u32 hdrs;
884
885                 hdrs = prof->segs[i].hdrs;
886
887                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
888                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
889                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
890                         ice_and_bitmap(params->ptypes, params->ptypes, src,
891                                        ICE_FLOW_PTYPE_MAX);
892                 }
893
894                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
895                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
896                         ice_and_bitmap(params->ptypes, params->ptypes, src,
897                                        ICE_FLOW_PTYPE_MAX);
898                 }
899
900                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
901                         ice_and_bitmap(params->ptypes, params->ptypes,
902                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
903                                        ICE_FLOW_PTYPE_MAX);
904                 }
905
906                 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
907                         src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
908                         ice_and_bitmap(params->ptypes, params->ptypes, src,
909                                        ICE_FLOW_PTYPE_MAX);
910                 }
911                 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
912                     (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
913                         src = i ?
914                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
915                                 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
916                         ice_and_bitmap(params->ptypes, params->ptypes, src,
917                                        ICE_FLOW_PTYPE_MAX);
918                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
919                            (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
920                         src = i ?
921                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
922                                 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
923                         ice_and_bitmap(params->ptypes, params->ptypes, src,
924                                        ICE_FLOW_PTYPE_MAX);
925                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
926                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
927                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
928                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
929                         ice_and_bitmap(params->ptypes, params->ptypes, src,
930                                        ICE_FLOW_PTYPE_MAX);
931                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
932                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
933                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
934                         ice_and_bitmap(params->ptypes, params->ptypes, src,
935                                        ICE_FLOW_PTYPE_MAX);
936                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
937                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
938                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
939                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
940                         ice_and_bitmap(params->ptypes, params->ptypes, src,
941                                        ICE_FLOW_PTYPE_MAX);
942                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
943                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
944                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
945                         ice_and_bitmap(params->ptypes, params->ptypes, src,
946                                        ICE_FLOW_PTYPE_MAX);
947                 }
948
949                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
950                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
951                         ice_and_bitmap(params->ptypes, params->ptypes,
952                                        src, ICE_FLOW_PTYPE_MAX);
953                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
954                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
955                         ice_and_bitmap(params->ptypes, params->ptypes, src,
956                                        ICE_FLOW_PTYPE_MAX);
957                 } else {
958                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
959                         ice_andnot_bitmap(params->ptypes, params->ptypes, src,
960                                           ICE_FLOW_PTYPE_MAX);
961                 }
962
963                 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
964                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
965                         ice_and_bitmap(params->ptypes, params->ptypes, src,
966                                        ICE_FLOW_PTYPE_MAX);
967                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
968                         ice_and_bitmap(params->ptypes, params->ptypes,
969                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
970                                        ICE_FLOW_PTYPE_MAX);
971                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
972                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
973                         ice_and_bitmap(params->ptypes, params->ptypes, src,
974                                        ICE_FLOW_PTYPE_MAX);
975                 }
976
977                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
978                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
979                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
980                         ice_and_bitmap(params->ptypes, params->ptypes, src,
981                                        ICE_FLOW_PTYPE_MAX);
982                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
983                         if (!i) {
984                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
985                                 ice_and_bitmap(params->ptypes, params->ptypes,
986                                                src, ICE_FLOW_PTYPE_MAX);
987                         }
988                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
989                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
990                         ice_and_bitmap(params->ptypes, params->ptypes,
991                                        src, ICE_FLOW_PTYPE_MAX);
992                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
993                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
994                         ice_and_bitmap(params->ptypes, params->ptypes,
995                                        src, ICE_FLOW_PTYPE_MAX);
996                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
997                         src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
998                         ice_and_bitmap(params->ptypes, params->ptypes,
999                                        src, ICE_FLOW_PTYPE_MAX);
1000                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
1001                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1002                         ice_and_bitmap(params->ptypes, params->ptypes,
1003                                        src, ICE_FLOW_PTYPE_MAX);
1004
1005                         /* Attributes for GTP packet with downlink */
1006                         params->attr = ice_attr_gtpu_down;
1007                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1008                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
1009                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1010                         ice_and_bitmap(params->ptypes, params->ptypes,
1011                                        src, ICE_FLOW_PTYPE_MAX);
1012
1013                         /* Attributes for GTP packet with uplink */
1014                         params->attr = ice_attr_gtpu_up;
1015                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1016                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
1017                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1018                         ice_and_bitmap(params->ptypes, params->ptypes,
1019                                        src, ICE_FLOW_PTYPE_MAX);
1020
1021                         /* Attributes for GTP packet with Extension Header */
1022                         params->attr = ice_attr_gtpu_eh;
1023                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1024                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1025                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1026                         ice_and_bitmap(params->ptypes, params->ptypes,
1027                                        src, ICE_FLOW_PTYPE_MAX);
1028
1029                         /* Attributes for GTP packet without Extension Header */
1030                         params->attr = ice_attr_gtpu_session;
1031                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1032                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
1033                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
1034                         ice_and_bitmap(params->ptypes, params->ptypes,
1035                                        src, ICE_FLOW_PTYPE_MAX);
1036                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1037                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1038                         ice_and_bitmap(params->ptypes, params->ptypes,
1039                                        src, ICE_FLOW_PTYPE_MAX);
1040                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1041                         src = (const ice_bitmap_t *)ice_ptypes_esp;
1042                         ice_and_bitmap(params->ptypes, params->ptypes,
1043                                        src, ICE_FLOW_PTYPE_MAX);
1044                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1045                         src = (const ice_bitmap_t *)ice_ptypes_ah;
1046                         ice_and_bitmap(params->ptypes, params->ptypes,
1047                                        src, ICE_FLOW_PTYPE_MAX);
1048                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1049                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1050                         ice_and_bitmap(params->ptypes, params->ptypes,
1051                                        src, ICE_FLOW_PTYPE_MAX);
1052                 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1053                         src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1054                         ice_and_bitmap(params->ptypes, params->ptypes,
1055                                        src, ICE_FLOW_PTYPE_MAX);
1056                 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1057                         src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1058                         ice_and_bitmap(params->ptypes, params->ptypes,
1059                                        src, ICE_FLOW_PTYPE_MAX);
1060                 }
1061
1062                 if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
1063                         src = (const ice_bitmap_t *)ice_ptypes_ppp;
1064                         ice_and_bitmap(params->ptypes, params->ptypes,
1065                                        src, ICE_FLOW_PTYPE_MAX);
1066                 }
1067
1068                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1069                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1070                                 src =
1071                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1072                         else
1073                                 src =
1074                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1075
1076                         ice_and_bitmap(params->ptypes, params->ptypes,
1077                                        src, ICE_FLOW_PTYPE_MAX);
1078                 } else {
1079                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1080                         ice_andnot_bitmap(params->ptypes, params->ptypes,
1081                                           src, ICE_FLOW_PTYPE_MAX);
1082
1083                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1084                         ice_andnot_bitmap(params->ptypes, params->ptypes,
1085                                           src, ICE_FLOW_PTYPE_MAX);
1086                 }
1087         }
1088
1089         return ICE_SUCCESS;
1090 }
1091
1092 /**
1093  * ice_flow_xtract_pkt_flags - Create an extraction sequence entry for packet flags
1094  * @hw: pointer to the HW struct
1095  * @params: information about the flow to be processed
1096  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1097  *
1098  * This function allocates an extraction sequence entry for a DWORD-sized
1099  * chunk of the packet flags.
1100  */
1101 static enum ice_status
1102 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1103                           struct ice_flow_prof_params *params,
1104                           enum ice_flex_mdid_pkt_flags flags)
1105 {
1106         u8 fv_words = hw->blk[params->blk].es.fvw;
1107         u8 idx;
1108
1109         /* Make sure the number of extraction sequence entries required does not
1110          * exceed the block's capacity.
1111          */
1112         if (params->es_cnt >= fv_words)
1113                 return ICE_ERR_MAX_LIMIT;
1114
1115         /* some blocks require a reversed field vector layout */
1116         if (hw->blk[params->blk].es.reverse)
1117                 idx = fv_words - params->es_cnt - 1;
1118         else
1119                 idx = params->es_cnt;
1120
1121         params->es[idx].prot_id = ICE_PROT_META_ID;
1122         params->es[idx].off = flags;
1123         params->es_cnt++;
1124
1125         return ICE_SUCCESS;
1126 }
1127
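/* Illustrative arithmetic, using a hypothetical field vector width: for a
 * block whose field vector is 48 words wide and uses a reversed layout, the
 * first packet-flags entry (es_cnt == 0) is placed at index 48 - 0 - 1 = 47,
 * the next at 46, and so on; with a non-reversed layout the indices are
 * simply 0, 1, ...
 */
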
1128 /**
1129  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1130  * @hw: pointer to the HW struct
1131  * @params: information about the flow to be processed
1132  * @seg: packet segment index of the field to be extracted
1133  * @fld: ID of field to be extracted
1134  * @match: bitfield of all fields
1135  *
1136  * This function determines the protocol ID, offset, and size of the given
1137  * field. It then allocates one or more extraction sequence entries for the
1138  * given field, and fills the entries with protocol ID and offset information.
1139  */
1140 static enum ice_status
1141 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1142                     u8 seg, enum ice_flow_field fld, u64 match)
1143 {
1144         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1145         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1146         u8 fv_words = hw->blk[params->blk].es.fvw;
1147         struct ice_flow_fld_info *flds;
1148         u16 cnt, ese_bits, i;
1149         u16 sib_mask = 0;
1150         u16 mask;
1151         u16 off;
1152
1153         flds = params->prof->segs[seg].fields;
1154
1155         switch (fld) {
1156         case ICE_FLOW_FIELD_IDX_ETH_DA:
1157         case ICE_FLOW_FIELD_IDX_ETH_SA:
1158         case ICE_FLOW_FIELD_IDX_S_VLAN:
1159         case ICE_FLOW_FIELD_IDX_C_VLAN:
1160                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1161                 break;
1162         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1163                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1164                 break;
1165         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1166                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1167                 break;
1168         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1169                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1170                 break;
1171         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1172         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1173                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1174
1175                 /* TTL and PROT share the same extraction sequence entry;
1176                  * each is considered the sibling of the other with respect
1177                  * to that shared entry.
1178                  */
1179                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1180                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1181                 else
1182                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1183
1184                 /* If the sibling field is also included, that field's
1185                  * mask needs to be included.
1186                  */
1187                 if (match & BIT(sib))
1188                         sib_mask = ice_flds_info[sib].mask;
1189                 break;
1190         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1191         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1192                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1193
1194                 /* TTL and PROT share the same extraction sequence entry;
1195                  * each is considered the sibling of the other with respect
1196                  * to that shared entry.
1197                  */
1198                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1199                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1200                 else
1201                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1202
1203                 /* If the sibling field is also included, that field's
1204                  * mask needs to be included.
1205                  */
1206                 if (match & BIT(sib))
1207                         sib_mask = ice_flds_info[sib].mask;
1208                 break;
1209         case ICE_FLOW_FIELD_IDX_IPV4_SA:
1210         case ICE_FLOW_FIELD_IDX_IPV4_DA:
1211                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1212                 break;
1213         case ICE_FLOW_FIELD_IDX_IPV6_SA:
1214         case ICE_FLOW_FIELD_IDX_IPV6_DA:
1215         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1216         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1217         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1218         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1219         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1220         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1221                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1222                 break;
1223         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1224         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1225         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1226                 prot_id = ICE_PROT_TCP_IL;
1227                 break;
1228         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1229         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1230                 prot_id = ICE_PROT_UDP_IL_OR_S;
1231                 break;
1232         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1233         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1234                 prot_id = ICE_PROT_SCTP_IL;
1235                 break;
1236         case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1237         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1238         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1239         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1240         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1241         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1242         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1243                 /* GTP is accessed through UDP OF protocol */
1244                 prot_id = ICE_PROT_UDP_OF;
1245                 break;
1246         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1247                 prot_id = ICE_PROT_PPPOE;
1248                 break;
1249         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1250                 prot_id = ICE_PROT_UDP_IL_OR_S;
1251                 break;
1252         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1253                 prot_id = ICE_PROT_L2TPV3;
1254                 break;
1255         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1256                 prot_id = ICE_PROT_ESP_F;
1257                 break;
1258         case ICE_FLOW_FIELD_IDX_AH_SPI:
1259                 prot_id = ICE_PROT_ESP_2;
1260                 break;
1261         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1262                 prot_id = ICE_PROT_UDP_IL_OR_S;
1263                 break;
1264         case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1265                 prot_id = ICE_PROT_ECPRI;
1266                 break;
1267         case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1268                 prot_id = ICE_PROT_UDP_IL_OR_S;
1269                 break;
1270         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1271         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1272         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1273         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1274         case ICE_FLOW_FIELD_IDX_ARP_OP:
1275                 prot_id = ICE_PROT_ARP_OF;
1276                 break;
1277         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1278         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1279                 /* ICMP type and code share the same extraction seq. entry */
1280                 prot_id = (params->prof->segs[seg].hdrs &
1281                            ICE_FLOW_SEG_HDR_IPV4) ?
1282                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1283                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1284                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1285                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1286                 break;
1287         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1288                 prot_id = ICE_PROT_GRE_OF;
1289                 break;
1290         default:
1291                 return ICE_ERR_NOT_IMPL;
1292         }
1293
1294         /* Each extraction sequence entry is a word in size, and extracts a
1295          * word from a word-aligned offset within a protocol header.
1296          */
1297         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1298
1299         flds[fld].xtrct.prot_id = prot_id;
1300         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1301                 ICE_FLOW_FV_EXTRACT_SZ;
1302         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1303         flds[fld].xtrct.idx = params->es_cnt;
1304         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1305
1306         /* Adjust the next field-entry index after accommodating the number of
1307          * entries this field consumes
1308          */
1309         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1310                                   ice_flds_info[fld].size, ese_bits);
1311
1312         /* Fill in the extraction sequence entries needed for this field */
1313         off = flds[fld].xtrct.off;
1314         mask = flds[fld].xtrct.mask;
1315         for (i = 0; i < cnt; i++) {
1316                 /* Only consume an extraction sequence entry if there is no
1317                  * sibling field associated with this field, or if the sibling's
1318                  * entry does not already extract the word shared with this field.
1319                  */
1320                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1321                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1322                     flds[sib].xtrct.off != off) {
1323                         u8 idx;
1324
1325                         /* Make sure the number of extraction sequence entries
1326                          * required does not exceed the block's capacity
1327                          */
1328                         if (params->es_cnt >= fv_words)
1329                                 return ICE_ERR_MAX_LIMIT;
1330
1331                         /* some blocks require a reversed field vector layout */
1332                         if (hw->blk[params->blk].es.reverse)
1333                                 idx = fv_words - params->es_cnt - 1;
1334                         else
1335                                 idx = params->es_cnt;
1336
1337                         params->es[idx].prot_id = prot_id;
1338                         params->es[idx].off = off;
1339                         params->mask[idx] = mask | sib_mask;
1340                         params->es_cnt++;
1341                 }
1342
1343                 off += ICE_FLOW_FV_EXTRACT_SZ;
1344         }
1345
1346         return ICE_SUCCESS;
1347 }
1348
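/* Worked example with a hypothetical field (illustrative only), assuming
 * ICE_FLOW_FV_EXTRACT_SZ is 2 bytes (one word): for a field starting at bit
 * offset 72 with a size of 8 bits, ese_bits is 2 * 8 = 16, so
 * xtrct.off = (72 / 16) * 2 = 8 bytes, xtrct.disp = 72 % 16 = 8 bits, and
 * cnt = DIVIDE_AND_ROUND_UP(8 + 8, 16) = 1 extraction word.
 */
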
1349 /**
1350  * ice_flow_xtract_raws - Create extraction sequence entries for raw bytes
1351  * @hw: pointer to the HW struct
1352  * @params: information about the flow to be processed
1353  * @seg: index of packet segment whose raw fields are to be extracted
1354  */
1355 static enum ice_status
1356 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1357                      u8 seg)
1358 {
1359         u16 fv_words;
1360         u16 hdrs_sz;
1361         u8 i;
1362
1363         if (!params->prof->segs[seg].raws_cnt)
1364                 return ICE_SUCCESS;
1365
1366         if (params->prof->segs[seg].raws_cnt >
1367             ARRAY_SIZE(params->prof->segs[seg].raws))
1368                 return ICE_ERR_MAX_LIMIT;
1369
1370         /* Offsets within the segment headers are not supported */
1371         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1372         if (!hdrs_sz)
1373                 return ICE_ERR_PARAM;
1374
1375         fv_words = hw->blk[params->blk].es.fvw;
1376
1377         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1378                 struct ice_flow_seg_fld_raw *raw;
1379                 u16 off, cnt, j;
1380
1381                 raw = &params->prof->segs[seg].raws[i];
1382
1383                 /* Storing extraction information */
1384                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1385                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1386                         ICE_FLOW_FV_EXTRACT_SZ;
1387                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1388                         BITS_PER_BYTE;
1389                 raw->info.xtrct.idx = params->es_cnt;
1390
1391                 /* Determine the number of field vector entries this raw field
1392                  * consumes.
1393                  */
1394                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1395                                           (raw->info.src.last * BITS_PER_BYTE),
1396                                           (ICE_FLOW_FV_EXTRACT_SZ *
1397                                            BITS_PER_BYTE));
1398                 off = raw->info.xtrct.off;
1399                 for (j = 0; j < cnt; j++) {
1400                         u16 idx;
1401
1402                         /* Make sure the number of extraction sequence entries
1403                          * required does not exceed the block's capacity
1404                          */
1405                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1406                             params->es_cnt >= ICE_MAX_FV_WORDS)
1407                                 return ICE_ERR_MAX_LIMIT;
1408
1409                         /* some blocks require a reversed field vector layout */
1410                         if (hw->blk[params->blk].es.reverse)
1411                                 idx = fv_words - params->es_cnt - 1;
1412                         else
1413                                 idx = params->es_cnt;
1414
1415                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1416                         params->es[idx].off = off;
1417                         params->es_cnt++;
1418                         off += ICE_FLOW_FV_EXTRACT_SZ;
1419                 }
1420         }
1421
1422         return ICE_SUCCESS;
1423 }
1424
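/* Worked example with a hypothetical raw field (illustrative only), assuming
 * ICE_FLOW_FV_EXTRACT_SZ is 2 bytes: a 3-byte match starting at byte offset 5
 * of the segment yields xtrct.off = (5 / 2) * 2 = 4,
 * xtrct.disp = (5 % 2) * 8 = 8 bits, and
 * cnt = DIVIDE_AND_ROUND_UP(8 + 3 * 8, 16) = 2 field vector words.
 */
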
1425 /**
1426  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1427  * @hw: pointer to the HW struct
1428  * @params: information about the flow to be processed
1429  *
1430  * This function iterates through all matched fields in the given segments, and
1431  * creates an extraction sequence for the fields.
1432  */
1433 static enum ice_status
1434 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1435                           struct ice_flow_prof_params *params)
1436 {
1437         enum ice_status status = ICE_SUCCESS;
1438         u8 i;
1439
1440         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1441          * packet flags
1442          */
1443         if (params->blk == ICE_BLK_ACL) {
1444                 status = ice_flow_xtract_pkt_flags(hw, params,
1445                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1446                 if (status)
1447                         return status;
1448         }
1449
1450         for (i = 0; i < params->prof->segs_cnt; i++) {
1451                 u64 match = params->prof->segs[i].match;
1452                 enum ice_flow_field j;
1453
1454                 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1455                                      ICE_FLOW_FIELD_IDX_MAX) {
1456                         status = ice_flow_xtract_fld(hw, params, i, j, match);
1457                         if (status)
1458                                 return status;
1459                         ice_clear_bit(j, (ice_bitmap_t *)&match);
1460                 }
1461
1462                 /* Process raw matching bytes */
1463                 status = ice_flow_xtract_raws(hw, params, i);
1464                 if (status)
1465                         return status;
1466         }
1467
1468         return status;
1469 }
1470
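/* Example (illustrative only): a profile with one segment whose match bitmap
 * has the bits for ICE_FLOW_FIELD_IDX_IPV4_SA and ICE_FLOW_FIELD_IDX_IPV4_DA
 * set results in two ice_flow_xtract_fld() calls for that segment, followed
 * by a single ice_flow_xtract_raws() call for any raw byte matches.
 */
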
1471 /**
1472  * ice_flow_sel_acl_scen - select an ACL scenario for a flow profile
1473  * @hw: pointer to the hardware structure
1474  * @params: information about the flow to be processed
1475  *
1476  * This function selects the best-fit (narrowest sufficient) ACL scenario
1477  * for the entry length described by the given params
1478  */
1479 static enum ice_status
1480 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1481 {
1482         /* Find the best-fit scenario for the provided match width */
1483         struct ice_acl_scen *cand_scen = NULL, *scen;
1484
1485         if (!hw->acl_tbl)
1486                 return ICE_ERR_DOES_NOT_EXIST;
1487
1488         /* Loop through each scenario and match against the scenario width
1489          * to select the specific scenario
1490          */
1491         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1492                 if (scen->eff_width >= params->entry_length &&
1493                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1494                         cand_scen = scen;
1495         if (!cand_scen)
1496                 return ICE_ERR_DOES_NOT_EXIST;
1497
1498         params->prof->cfg.scen = cand_scen;
1499
1500         return ICE_SUCCESS;
1501 }
1502
1503 /**
1504  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1505  * @params: information about the flow to be processed
1506  */
1507 static enum ice_status
1508 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1509 {
1510         u16 index, i, range_idx = 0;
1511
1512         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1513
1514         for (i = 0; i < params->prof->segs_cnt; i++) {
1515                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1516                 u8 j;
1517
1518                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1519                                      ICE_FLOW_FIELD_IDX_MAX) {
1520                         struct ice_flow_fld_info *fld = &seg->fields[j];
1521
1522                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1523
1524                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1525                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1526
1527                                 /* Range checking only supported for single
1528                                  * words
1529                                  */
1530                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1531                                                         fld->xtrct.disp,
1532                                                         BITS_PER_BYTE * 2) > 1)
1533                                         return ICE_ERR_PARAM;
1534
1535                                 /* Ranges must define low and high values */
1536                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1537                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1538                                         return ICE_ERR_PARAM;
1539
1540                                 fld->entry.val = range_idx++;
1541                         } else {
1542                                 /* Store adjusted byte-length of field for later
1543                                  * use, taking into account potential
1544                                  * non-byte-aligned displacement
1545                                  */
1546                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1547                                         (ice_flds_info[j].size +
1548                                          (fld->xtrct.disp % BITS_PER_BYTE),
1549                                          BITS_PER_BYTE);
1550                                 fld->entry.val = index;
1551                                 index += fld->entry.last;
1552                         }
1553                 }
1554
1555                 for (j = 0; j < seg->raws_cnt; j++) {
1556                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1557
1558                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1559                         raw->info.entry.val = index;
1560                         raw->info.entry.last = raw->info.src.last;
1561                         index += raw->info.entry.last;
1562                 }
1563         }
1564
1565         /* Currently we only support using the byte selection base, which
1566          * only allows for an effective entry size of 30 bytes. Reject
1567          * anything larger.
1568          */
1569         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1570                 return ICE_ERR_PARAM;
1571
1572         /* Only 8 range checkers per profile, reject anything trying to use
1573          * more
1574          */
1575         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1576                 return ICE_ERR_PARAM;
1577
1578         /* Store # bytes required for entry for later use */
1579         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1580
1581         return ICE_SUCCESS;
1582 }
1583
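/* Example layout (hypothetical fields, illustrative only): for a single
 * segment matching one 2-byte field with a byte-aligned displacement and one
 * 4-byte raw field, the field is assigned
 * entry.val = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX with entry.last = 2, the
 * raw field starts 2 bytes later with entry.last = 4, and entry_length works
 * out to 2 + 4 = 6 bytes.
 */
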
1584 /**
1585  * ice_flow_proc_segs - process all packet segments associated with a profile
1586  * @hw: pointer to the HW struct
1587  * @params: information about the flow to be processed
1588  */
1589 static enum ice_status
1590 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1591 {
1592         enum ice_status status;
1593
1594         status = ice_flow_proc_seg_hdrs(params);
1595         if (status)
1596                 return status;
1597
1598         status = ice_flow_create_xtrct_seq(hw, params);
1599         if (status)
1600                 return status;
1601
1602         switch (params->blk) {
1603         case ICE_BLK_FD:
1604         case ICE_BLK_RSS:
1605                 status = ICE_SUCCESS;
1606                 break;
1607         case ICE_BLK_ACL:
1608                 status = ice_flow_acl_def_entry_frmt(params);
1609                 if (status)
1610                         return status;
1611                 status = ice_flow_sel_acl_scen(hw, params);
1612                 if (status)
1613                         return status;
1614                 break;
1615         default:
1616                 return ICE_ERR_NOT_IMPL;
1617         }
1618
1619         return status;
1620 }
1621
1622 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1623 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1624 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1625
1626 /**
1627  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1628  * @hw: pointer to the HW struct
1629  * @blk: classification stage
1630  * @dir: flow direction
1631  * @segs: array of one or more packet segments that describe the flow
1632  * @segs_cnt: number of packet segments provided
1633  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1634  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1635  */
1636 static struct ice_flow_prof *
1637 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1638                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1639                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1640 {
1641         struct ice_flow_prof *p, *prof = NULL;
1642
1643         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1644         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1645                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1646                     segs_cnt && segs_cnt == p->segs_cnt) {
1647                         u8 i;
1648
1649                         /* Check for profile-VSI association if specified */
1650                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1651                             ice_is_vsi_valid(hw, vsi_handle) &&
1652                             !ice_is_bit_set(p->vsis, vsi_handle))
1653                                 continue;
1654
1655                         /* Protocol headers must be checked. Matched fields are
1656                          * checked if specified.
1657                          */
1658                         for (i = 0; i < segs_cnt; i++)
1659                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1660                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1661                                      segs[i].match != p->segs[i].match))
1662                                         break;
1663
1664                         /* A match is found if all segments are matched */
1665                         if (i == segs_cnt) {
1666                                 prof = p;
1667                                 break;
1668                         }
1669                 }
1670         ice_release_lock(&hw->fl_profs_locks[blk]);
1671
1672         return prof;
1673 }
1674
1675 /**
1676  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1677  * @hw: pointer to the HW struct
1678  * @blk: classification stage
1679  * @dir: flow direction
1680  * @segs: array of one or more packet segments that describe the flow
1681  * @segs_cnt: number of packet segments provided
1682  */
1683 u64
1684 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1685                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1686 {
1687         struct ice_flow_prof *p;
1688
1689         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1690                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1691
1692         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1693 }
1694
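/* Usage sketch (hypothetical arguments, illustrative only): a caller that
 * rebuilds the same segs array it previously passed to ice_flow_add_prof()
 * can recover the profile ID with
 *
 *      u64 id = ice_flow_find_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, segs, 1);
 *
 * and compare the result against ICE_FLOW_PROF_ID_INVAL to detect a miss.
 */
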
1695 /**
1696  * ice_flow_find_prof_id - Look up a profile with given profile ID
1697  * @hw: pointer to the HW struct
1698  * @blk: classification stage
1699  * @prof_id: unique ID to identify this flow profile
1700  */
1701 static struct ice_flow_prof *
1702 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1703 {
1704         struct ice_flow_prof *p;
1705
1706         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1707                 if (p->id == prof_id)
1708                         return p;
1709
1710         return NULL;
1711 }
1712
1713 /**
1714  * ice_dealloc_flow_entry - Deallocate flow entry memory
1715  * @hw: pointer to the HW struct
1716  * @entry: flow entry to be removed
1717  */
1718 static void
1719 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1720 {
1721         if (!entry)
1722                 return;
1723
1724         if (entry->entry)
1725                 ice_free(hw, entry->entry);
1726
1727         if (entry->range_buf) {
1728                 ice_free(hw, entry->range_buf);
1729                 entry->range_buf = NULL;
1730         }
1731
1732         if (entry->acts) {
1733                 ice_free(hw, entry->acts);
1734                 entry->acts = NULL;
1735                 entry->acts_cnt = 0;
1736         }
1737
1738         ice_free(hw, entry);
1739 }
1740
1741 /**
1742  * ice_flow_get_hw_prof - return the HW profile ID for a given profile ID handle
1743  * @hw: pointer to the HW struct
1744  * @blk: classification stage
1745  * @prof_id: the profile ID handle
1746  * @hw_prof_id: pointer to variable to receive the HW profile ID
1747  */
1748 enum ice_status
1749 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1750                      u8 *hw_prof_id)
1751 {
1752         enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1753         struct ice_prof_map *map;
1754
1755         ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1756         map = ice_search_prof_id(hw, blk, prof_id);
1757         if (map) {
1758                 *hw_prof_id = map->prof_id;
1759                 status = ICE_SUCCESS;
1760         }
1761         ice_release_lock(&hw->blk[blk].es.prof_map_lock);
1762         return status;
1763 }
1764
1765 #define ICE_ACL_INVALID_SCEN    0x3f
1766
1767 /**
1768  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1769  * @hw: pointer to the hardware structure
1770  * @prof: pointer to flow profile
1771  * @buf: destination buffer the function writes a partial extraction sequence to
1772  *
1773  * Returns ICE_SUCCESS if no PF is associated with the given profile,
1774  * ICE_ERR_IN_USE if at least one PF is associated with the given profile,
1775  * or another error code if a real error occurs.
1776  */
1777 static enum ice_status
1778 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1779                             struct ice_aqc_acl_prof_generic_frmt *buf)
1780 {
1781         enum ice_status status;
1782         u8 prof_id = 0;
1783
1784         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1785         if (status)
1786                 return status;
1787
1788         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1789         if (status)
1790                 return status;
1791
1792         /* If the scenarios associated with all PFs are all 0 or all
1793          * ICE_ACL_INVALID_SCEN (63) for the given profile, then the profile
1794          * has not been configured yet.
1795          */
1796         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1797             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1798             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1799             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1800                 return ICE_SUCCESS;
1801
1802         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1803             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1804             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1805             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1806             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1807             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1808             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1809             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1810                 return ICE_SUCCESS;
1811
1812         return ICE_ERR_IN_USE;
1813 }
1814
1815 /**
1816  * ice_flow_acl_free_act_cntr - Free ACL counters used by the rule's actions
1817  * @hw: pointer to the hardware structure
1818  * @acts: array of actions to be performed on a match
1819  * @acts_cnt: number of actions
1820  */
1821 static enum ice_status
1822 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1823                            u8 acts_cnt)
1824 {
1825         int i;
1826
1827         for (i = 0; i < acts_cnt; i++) {
1828                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1829                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1830                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1831                         struct ice_acl_cntrs cntrs = { 0 };
1832                         enum ice_status status;
1833
1834                         /* amount is unused in the dealloc path but the common
1835                          * parameter check routine wants a value set, as zero
1836                          * is invalid for the check. Just set it.
1837                          */
1838                         cntrs.amount = 1;
1839                         cntrs.bank = 0; /* Only bank0 for the moment */
1840                         cntrs.first_cntr =
1841                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1842                         cntrs.last_cntr =
1843                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1844
1845                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1846                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1847                         else
1848                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1849
1850                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1851                         if (status)
1852                                 return status;
1853                 }
1854         }
1855         return ICE_SUCCESS;
1856 }
1857
1858 /**
1859  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1860  * @hw: pointer to the hardware structure
1861  * @prof: pointer to flow profile
1862  *
1863  * Disassociate the scenario from the profile for the PF of the VSI.
1864  */
1865 static enum ice_status
1866 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1867 {
1868         struct ice_aqc_acl_prof_generic_frmt buf;
1869         enum ice_status status = ICE_SUCCESS;
1870         u8 prof_id = 0;
1871
1872         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1873
1874         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1875         if (status)
1876                 return status;
1877
1878         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1879         if (status)
1880                 return status;
1881
1882         /* Clear scenario for this PF */
1883         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1884         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
1885
1886         return status;
1887 }
1888
1889 /**
1890  * ice_flow_rem_entry_sync - Remove a flow entry
1891  * @hw: pointer to the HW struct
1892  * @blk: classification stage
1893  * @entry: flow entry to be removed
1894  */
1895 static enum ice_status
1896 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1897                         struct ice_flow_entry *entry)
1898 {
1899         if (!entry)
1900                 return ICE_ERR_BAD_PTR;
1901
1902         if (blk == ICE_BLK_ACL) {
1903                 enum ice_status status;
1904
1905                 if (!entry->prof)
1906                         return ICE_ERR_BAD_PTR;
1907
1908                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1909                                            entry->scen_entry_idx);
1910                 if (status)
1911                         return status;
1912
1913                 /* Checks if we need to release an ACL counter. */
1914                 if (entry->acts_cnt && entry->acts)
1915                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1916                                                    entry->acts_cnt);
1917         }
1918
1919         LIST_DEL(&entry->l_entry);
1920
1921         ice_dealloc_flow_entry(hw, entry);
1922
1923         return ICE_SUCCESS;
1924 }
1925
1926 /**
1927  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1928  * @hw: pointer to the HW struct
1929  * @blk: classification stage
1930  * @dir: flow direction
1931  * @prof_id: unique ID to identify this flow profile
1932  * @segs: array of one or more packet segments that describe the flow
1933  * @segs_cnt: number of packet segments provided
1934  * @acts: array of default actions
1935  * @acts_cnt: number of default actions
1936  * @prof: stores the returned flow profile added
1937  *
1938  * Assumption: the caller has acquired the lock to the profile list
1939  */
1940 static enum ice_status
1941 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1942                        enum ice_flow_dir dir, u64 prof_id,
1943                        struct ice_flow_seg_info *segs, u8 segs_cnt,
1944                        struct ice_flow_action *acts, u8 acts_cnt,
1945                        struct ice_flow_prof **prof)
1946 {
1947         struct ice_flow_prof_params *params;
1948         enum ice_status status;
1949         u8 i;
1950
1951         if (!prof || (acts_cnt && !acts))
1952                 return ICE_ERR_BAD_PTR;
1953
1954         params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
1955         if (!params)
1956                 return ICE_ERR_NO_MEMORY;
1957
1958         params->prof = (struct ice_flow_prof *)
1959                 ice_malloc(hw, sizeof(*params->prof));
1960         if (!params->prof) {
1961                 status = ICE_ERR_NO_MEMORY;
1962                 goto free_params;
1963         }
1964
1965         /* initialize extraction sequence to all invalid (0xff) */
1966         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1967                 params->es[i].prot_id = ICE_PROT_INVALID;
1968                 params->es[i].off = ICE_FV_OFFSET_INVAL;
1969         }
1970
1971         params->blk = blk;
1972         params->prof->id = prof_id;
1973         params->prof->dir = dir;
1974         params->prof->segs_cnt = segs_cnt;
1975
1976         /* Make a copy of the segments that need to be persistent in the flow
1977          * profile instance
1978          */
1979         for (i = 0; i < segs_cnt; i++)
1980                 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
1981                            ICE_NONDMA_TO_NONDMA);
1982
1983         /* Make a copy of the actions that need to be persistent in the flow
1984          * profile instance.
1985          */
1986         if (acts_cnt) {
1987                 params->prof->acts = (struct ice_flow_action *)
1988                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1989                                    ICE_NONDMA_TO_NONDMA);
1990
1991                 if (!params->prof->acts) {
1992                         status = ICE_ERR_NO_MEMORY;
1993                         goto out;
1994                 }
1995         }
1996
1997         status = ice_flow_proc_segs(hw, params);
1998         if (status) {
1999                 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2000                 goto out;
2001         }
2002
2003         /* Add a HW profile for this flow profile */
2004         status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2005                               params->attr, params->attr_cnt, params->es,
2006                               params->mask);
2007         if (status) {
2008                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
2009                 goto out;
2010         }
2011
2012         INIT_LIST_HEAD(&params->prof->entries);
2013         ice_init_lock(&params->prof->entries_lock);
2014         *prof = params->prof;
2015
2016 out:
2017         if (status) {
2018                 if (params->prof->acts)
2019                         ice_free(hw, params->prof->acts);
2020                 ice_free(hw, params->prof);
2021         }
2022 free_params:
2023         ice_free(hw, params);
2024
2025         return status;
2026 }
2027
2028 /**
2029  * ice_flow_rem_prof_sync - remove a flow profile
2030  * @hw: pointer to the hardware structure
2031  * @blk: classification stage
2032  * @prof: pointer to flow profile to remove
2033  *
2034  * Assumption: the caller has acquired the lock to the profile list
2035  */
2036 static enum ice_status
2037 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2038                        struct ice_flow_prof *prof)
2039 {
2040         enum ice_status status;
2041
2042         /* Remove all remaining flow entries before removing the flow profile */
2043         if (!LIST_EMPTY(&prof->entries)) {
2044                 struct ice_flow_entry *e, *t;
2045
2046                 ice_acquire_lock(&prof->entries_lock);
2047
2048                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2049                                          l_entry) {
2050                         status = ice_flow_rem_entry_sync(hw, blk, e);
2051                         if (status)
2052                                 break;
2053                 }
2054
2055                 ice_release_lock(&prof->entries_lock);
2056         }
2057
2058         if (blk == ICE_BLK_ACL) {
2059                 struct ice_aqc_acl_profile_ranges query_rng_buf;
2060                 struct ice_aqc_acl_prof_generic_frmt buf;
2061                 u8 prof_id = 0;
2062
2063                 /* Disassociate the scenario from the profile for the PF */
2064                 status = ice_flow_acl_disassoc_scen(hw, prof);
2065                 if (status)
2066                         return status;
2067
2068                 /* Clear the range-checker if the profile ID is no longer
2069                  * used by any PF
2070                  */
2071                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2072                 if (status && status != ICE_ERR_IN_USE) {
2073                         return status;
2074                 } else if (!status) {
2075                         /* Clear the range-checker value for profile ID */
2076                         ice_memset(&query_rng_buf, 0,
2077                                    sizeof(struct ice_aqc_acl_profile_ranges),
2078                                    ICE_NONDMA_MEM);
2079
2080                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
2081                                                       &prof_id);
2082                         if (status)
2083                                 return status;
2084
2085                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2086                                                           &query_rng_buf, NULL);
2087                         if (status)
2088                                 return status;
2089                 }
2090         }
2091
2092         /* Remove all hardware profiles associated with this flow profile */
2093         status = ice_rem_prof(hw, blk, prof->id);
2094         if (!status) {
2095                 LIST_DEL(&prof->l_entry);
2096                 ice_destroy_lock(&prof->entries_lock);
2097                 if (prof->acts)
2098                         ice_free(hw, prof->acts);
2099                 ice_free(hw, prof);
2100         }
2101
2102         return status;
2103 }
2104
2105 /**
2106  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2107  * @buf: Destination buffer the function writes a partial xtrct sequence to
2108  * @info: Info about field
2109  */
2110 static void
2111 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2112                                struct ice_flow_fld_info *info)
2113 {
2114         u16 dst, i;
2115         u8 src;
2116
2117         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2118                 info->xtrct.disp / BITS_PER_BYTE;
2119         dst = info->entry.val;
2120         for (i = 0; i < info->entry.last; i++)
2121                 /* HW stores field vector words in LE, convert words back to BE
2122                  * so constructed entries will end up in network order
2123                  */
2124                 buf->byte_selection[dst++] = src++ ^ 1;
2125 }
2126
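/* Worked example of the byte swap above (hypothetical values), assuming
 * ICE_FLOW_FV_EXTRACT_SZ is 2 bytes: for a field with xtrct.idx = 3 and a
 * byte-aligned displacement, src starts at FV byte 6, so the bytes selected
 * are 6 ^ 1 = 7 followed by 7 ^ 1 = 6, i.e. the two bytes of the
 * little-endian FV word are emitted in network (big-endian) order.
 */
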
2127 /**
2128  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2129  * @hw: pointer to the hardware structure
2130  * @prof: pointer to flow profile
2131  */
2132 static enum ice_status
2133 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2134 {
2135         struct ice_aqc_acl_prof_generic_frmt buf;
2136         struct ice_flow_fld_info *info;
2137         enum ice_status status;
2138         u8 prof_id = 0;
2139         u16 i;
2140
2141         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2142
2143         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2144         if (status)
2145                 return status;
2146
2147         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2148         if (status && status != ICE_ERR_IN_USE)
2149                 return status;
2150
2151         if (!status) {
2152                 /* Program the profile-dependent configuration. This is done
2153                  * only once regardless of the number of PFs using that profile
2154                  */
2155                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2156
2157                 for (i = 0; i < prof->segs_cnt; i++) {
2158                         struct ice_flow_seg_info *seg = &prof->segs[i];
2159                         u16 j;
2160
2161                         ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2162                                              ICE_FLOW_FIELD_IDX_MAX) {
2163                                 info = &seg->fields[j];
2164
2165                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2166                                         buf.word_selection[info->entry.val] =
2167                                                 info->xtrct.idx;
2168                                 else
2169                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
2170                                                                        info);
2171                         }
2172
2173                         for (j = 0; j < seg->raws_cnt; j++) {
2174                                 info = &seg->raws[j].info;
2175                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2176                         }
2177                 }
2178
2179                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2180                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2181                            ICE_NONDMA_MEM);
2182         }
2183
2184         /* Update the current PF */
2185         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2186         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2187
2188         return status;
2189 }
2190
2191 /**
2192  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2193  * @hw: pointer to the hardware structure
2194  * @blk: classification stage
2195  * @vsi_handle: software VSI handle
2196  * @vsig: target VSI group
2197  *
2198  * Assumption: the caller has already verified that the VSI to
2199  * be added has the same characteristics as the VSIG and will
2200  * thereby have access to all resources added to that VSIG.
2201  */
2202 enum ice_status
2203 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2204                         u16 vsig)
2205 {
2206         enum ice_status status;
2207
2208         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2209                 return ICE_ERR_PARAM;
2210
2211         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2212         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2213                                   vsig);
2214         ice_release_lock(&hw->fl_profs_locks[blk]);
2215
2216         return status;
2217 }
2218
2219 /**
2220  * ice_flow_assoc_prof - associate a VSI with a flow profile
2221  * @hw: pointer to the hardware structure
2222  * @blk: classification stage
2223  * @prof: pointer to flow profile
2224  * @vsi_handle: software VSI handle
2225  *
2226  * Assumption: the caller has acquired the lock to the profile list
2227  * and the software VSI handle has been validated
2228  */
2229 enum ice_status
2230 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2231                     struct ice_flow_prof *prof, u16 vsi_handle)
2232 {
2233         enum ice_status status = ICE_SUCCESS;
2234
2235         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2236                 if (blk == ICE_BLK_ACL) {
2237                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2238                         if (status)
2239                                 return status;
2240                 }
2241                 status = ice_add_prof_id_flow(hw, blk,
2242                                               ice_get_hw_vsi_num(hw,
2243                                                                  vsi_handle),
2244                                               prof->id);
2245                 if (!status)
2246                         ice_set_bit(vsi_handle, prof->vsis);
2247                 else
2248                         ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2249                                   status);
2250         }
2251
2252         return status;
2253 }
2254
2255 /**
2256  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2257  * @hw: pointer to the hardware structure
2258  * @blk: classification stage
2259  * @prof: pointer to flow profile
2260  * @vsi_handle: software VSI handle
2261  *
2262  * Assumption: the caller has acquired the lock to the profile list
2263  * and the software VSI handle has been validated
2264  */
2265 static enum ice_status
2266 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2267                        struct ice_flow_prof *prof, u16 vsi_handle)
2268 {
2269         enum ice_status status = ICE_SUCCESS;
2270
2271         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2272                 status = ice_rem_prof_id_flow(hw, blk,
2273                                               ice_get_hw_vsi_num(hw,
2274                                                                  vsi_handle),
2275                                               prof->id);
2276                 if (!status)
2277                         ice_clear_bit(vsi_handle, prof->vsis);
2278                 else
2279                         ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2280                                   status);
2281         }
2282
2283         return status;
2284 }
2285
2286 /**
2287  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2288  * @hw: pointer to the HW struct
2289  * @blk: classification stage
2290  * @dir: flow direction
2291  * @prof_id: unique ID to identify this flow profile
2292  * @segs: array of one or more packet segments that describe the flow
2293  * @segs_cnt: number of packet segments provided
2294  * @acts: array of default actions
2295  * @acts_cnt: number of default actions
2296  * @prof: stores the returned flow profile added
2297  */
2298 enum ice_status
2299 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2300                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2301                   struct ice_flow_action *acts, u8 acts_cnt,
2302                   struct ice_flow_prof **prof)
2303 {
2304         enum ice_status status;
2305
2306         if (segs_cnt > ICE_FLOW_SEG_MAX)
2307                 return ICE_ERR_MAX_LIMIT;
2308
2309         if (!segs_cnt)
2310                 return ICE_ERR_PARAM;
2311
2312         if (!segs)
2313                 return ICE_ERR_BAD_PTR;
2314
2315         status = ice_flow_val_hdrs(segs, segs_cnt);
2316         if (status)
2317                 return status;
2318
2319         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2320
2321         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2322                                         acts, acts_cnt, prof);
2323         if (!status)
2324                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2325
2326         ice_release_lock(&hw->fl_profs_locks[blk]);
2327
2328         return status;
2329 }
2330
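/* Usage sketch (hypothetical profile ID and field choice, illustrative only;
 * error handling omitted):
 *
 *      struct ice_flow_seg_info segs[1];
 *      struct ice_flow_prof *prof;
 *      enum ice_status status;
 *
 *      ice_memset(segs, 0, sizeof(segs), ICE_NONDMA_MEM);
 *      segs[0].hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP;
 *      ice_flow_set_fld(&segs[0], ICE_FLOW_FIELD_IDX_IPV4_SA,
 *                       ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *                       ICE_FLOW_FLD_OFF_INVAL, false);
 *      status = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, 0x1234,
 *                                 segs, 1, NULL, 0, &prof);
 */
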
2331 /**
2332  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2333  * @hw: pointer to the HW struct
2334  * @blk: the block for which the flow profile is to be removed
2335  * @prof_id: unique ID of the flow profile to be removed
2336  */
2337 enum ice_status
2338 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2339 {
2340         struct ice_flow_prof *prof;
2341         enum ice_status status;
2342
2343         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2344
2345         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2346         if (!prof) {
2347                 status = ICE_ERR_DOES_NOT_EXIST;
2348                 goto out;
2349         }
2350
2351         /* prof becomes invalid after the call */
2352         status = ice_flow_rem_prof_sync(hw, blk, prof);
2353
2354 out:
2355         ice_release_lock(&hw->fl_profs_locks[blk]);
2356
2357         return status;
2358 }
2359
2360 /**
2361  * ice_flow_find_entry - look for a flow entry using its unique ID
2362  * @hw: pointer to the HW struct
2363  * @blk: classification stage
2364  * @entry_id: unique ID to identify this flow entry
2365  *
2366  * This function looks for the flow entry with the specified unique ID in all
2367  * flow profiles of the specified classification stage. If the entry is found,
2368  * it returns the handle to the flow entry. Otherwise, it returns
2369  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2370  */
2371 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2372 {
2373         struct ice_flow_entry *found = NULL;
2374         struct ice_flow_prof *p;
2375
2376         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2377
2378         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2379                 struct ice_flow_entry *e;
2380
2381                 ice_acquire_lock(&p->entries_lock);
2382                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2383                         if (e->id == entry_id) {
2384                                 found = e;
2385                                 break;
2386                         }
2387                 ice_release_lock(&p->entries_lock);
2388
2389                 if (found)
2390                         break;
2391         }
2392
2393         ice_release_lock(&hw->fl_profs_locks[blk]);
2394
2395         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2396 }
2397
2398 /**
2399  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2400  * @hw: pointer to the hardware structure
2401  * @acts: array of actions to be performed on a match
2402  * @acts_cnt: number of actions
2403  * @cnt_alloc: indicates if an ACL counter has been allocated.
2404  */
2405 static enum ice_status
2406 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2407                            u8 acts_cnt, bool *cnt_alloc)
2408 {
2409         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2410         int i;
2411
2412         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2413         *cnt_alloc = false;
2414
2415         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2416                 return ICE_ERR_OUT_OF_RANGE;
2417
2418         for (i = 0; i < acts_cnt; i++) {
2419                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2420                     acts[i].type != ICE_FLOW_ACT_DROP &&
2421                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2422                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2423                         return ICE_ERR_CFG;
2424
2425                 /* If the caller wants to add two actions of the same type, then
2426                  * it is considered an invalid configuration.
2427                  */
2428                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2429                         return ICE_ERR_PARAM;
2430         }
2431
2432         /* Checks if ACL counters are needed. */
2433         for (i = 0; i < acts_cnt; i++) {
2434                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2435                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2436                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2437                         struct ice_acl_cntrs cntrs = { 0 };
2438                         enum ice_status status;
2439
2440                         cntrs.amount = 1;
2441                         cntrs.bank = 0; /* Only bank0 for the moment */
2442
2443                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2444                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2445                         else
2446                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2447
2448                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2449                         if (status)
2450                                 return status;
2451                         /* Counter index within the bank */
2452                         acts[i].data.acl_act.value =
2453                                                 CPU_TO_LE16(cntrs.first_cntr);
2454                         *cnt_alloc = true;
2455                 }
2456         }
2457
2458         return ICE_SUCCESS;
2459 }
2460
2461 /**
2462  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2463  * @fld: number of the given field
2464  * @info: info about field
2465  * @range_buf: range checker configuration buffer
2466  * @data: pointer to a data buffer containing flow entry's match values/masks
2467  * @range: Input/output param indicating which range checkers are being used
2468  */
2469 static void
2470 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2471                               struct ice_aqc_acl_profile_ranges *range_buf,
2472                               u8 *data, u8 *range)
2473 {
2474         u16 new_mask;
2475
2476         /* If not specified, default mask is all bits in field */
2477         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2478                     BIT(ice_flds_info[fld].size) - 1 :
2479                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2480
2481         /* If the mask is 0, then we don't need to worry about this input
2482          * range checker value.
2483          */
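        /* Example (assuming xtrct.disp == 0): for a 16-bit port field with a
         * range of 80..1023 and no explicit mask, new_low = 80, new_high = 1023
         * and new_mask = 0xFFFF are programmed into the selected range checker
         * below.
         */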
2484         if (new_mask) {
2485                 u16 new_high =
2486                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2487                 u16 new_low =
2488                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2489                 u8 range_idx = info->entry.val;
2490
2491                 range_buf->checker_cfg[range_idx].low_boundary =
2492                         CPU_TO_BE16(new_low);
2493                 range_buf->checker_cfg[range_idx].high_boundary =
2494                         CPU_TO_BE16(new_high);
2495                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2496
2497                 /* Indicate which range checker is being used */
2498                 *range |= BIT(range_idx);
2499         }
2500 }
2501
2502 /**
2503  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2504  * @fld: number of the given field
2505  * @info: info about the field
2506  * @buf: buffer containing the entry
2507  * @dontcare: buffer containing don't care mask for entry
2508  * @data: pointer to a data buffer containing flow entry's match values/masks
2509  */
2510 static void
2511 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2512                             u8 *dontcare, u8 *data)
2513 {
2514         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2515         bool use_mask = false;
2516         u8 disp;
2517
2518         src = info->src.val;
2519         mask = info->src.mask;
2520         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2521         disp = info->xtrct.disp % BITS_PER_BYTE;
2522
2523         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2524                 use_mask = true;
2525
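        /* Example: with disp = 3 and field bytes 0xAB, 0xCD from the user
         * buffer, the loop shifts 0xAB << 3 = 0x558 and 0xCD << 3 = 0x668,
         * so the output bytes become 0x58, then 0x05 | 0x68 = 0x6D, with
         * 0x06 carried into the following byte.
         */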
2526         for (k = 0; k < info->entry.last; k++, dst++) {
2527                 /* Add overflow bits from previous byte */
2528                 buf[dst] = (tmp_s & 0xff00) >> 8;
2529
2530                 /* If the mask is not valid, tmp_m is always zero, so this just
2531                  * sets dontcare to 0 (no masked bits). If the mask is valid, this
2532                  * pulls in the overflow bits of the mask from the previous byte.
2533                  */
2534                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2535
2536                 /* If there is displacement, the last byte will only contain
2537                  * displaced data and there is no more data to read from the
2538                  * user buffer, so skip it to avoid reading beyond the end of
2539                  * the user buffer.
2540                  */
2541                 if (!disp || k < info->entry.last - 1) {
2542                         /* Store shifted data to use in next byte */
2543                         tmp_s = data[src++] << disp;
2544
2545                         /* Add current (shifted) byte */
2546                         buf[dst] |= tmp_s & 0xff;
2547
2548                         /* Handle mask if valid */
2549                         if (use_mask) {
2550                                 tmp_m = (~data[mask++] & 0xff) << disp;
2551                                 dontcare[dst] |= tmp_m & 0xff;
2552                         }
2553                 }
2554         }
2555
2556         /* Fill in don't care bits at beginning of field */
2557         if (disp) {
2558                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2559                 for (k = 0; k < disp; k++)
2560                         dontcare[dst] |= BIT(k);
2561         }
2562
2563         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2564
2565         /* Fill in don't care bits at end of field */
2566         if (end_disp) {
2567                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2568                       info->entry.last - 1;
2569                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2570                         dontcare[dst] |= BIT(k);
2571         }
2572 }
2573
2574 /**
2575  * ice_flow_acl_frmt_entry - Format ACL entry
2576  * @hw: pointer to the hardware structure
2577  * @prof: pointer to flow profile
2578  * @e: pointer to the flow entry
2579  * @data: pointer to a data buffer containing flow entry's match values/masks
2580  * @acts: array of actions to be performed on a match
2581  * @acts_cnt: number of actions
2582  *
2583  * Formats the key (and key_inverse) to be matched from the data passed in,
2584  * along with data from the flow profile. This key/key_inverse pair makes up
2585  * the 'entry' for an ACL flow entry.
2586  */
2587 static enum ice_status
2588 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2589                         struct ice_flow_entry *e, u8 *data,
2590                         struct ice_flow_action *acts, u8 acts_cnt)
2591 {
2592         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2593         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2594         enum ice_status status;
2595         bool cnt_alloc;
2596         u8 prof_id = 0;
2597         u16 i, buf_sz;
2598
2599         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2600         if (status)
2601                 return status;
2602
2603         /* Format the result action */
2604
2605         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2606         if (status)
2607                 return status;
2608
2609         status = ICE_ERR_NO_MEMORY;
2610
2611         e->acts = (struct ice_flow_action *)
2612                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2613                            ICE_NONDMA_TO_NONDMA);
2614         if (!e->acts)
2615                 goto out;
2616
2617         e->acts_cnt = acts_cnt;
2618
2619         /* Format the matching data */
2620         buf_sz = prof->cfg.scen->width;
2621         buf = (u8 *)ice_malloc(hw, buf_sz);
2622         if (!buf)
2623                 goto out;
2624
2625         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2626         if (!dontcare)
2627                 goto out;
2628
2629         /* 'key' buffer will store both key and key_inverse, so must be twice
2630          * size of buf
2631          */
2632         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2633         if (!key)
2634                 goto out;
2635
2636         range_buf = (struct ice_aqc_acl_profile_ranges *)
2637                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2638         if (!range_buf)
2639                 goto out;
2640
2641         /* Set don't care mask to all 1's to start, will zero out used bytes */
2642         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2643
2644         for (i = 0; i < prof->segs_cnt; i++) {
2645                 struct ice_flow_seg_info *seg = &prof->segs[i];
2646                 u8 j;
2647
2648                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2649                                      ICE_FLOW_FIELD_IDX_MAX) {
2650                         struct ice_flow_fld_info *info = &seg->fields[j];
2651
2652                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2653                                 ice_flow_acl_frmt_entry_range(j, info,
2654                                                               range_buf, data,
2655                                                               &range);
2656                         else
2657                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2658                                                             dontcare, data);
2659                 }
2660
2661                 for (j = 0; j < seg->raws_cnt; j++) {
2662                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2663                         u16 dst, src, mask, k;
2664                         bool use_mask = false;
2665
2666                         src = info->src.val;
2667                         dst = info->entry.val -
2668                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2669                         mask = info->src.mask;
2670
2671                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2672                                 use_mask = true;
2673
2674                         for (k = 0; k < info->entry.last; k++, dst++) {
2675                                 buf[dst] = data[src++];
2676                                 if (use_mask)
2677                                         dontcare[dst] = ~data[mask++];
2678                                 else
2679                                         dontcare[dst] = 0;
2680                         }
2681                 }
2682         }
2683
2684         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2685         dontcare[prof->cfg.scen->pid_idx] = 0;
2686
2687         /* Format the buffer for direction flags */
2688         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2689
2690         if (prof->dir == ICE_FLOW_RX)
2691                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2692
2693         if (range) {
2694                 buf[prof->cfg.scen->rng_chk_idx] = range;
2695                 /* Mark any unused range checkers as don't care */
2696                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2697                 e->range_buf = range_buf;
2698         } else {
2699                 ice_free(hw, range_buf);
2700         }
2701
2702         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2703                              buf_sz);
2704         if (status)
2705                 goto out;
2706
2707         e->entry = key;
2708         e->entry_sz = buf_sz * 2;
2709
2710 out:
2711         if (buf)
2712                 ice_free(hw, buf);
2713
2714         if (dontcare)
2715                 ice_free(hw, dontcare);
2716
2717         if (status && key)
2718                 ice_free(hw, key);
2719
2720         if (status && range_buf) {
2721                 ice_free(hw, range_buf);
2722                 e->range_buf = NULL;
2723         }
2724
2725         if (status && e->acts) {
2726                 ice_free(hw, e->acts);
2727                 e->acts = NULL;
2728                 e->acts_cnt = 0;
2729         }
2730
2731         if (status && cnt_alloc)
2732                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2733
2734         return status;
2735 }
2736
2737 /**
2738  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2739  *                                     the compared data.
2740  * @prof: pointer to flow profile
2741  * @e: pointer to the comparing flow entry
2742  * @do_chg_action: decide if we want to change the ACL action
2743  * @do_add_entry: decide if we want to add the new ACL entry
2744  * @do_rem_entry: decide if we want to remove the current ACL entry
2745  *
2746  * Find an ACL scenario entry that matches the compared data. At the same time,
2747  * this function also figures out:
2748  * a/ If we want to change the ACL action
2749  * b/ If we want to add the new ACL entry
2750  * c/ If we want to remove the current ACL entry
2751  */
2752 static struct ice_flow_entry *
2753 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2754                                   struct ice_flow_entry *e, bool *do_chg_action,
2755                                   bool *do_add_entry, bool *do_rem_entry)
2756 {
2757         struct ice_flow_entry *p, *return_entry = NULL;
2758         u8 i, j;
2759
2760         /* Check if:
2761          * a/ There exists an entry with same matching data, but different
2762          *    priority, then we remove this existing ACL entry. Then, we
2763          *    will add the new entry to the ACL scenario.
2764          * b/ There exists an entry with same matching data, priority, and
2765          *    result action, then we do nothing.
2766          * c/ There exists an entry with same matching data and priority, but
2767          *    a different action, then we only change the entry's action.
2768          * d/ Else, we add this new entry to the ACL scenario.
2769          */
2770         *do_chg_action = false;
2771         *do_add_entry = true;
2772         *do_rem_entry = false;
2773         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2774                 if (memcmp(p->entry, e->entry, p->entry_sz))
2775                         continue;
2776
2777                 /* From this point, we have the same matching_data. */
2778                 *do_add_entry = false;
2779                 return_entry = p;
2780
2781                 if (p->priority != e->priority) {
2782                         /* matching data && !priority */
2783                         *do_add_entry = true;
2784                         *do_rem_entry = true;
2785                         break;
2786                 }
2787
2788                 /* From this point, we will have matching_data && priority */
2789                 if (p->acts_cnt != e->acts_cnt)
2790                         *do_chg_action = true;
2791                 for (i = 0; i < p->acts_cnt; i++) {
2792                         bool found_not_match = false;
2793
2794                         for (j = 0; j < e->acts_cnt; j++)
2795                                 if (memcmp(&p->acts[i], &e->acts[j],
2796                                            sizeof(struct ice_flow_action))) {
2797                                         found_not_match = true;
2798                                         break;
2799                                 }
2800
2801                         if (found_not_match) {
2802                                 *do_chg_action = true;
2803                                 break;
2804                         }
2805                 }
2806
2807                 /* (do_chg_action = true) means :
2808                  *    matching_data && priority && !result_action
2809                  * (do_chg_action = false) means :
2810                  *    matching_data && priority && result_action
2811                  */
2812                 break;
2813         }
2814
2815         return return_entry;
2816 }
2817
2818 /**
2819  * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
2820  * @p: flow priority
2821  */
2822 static enum ice_acl_entry_prio
2823 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
2824 {
2825         enum ice_acl_entry_prio acl_prio;
2826
2827         switch (p) {
2828         case ICE_FLOW_PRIO_LOW:
2829                 acl_prio = ICE_ACL_PRIO_LOW;
2830                 break;
2831         case ICE_FLOW_PRIO_NORMAL:
2832                 acl_prio = ICE_ACL_PRIO_NORMAL;
2833                 break;
2834         case ICE_FLOW_PRIO_HIGH:
2835                 acl_prio = ICE_ACL_PRIO_HIGH;
2836                 break;
2837         default:
2838                 acl_prio = ICE_ACL_PRIO_NORMAL;
2839                 break;
2840         }
2841
2842         return acl_prio;
2843 }
2844
2845 /**
2846  * ice_flow_acl_union_rng_chk - Perform union operation between two
2847  *                              range checker buffers
2848  * @dst_buf: pointer to destination range checker buffer
2849  * @src_buf: pointer to source range checker buffer
2850  *
2851  * This function computes the union of the dst_buf and src_buf range checker
2852  * buffers and saves the result back to dst_buf.
2853  */
2854 static enum ice_status
2855 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2856                            struct ice_aqc_acl_profile_ranges *src_buf)
2857 {
2858         u8 i, j;
2859
2860         if (!dst_buf || !src_buf)
2861                 return ICE_ERR_BAD_PTR;
2862
2863         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2864                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2865                 bool will_populate = false;
2866
2867                 in_data = &src_buf->checker_cfg[i];
2868
2869                 if (!in_data->mask)
2870                         break;
2871
2872                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2873                         cfg_data = &dst_buf->checker_cfg[j];
2874
2875                         if (!cfg_data->mask ||
2876                             !memcmp(cfg_data, in_data,
2877                                     sizeof(struct ice_acl_rng_data))) {
2878                                 will_populate = true;
2879                                 break;
2880                         }
2881                 }
2882
2883                 if (will_populate) {
2884                         ice_memcpy(cfg_data, in_data,
2885                                    sizeof(struct ice_acl_rng_data),
2886                                    ICE_NONDMA_TO_NONDMA);
2887                 } else {
2888                         /* No available slot left to program range checker */
2889                         return ICE_ERR_MAX_LIMIT;
2890                 }
2891         }
2892
2893         return ICE_SUCCESS;
2894 }
2895
2896 /**
2897  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2898  * @hw: pointer to the hardware structure
2899  * @prof: pointer to flow profile
2900  * @entry: double pointer to the flow entry
2901  *
2902  * This function looks at the entries currently added to the corresponding
2903  * ACL scenario, then performs matching logic to decide whether to add,
2904  * modify, or do nothing with this new entry.
2905  */
2906 static enum ice_status
2907 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2908                                  struct ice_flow_entry **entry)
2909 {
2910         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2911         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2912         struct ice_acl_act_entry *acts = NULL;
2913         struct ice_flow_entry *exist;
2914         enum ice_status status = ICE_SUCCESS;
2915         struct ice_flow_entry *e;
2916         u8 i;
2917
2918         if (!entry || !(*entry) || !prof)
2919                 return ICE_ERR_BAD_PTR;
2920
2921         e = *entry;
2922
2923         do_chg_rng_chk = false;
2924         if (e->range_buf) {
2925                 u8 prof_id = 0;
2926
2927                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2928                                               &prof_id);
2929                 if (status)
2930                         return status;
2931
2932                 /* Query the current range-checker value in FW */
2933                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2934                                                    NULL);
2935                 if (status)
2936                         return status;
2937                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2938                            sizeof(struct ice_aqc_acl_profile_ranges),
2939                            ICE_NONDMA_TO_NONDMA);
2940
2941                 /* Generate the new range-checker value */
2942                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2943                 if (status)
2944                         return status;
2945
2946                 /* Reconfigure the range check if the buffer is changed. */
2947                 do_chg_rng_chk = false;
2948                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2949                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2950                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2951                                                           &cfg_rng_buf, NULL);
2952                         if (status)
2953                                 return status;
2954
2955                         do_chg_rng_chk = true;
2956                 }
2957         }
2958
2959         /* Figure out if we want to (change the ACL action) and/or
2960          * (Add the new ACL entry) and/or (Remove the current ACL entry)
2961          */
2962         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2963                                                   &do_add_entry, &do_rem_entry);
2964         if (do_rem_entry) {
2965                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2966                 if (status)
2967                         return status;
2968         }
2969
2970         /* Prepare the result action buffer */
2971         acts = (struct ice_acl_act_entry *)
2972                 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2973         if (!acts)
2974                 return ICE_ERR_NO_MEMORY;
2975
2976         for (i = 0; i < e->acts_cnt; i++)
2977                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2978                            sizeof(struct ice_acl_act_entry),
2979                            ICE_NONDMA_TO_NONDMA);
2980
2981         if (do_add_entry) {
2982                 enum ice_acl_entry_prio prio;
2983                 u8 *keys, *inverts;
2984                 u16 entry_idx;
2985
2986                 keys = (u8 *)e->entry;
2987                 inverts = keys + (e->entry_sz / 2);
2988                 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
2989
2990                 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
2991                                            inverts, acts, e->acts_cnt,
2992                                            &entry_idx);
2993                 if (status)
2994                         goto out;
2995
2996                 e->scen_entry_idx = entry_idx;
2997                 LIST_ADD(&e->l_entry, &prof->entries);
2998         } else {
2999                 if (do_chg_action) {
3000                         /* For the action memory info, update the SW's copy of
3001                          * the existing entry with e's action memory info
3002                          */
3003                         ice_free(hw, exist->acts);
3004                         exist->acts_cnt = e->acts_cnt;
3005                         exist->acts = (struct ice_flow_action *)
3006                                 ice_calloc(hw, exist->acts_cnt,
3007                                            sizeof(struct ice_flow_action));
3008                         if (!exist->acts) {
3009                                 status = ICE_ERR_NO_MEMORY;
3010                                 goto out;
3011                         }
3012
3013                         ice_memcpy(exist->acts, e->acts,
3014                                    sizeof(struct ice_flow_action) * e->acts_cnt,
3015                                    ICE_NONDMA_TO_NONDMA);
3016
3017                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3018                                                   e->acts_cnt,
3019                                                   exist->scen_entry_idx);
3020                         if (status)
3021                                 goto out;
3022                 }
3023
3024                 if (do_chg_rng_chk) {
3025                         /* In this case, we want to update the range checker
3026                          * information of the existing entry
3027                          */
3028                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
3029                                                             e->range_buf);
3030                         if (status)
3031                                 goto out;
3032                 }
3033
3034                 /* As we don't add the new entry to our SW DB, deallocate its
3035                  * memory and return the existing entry to the caller
3036                  */
3037                 ice_dealloc_flow_entry(hw, e);
3038                 *(entry) = exist;
3039         }
3040 out:
3041         ice_free(hw, acts);
3042
3043         return status;
3044 }
3045
3046 /**
3047  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3048  * @hw: pointer to the hardware structure
3049  * @prof: pointer to flow profile
3050  * @e: double pointer to the flow entry
3051  */
3052 static enum ice_status
3053 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3054                             struct ice_flow_entry **e)
3055 {
3056         enum ice_status status;
3057
3058         ice_acquire_lock(&prof->entries_lock);
3059         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3060         ice_release_lock(&prof->entries_lock);
3061
3062         return status;
3063 }
3064
3065 /**
3066  * ice_flow_add_entry - Add a flow entry
3067  * @hw: pointer to the HW struct
3068  * @blk: classification stage
3069  * @prof_id: ID of the profile to add a new flow entry to
3070  * @entry_id: unique ID to identify this flow entry
3071  * @vsi_handle: software VSI handle for the flow entry
3072  * @prio: priority of the flow entry
3073  * @data: pointer to a data buffer containing flow entry's match values/masks
3074  * @acts: array of actions to be performed on a match
3075  * @acts_cnt: number of actions
3076  * @entry_h: pointer to buffer that receives the new flow entry's handle
3077  */
3078 enum ice_status
3079 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3080                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3081                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
3082                    u64 *entry_h)
3083 {
3084         struct ice_flow_entry *e = NULL;
3085         struct ice_flow_prof *prof;
3086         enum ice_status status = ICE_SUCCESS;
3087
3088         /* ACL entries must indicate an action */
3089         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3090                 return ICE_ERR_PARAM;
3091
3092         /* No flow entry data is expected for RSS */
3093         if (!entry_h || (!data && blk != ICE_BLK_RSS))
3094                 return ICE_ERR_BAD_PTR;
3095
3096         if (!ice_is_vsi_valid(hw, vsi_handle))
3097                 return ICE_ERR_PARAM;
3098
3099         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3100
3101         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3102         if (!prof) {
3103                 status = ICE_ERR_DOES_NOT_EXIST;
3104         } else {
3105                 /* Allocate memory for the entry being added and associate
3106                  * the VSI to the found flow profile
3107                  */
3108                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3109                 if (!e)
3110                         status = ICE_ERR_NO_MEMORY;
3111                 else
3112                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3113         }
3114
3115         ice_release_lock(&hw->fl_profs_locks[blk]);
3116         if (status)
3117                 goto out;
3118
3119         e->id = entry_id;
3120         e->vsi_handle = vsi_handle;
3121         e->prof = prof;
3122         e->priority = prio;
3123
3124         switch (blk) {
3125         case ICE_BLK_FD:
3126         case ICE_BLK_RSS:
3127                 break;
3128         case ICE_BLK_ACL:
3129                 /* ACL will handle the entry management */
3130                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3131                                                  acts_cnt);
3132                 if (status)
3133                         goto out;
3134
3135                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3136                 if (status)
3137                         goto out;
3138
3139                 break;
3140         default:
3141                 status = ICE_ERR_NOT_IMPL;
3142                 goto out;
3143         }
3144
3145         if (blk != ICE_BLK_ACL) {
3146                 /* ACL will handle the entry management */
3147                 ice_acquire_lock(&prof->entries_lock);
3148                 LIST_ADD(&e->l_entry, &prof->entries);
3149                 ice_release_lock(&prof->entries_lock);
3150         }
3151
3152         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3153
3154 out:
3155         if (status && e) {
3156                 if (e->entry)
3157                         ice_free(hw, e->entry);
3158                 ice_free(hw, e);
3159         }
3160
3161         return status;
3162 }
3163
3164 /**
3165  * ice_flow_rem_entry - Remove a flow entry
3166  * @hw: pointer to the HW struct
3167  * @blk: classification stage
3168  * @entry_h: handle to the flow entry to be removed
3169  */
3170 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3171                                    u64 entry_h)
3172 {
3173         struct ice_flow_entry *entry;
3174         struct ice_flow_prof *prof;
3175         enum ice_status status = ICE_SUCCESS;
3176
3177         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3178                 return ICE_ERR_PARAM;
3179
3180         entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
3181
3182         /* Retain the pointer to the flow profile as the entry will be freed */
3183         prof = entry->prof;
3184
3185         if (prof) {
3186                 ice_acquire_lock(&prof->entries_lock);
3187                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3188                 ice_release_lock(&prof->entries_lock);
3189         }
3190
3191         return status;
3192 }
3193
3194 /**
3195  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3196  * @seg: packet segment the field being set belongs to
3197  * @fld: field to be set
3198  * @field_type: type of the field
3199  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3200  *           entry's input buffer
3201  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3202  *            input buffer
3203  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3204  *            entry's input buffer
3205  *
3206  * This helper function stores information of a field being matched, including
3207  * the type of the field and the locations of the value to match, the mask, and
3208  * the upper-bound value in the start of the input buffer for a flow entry.
3209  * This function should only be used for fixed-size data structures.
3210  *
3211  * This function also opportunistically determines the protocol headers to be
3212  * present based on the fields being set. Some fields cannot be used alone to
3213  * determine the protocol headers present. Sometimes, fields for particular
3214  * protocol headers are not matched. In those cases, the protocol headers
3215  * must be explicitly set.
3216  */
3217 static void
3218 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3219                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3220                      u16 mask_loc, u16 last_loc)
3221 {
3222         u64 bit = BIT_ULL(fld);
3223
3224         seg->match |= bit;
3225         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3226                 seg->range |= bit;
3227
3228         seg->fields[fld].type = field_type;
3229         seg->fields[fld].src.val = val_loc;
3230         seg->fields[fld].src.mask = mask_loc;
3231         seg->fields[fld].src.last = last_loc;
3232
3233         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3234 }
3235
3236 /**
3237  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3238  * @seg: packet segment the field being set belongs to
3239  * @fld: field to be set
3240  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3241  *           entry's input buffer
3242  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3243  *            input buffer
3244  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3245  *            entry's input buffer
3246  * @range: indicate if field being matched is to be in a range
3247  *
3248  * This function specifies the locations, in the form of byte offsets from the
3249  * start of the input buffer for a flow entry, from where the value to match,
3250  * the mask value, and upper value can be extracted. These locations are then
3251  * stored in the flow profile. When adding a flow entry associated with the
3252  * flow profile, these locations will be used to quickly extract the values and
3253  * create the content of a match entry. This function should only be used for
3254  * fixed-size data structures.
3255  */
3256 void
3257 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3258                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3259 {
3260         enum ice_flow_fld_match_type t = range ?
3261                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3262
3263         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3264 }
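
/* Illustrative usage (hypothetical caller-side layout): the value and mask
 * locations are byte offsets into the caller's own input buffer, e.g.:
 *
 *	struct my_ipv4_match {
 *		u32 dst_ip;
 *		u32 dst_ip_mask;
 *	};
 *
 *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
 *			 offsetof(struct my_ipv4_match, dst_ip),
 *			 offsetof(struct my_ipv4_match, dst_ip_mask),
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 *
 * struct my_ipv4_match is not part of this file; it only illustrates how the
 * offsets are derived.
 */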
3265
3266 /**
3267  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3268  * @seg: packet segment the field being set belongs to
3269  * @fld: field to be set
3270  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3271  *           entry's input buffer
3272  * @pref_loc: location of prefix value from entry's input buffer
3273  * @pref_sz: size of the location holding the prefix value
3274  *
3275  * This function specifies the locations, in the form of byte offsets from the
3276  * start of the input buffer for a flow entry, from where the value to match
3277  * and the IPv4 prefix value can be extracted. These locations are then stored
3278  * in the flow profile. When adding flow entries to the associated flow profile,
3279  * these locations can be used to quickly extract the values to create the
3280  * content of a match entry. This function should only be used for fixed-size
3281  * data structures.
3282  */
3283 void
3284 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3285                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3286 {
3287         /* For this type of field, the "mask" location is for the prefix value's
3288          * location and the "last" location is for the size of the location of
3289          * the prefix value.
3290          */
3291         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3292                              pref_loc, (u16)pref_sz);
3293 }
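
/* Illustrative usage (hypothetical caller-side layout): matching an IPv4
 * source address against a prefix whose length is stored as a u8 in the
 * caller's input buffer:
 *
 *	ice_flow_set_fld_prefix(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *				offsetof(struct my_buf, src_ip),
 *				offsetof(struct my_buf, src_pref_len),
 *				sizeof(u8));
 *
 * struct my_buf and its members are hypothetical.
 */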
3294
3295 /**
3296  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3297  * @seg: packet segment the field being set belongs to
3298  * @off: offset of the raw field from the beginning of the segment in bytes
3299  * @len: length of the raw pattern to be matched
3300  * @val_loc: location of the value to match from entry's input buffer
3301  * @mask_loc: location of mask value from entry's input buffer
3302  *
3303  * This function specifies the offset of the raw field to be matched from the
3304  * beginning of the specified packet segment, and the locations, in the form of
3305  * byte offsets from the start of the input buffer for a flow entry, from where
3306  * the value to match and the mask value can be extracted. These locations are
3307  * then stored in the flow profile. When adding flow entries to the associated
3308  * flow profile, these locations can be used to quickly extract the values to
3309  * create the content of a match entry. This function should only be used for
3310  * fixed-size data structures.
3311  */
3312 void
3313 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3314                      u16 val_loc, u16 mask_loc)
3315 {
3316         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3317                 seg->raws[seg->raws_cnt].off = off;
3318                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3319                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3320                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3321                 /* The "last" field is used to store the length of the field */
3322                 seg->raws[seg->raws_cnt].info.src.last = len;
3323         }
3324
3325         /* Overflows of "raws" will be handled as an error condition later in
3326          * the flow when this information is processed.
3327          */
3328         seg->raws_cnt++;
3329 }
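
/* Illustrative usage (hypothetical offsets): to match 2 raw bytes located
 * 4 bytes into the segment, with the value and mask stored at caller-chosen
 * offsets in the entry's input buffer:
 *
 *	ice_flow_add_fld_raw(seg, 4, 2, RAW_VAL_OFF, RAW_MASK_OFF);
 *
 * RAW_VAL_OFF and RAW_MASK_OFF are placeholders for byte offsets defined by
 * the caller.
 */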
3330
3331 /**
3332  * ice_flow_rem_vsi_prof - remove VSI from flow profile
3333  * @hw: pointer to the hardware structure
3334  * @blk: classification stage
3335  * @vsi_handle: software VSI handle
3336  * @prof_id: unique ID to identify this flow profile
3337  *
3338  * This function removes the flow entries associated with the input
3339  * VSI handle and disassociates the VSI from the flow profile.
3340  */
3341 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3342                                       u64 prof_id)
3343 {
3344         struct ice_flow_prof *prof = NULL;
3345         enum ice_status status = ICE_SUCCESS;
3346
3347         if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3348                 return ICE_ERR_PARAM;
3349
3350         /* find flow profile pointer with input package block and profile id */
3351         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3352         if (!prof) {
3353                 ice_debug(hw, ICE_DBG_PKG,
3354                           "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3355                 return ICE_ERR_DOES_NOT_EXIST;
3356         }
3357
3358         /* Remove all remaining flow entries before removing the flow profile */
3359         if (!LIST_EMPTY(&prof->entries)) {
3360                 struct ice_flow_entry *e, *t;
3361
3362                 ice_acquire_lock(&prof->entries_lock);
3363                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
3364                                          l_entry) {
3365                         if (e->vsi_handle != vsi_handle)
3366                                 continue;
3367
3368                         status = ice_flow_rem_entry_sync(hw, blk, e);
3369                         if (status)
3370                                 break;
3371                 }
3372                 ice_release_lock(&prof->entries_lock);
3373         }
3374         if (status)
3375                 return status;
3376
3377         /* Disassociate the flow profile from the SW VSI handle */
3378         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3379         if (status)
3380                 ice_debug(hw, ICE_DBG_PKG,
3381                           "ice_flow_disassoc_prof() failed with status=%d\n",
3382                           status);
3383         return status;
3384 }
3385
3386 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3387         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3388
3389 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3390         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3391
3392 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3393         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3394
3395 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3396         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3397          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3398          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3399
3400 /**
3401  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3402  * @segs: pointer to the flow field segment(s)
3403  * @seg_cnt: segment count
3404  * @cfg: configure parameters
3405  *
3406  * Helper function to extract fields from the hash bitmap and use the flow
3407  * header value to set up the flow field segments for later use when adding
3408  * or removing a flow profile entry.
3409  */
3410 static enum ice_status
3411 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3412                           const struct ice_rss_hash_cfg *cfg)
3413 {
3414         struct ice_flow_seg_info *seg;
3415         u64 val;
3416         u8 i;
3417
3418         /* set innermost segment */
3419         seg = &segs[seg_cnt - 1];
3420
3421         ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3422                              ICE_FLOW_FIELD_IDX_MAX)
3423                 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3424                                  ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3425                                  ICE_FLOW_FLD_OFF_INVAL, false);
3426
3427         ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3428
3429         /* set outermost header */
3430         if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3431                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3432                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3433         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3434                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3435                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3436
3437         if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3438             ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
3439                 return ICE_ERR_PARAM;
3440
3441         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3442         if (val && !ice_is_pow2(val))
3443                 return ICE_ERR_CFG;
3444
3445         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3446         if (val && !ice_is_pow2(val))
3447                 return ICE_ERR_CFG;
3448
3449         return ICE_SUCCESS;
3450 }
3451
3452 /**
3453  * ice_rem_vsi_rss_list - remove VSI from RSS list
3454  * @hw: pointer to the hardware structure
3455  * @vsi_handle: software VSI handle
3456  *
3457  * Remove the VSI from all RSS configurations in the list.
3458  */
3459 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3460 {
3461         struct ice_rss_cfg *r, *tmp;
3462
3463         if (LIST_EMPTY(&hw->rss_list_head))
3464                 return;
3465
3466         ice_acquire_lock(&hw->rss_locks);
3467         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3468                                  ice_rss_cfg, l_entry)
3469                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3470                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3471                                 LIST_DEL(&r->l_entry);
3472                                 ice_free(hw, r);
3473                         }
3474         ice_release_lock(&hw->rss_locks);
3475 }
3476
3477 /**
3478  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3479  * @hw: pointer to the hardware structure
3480  * @vsi_handle: software VSI handle
3481  *
3482  * This function will iterate through all flow profiles and disassociate
3483  * the VSI from that profile. If the flow profile has no VSIs it will
3484  * be removed.
3485  */
3486 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3487 {
3488         const enum ice_block blk = ICE_BLK_RSS;
3489         struct ice_flow_prof *p, *t;
3490         enum ice_status status = ICE_SUCCESS;
3491
3492         if (!ice_is_vsi_valid(hw, vsi_handle))
3493                 return ICE_ERR_PARAM;
3494
3495         if (LIST_EMPTY(&hw->fl_profs[blk]))
3496                 return ICE_SUCCESS;
3497
3498         ice_acquire_lock(&hw->rss_locks);
3499         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3500                                  l_entry)
3501                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3502                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3503                         if (status)
3504                                 break;
3505
3506                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3507                                 status = ice_flow_rem_prof(hw, blk, p->id);
3508                                 if (status)
3509                                         break;
3510                         }
3511                 }
3512         ice_release_lock(&hw->rss_locks);
3513
3514         return status;
3515 }
3516
3517 /**
3518  * ice_get_rss_hdr_type - get an RSS profile's header type
3519  * @prof: RSS flow profile
3520  */
3521 static enum ice_rss_cfg_hdr_type
3522 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3523 {
3524         enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3525
3526         if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3527                 hdr_type = ICE_RSS_OUTER_HEADERS;
3528         } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3529                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3530                         hdr_type = ICE_RSS_INNER_HEADERS;
3531                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3532                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3533                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3534                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3535         }
3536
3537         return hdr_type;
3538 }
3539
3540 /**
3541  * ice_rem_rss_list - remove RSS configuration from list
3542  * @hw: pointer to the hardware structure
3543  * @vsi_handle: software VSI handle
3544  * @prof: pointer to flow profile
3545  *
3546  * Assumption: lock has already been acquired for RSS list
3547  */
3548 static void
3549 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3550 {
3551         enum ice_rss_cfg_hdr_type hdr_type;
3552         struct ice_rss_cfg *r, *tmp;
3553
3554         /* Search for RSS hash fields associated with the VSI that match the
3555          * hash configurations associated with the flow profile. If found,
3556          * remove it from the RSS entry list of the VSI context and delete it.
3557          */
3558         hdr_type = ice_get_rss_hdr_type(prof);
3559         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3560                                  ice_rss_cfg, l_entry)
3561                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3562                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3563                     r->hash.hdr_type == hdr_type) {
3564                         ice_clear_bit(vsi_handle, r->vsis);
3565                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3566                                 LIST_DEL(&r->l_entry);
3567                                 ice_free(hw, r);
3568                         }
3569                         return;
3570                 }
3571 }
3572
3573 /**
3574  * ice_add_rss_list - add RSS configuration to list
3575  * @hw: pointer to the hardware structure
3576  * @vsi_handle: software VSI handle
3577  * @prof: pointer to flow profile
3578  *
3579  * Assumption: lock has already been acquired for RSS list
3580  */
3581 static enum ice_status
3582 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3583 {
3584         enum ice_rss_cfg_hdr_type hdr_type;
3585         struct ice_rss_cfg *r, *rss_cfg;
3586
3587         hdr_type = ice_get_rss_hdr_type(prof);
3588         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3589                             ice_rss_cfg, l_entry)
3590                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3591                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3592                     r->hash.hdr_type == hdr_type) {
3593                         ice_set_bit(vsi_handle, r->vsis);
3594                         return ICE_SUCCESS;
3595                 }
3596
3597         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3598         if (!rss_cfg)
3599                 return ICE_ERR_NO_MEMORY;
3600
3601         rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3602         rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3603         rss_cfg->hash.hdr_type = hdr_type;
3604         rss_cfg->hash.symm = prof->cfg.symm;
3605         ice_set_bit(vsi_handle, rss_cfg->vsis);
3606
3607         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3608
3609         return ICE_SUCCESS;
3610 }
3611
3612 #define ICE_FLOW_PROF_HASH_S    0
3613 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3614 #define ICE_FLOW_PROF_HDR_S     32
3615 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3616 #define ICE_FLOW_PROF_ENCAP_S   62
3617 #define ICE_FLOW_PROF_ENCAP_M   (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3618
3619 /* Flow profile ID format:
3620  * [0:31] - Packet match fields
3621  * [32:61] - Protocol header
3622  * [62:63] - Encapsulation flag:
3623  *           0 if non-tunneled
3624  *           1 if tunneled
3625  *           2 if tunneled with outer IPv4
3626  *           3 if tunneled with outer IPv6
3627  */
3628 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3629         ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3630                (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3631                (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
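/* Example: ICE_FLOW_GEN_PROFID(0x3F, 0x8C, 2) yields profile ID
 * 0x8000008C0000003F: hash fields 0x3F in bits [0:31], protocol header value
 * 0x8C in bits [32:61], and encapsulation flag 2 in bits [62:63].
 */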
3632
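/**
 * ice_rss_config_xor_word - program one symmetric hash substitution entry
 * @hw: pointer to the HW struct
 * @prof_id: RSS hardware profile ID
 * @src: index of the field vector word to be substituted
 * @dst: index of the field vector word to substitute with
 *
 * Updates the byte of GLQF_HSYMM that corresponds to field vector word @src
 * so that it holds @dst with the high bit (0x80) set, causing the hash
 * engine to treat the two words symmetrically.
 */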
3633 static void
3634 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3635 {
3636         u32 s = ((src % 4) << 3); /* byte shift */
3637         u32 v = dst | 0x80; /* value to program */
3638         u8 i = src / 4; /* register index */
3639         u32 reg;
3640
3641         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3642         reg = (reg & ~(0xff << s)) | (v << s);
3643         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3644 }
3645
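/**
 * ice_rss_config_xor - configure symmetric hashing for a src/dst field pair
 * @hw: pointer to the HW struct
 * @prof_id: RSS hardware profile ID
 * @src: first field vector word index of the source field
 * @dst: first field vector word index of the destination field
 * @len: length of the field, in field vector words
 *
 * Pairs each word of the source field with the corresponding word of the
 * destination field (and vice versa) in GLQF_HSYMM. The word indices used
 * by GLQF_HSYMM count from the end of the field vector, hence the
 * "fv_last_word - ..." translation below.
 */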
3646 static void
3647 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3648 {
3649         int fv_last_word =
3650                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3651         int i;
3652
3653         for (i = 0; i < len; i++) {
3654                 ice_rss_config_xor_word(hw, prof_id,
3655                                         /* Yes, field vector in GLQF_HSYMM and
3656                                          * GLQF_HINSET is inverted!
3657                                          */
3658                                         fv_last_word - (src + i),
3659                                         fv_last_word - (dst + i));
3660                 ice_rss_config_xor_word(hw, prof_id,
3661                                         fv_last_word - (dst + i),
3662                                         fv_last_word - (src + i));
3663         }
3664 }
3665
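/**
 * ice_rss_update_symm - update symmetric hash configuration for a profile
 * @hw: pointer to the HW struct
 * @prof: RSS flow profile to update
 *
 * Looks up the hardware profile ID of the RSS profile, clears its
 * GLQF_HSYMM registers to the default (non-symmetric) state and, if the
 * profile requests symmetric hashing, pairs the source and destination
 * address and port words (IPv4, IPv6, TCP, UDP, SCTP) so the computed hash
 * is the same for both directions of a flow.
 */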
3666 static void
3667 ice_rss_update_symm(struct ice_hw *hw,
3668                     struct ice_flow_prof *prof)
3669 {
3670         struct ice_prof_map *map;
3671         u8 prof_id, m;
3672
3673         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3674         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3675         if (map)
3676                 prof_id = map->prof_id;
3677         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3678         if (!map)
3679                 return;
3680         /* clear to default */
3681         for (m = 0; m < 6; m++)
3682                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3683         if (prof->cfg.symm) {
3684                 struct ice_flow_seg_info *seg =
3685                         &prof->segs[prof->segs_cnt - 1];
3686
3687                 struct ice_flow_seg_xtrct *ipv4_src =
3688                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3689                 struct ice_flow_seg_xtrct *ipv4_dst =
3690                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3691                 struct ice_flow_seg_xtrct *ipv6_src =
3692                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3693                 struct ice_flow_seg_xtrct *ipv6_dst =
3694                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3695
3696                 struct ice_flow_seg_xtrct *tcp_src =
3697                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3698                 struct ice_flow_seg_xtrct *tcp_dst =
3699                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3700
3701                 struct ice_flow_seg_xtrct *udp_src =
3702                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3703                 struct ice_flow_seg_xtrct *udp_dst =
3704                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3705
3706                 struct ice_flow_seg_xtrct *sctp_src =
3707                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3708                 struct ice_flow_seg_xtrct *sctp_dst =
3709                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3710
3711                 /* xor IPv4 */
3712                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3713                         ice_rss_config_xor(hw, prof_id,
3714                                            ipv4_src->idx, ipv4_dst->idx, 2);
3715
3716                 /* xor IPv6 */
3717                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3718                         ice_rss_config_xor(hw, prof_id,
3719                                            ipv6_src->idx, ipv6_dst->idx, 8);
3720
3721                 /* xor TCP */
3722                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3723                         ice_rss_config_xor(hw, prof_id,
3724                                            tcp_src->idx, tcp_dst->idx, 1);
3725
3726                 /* xor UDP */
3727                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3728                         ice_rss_config_xor(hw, prof_id,
3729                                            udp_src->idx, udp_dst->idx, 1);
3730
3731                 /* xor SCTP */
3732                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3733                         ice_rss_config_xor(hw, prof_id,
3734                                            sctp_src->idx, sctp_dst->idx, 1);
3735         }
3736 }
3737
3738 /**
3739  * ice_add_rss_cfg_sync - add an RSS configuration
3740  * @hw: pointer to the hardware structure
3741  * @vsi_handle: software VSI handle
3742  * @cfg: configure parameters
3743  *
3744  * Assumption: lock has already been acquired for RSS list
3745  */
3746 static enum ice_status
3747 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3748                      const struct ice_rss_hash_cfg *cfg)
3749 {
3750         const enum ice_block blk = ICE_BLK_RSS;
3751         struct ice_flow_prof *prof = NULL;
3752         struct ice_flow_seg_info *segs;
3753         enum ice_status status;
3754         u8 segs_cnt;
3755
3756         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3757                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3758
3759         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3760                                                       sizeof(*segs));
3761         if (!segs)
3762                 return ICE_ERR_NO_MEMORY;
3763
3764         /* Construct the packet segment info from the hashed fields */
3765         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3766         if (status)
3767                 goto exit;
3768
3769         /* Search for a flow profile that has matching headers, hash fields,
3770          * and the input VSI associated with it. If found, no further
3771          * operations are required, so exit.
3772          */
3773         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3774                                         vsi_handle,
3775                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
3776                                         ICE_FLOW_FIND_PROF_CHK_VSI);
3777         if (prof) {
3778                 if (prof->cfg.symm == cfg->symm)
3779                         goto exit;
3780                 prof->cfg.symm = cfg->symm;
3781                 goto update_symm;
3782         }
3783
3784         /* Check if a flow profile exists with the same protocol headers and
3785          * is associated with the input VSI. If so, disassociate the VSI from
3786          * this profile. The VSI will then be added to a new profile created
3787          * with the same protocol headers and the new hash field configuration.
3788          */
3789         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3790                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3791         if (prof) {
3792                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3793                 if (!status)
3794                         ice_rem_rss_list(hw, vsi_handle, prof);
3795                 else
3796                         goto exit;
3797
3798                 /* Remove profile if it has no VSIs associated */
3799                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3800                         status = ice_flow_rem_prof(hw, blk, prof->id);
3801                         if (status)
3802                                 goto exit;
3803                 }
3804         }
3805
3806         /* Search for a profile that has only the same match fields. If one
3807          * exists, associate the VSI with this profile.
3808          */
3809         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3810                                         vsi_handle,
3811                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3812         if (prof) {
3813                 if (prof->cfg.symm == cfg->symm) {
3814                         status = ice_flow_assoc_prof(hw, blk, prof,
3815                                                      vsi_handle);
3816                         if (!status)
3817                                 status = ice_add_rss_list(hw, vsi_handle,
3818                                                           prof);
3819                 } else {
3820                         /* If a profile exists but with a different symmetric
3821                          * hash requirement, just return an error.
3822                          */
3823                         status = ICE_ERR_NOT_SUPPORTED;
3824                 }
3825                 goto exit;
3826         }
3827
3828         /* Create a new flow profile with the generated profile ID and the
3829          * packet segment information.
3830          */
3831         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3832                                    ICE_FLOW_GEN_PROFID(cfg->hash_flds,
3833                                                        segs[segs_cnt - 1].hdrs,
3834                                                        cfg->hdr_type),
3835                                    segs, segs_cnt, NULL, 0, &prof);
3836         if (status)
3837                 goto exit;
3838
3839         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3840         /* If associating the VSI with the new flow profile failed, then this
3841          * profile can be removed.
3842          */
3843         if (status) {
3844                 ice_flow_rem_prof(hw, blk, prof->id);
3845                 goto exit;
3846         }
3847
3848         status = ice_add_rss_list(hw, vsi_handle, prof);
3849
3850         prof->cfg.symm = cfg->symm;
3851 update_symm:
3852         ice_rss_update_symm(hw, prof);
3853
3854 exit:
3855         ice_free(hw, segs);
3856         return status;
3857 }
3858
3859 /**
3860  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3861  * @hw: pointer to the hardware structure
3862  * @vsi_handle: software VSI handle
3863  * @cfg: configure parameters
3864  *
3865  * This function will generate a flow profile based on the input fields to
3866  * hash on and the flow type, and will use the VSI number to add a flow
3867  * entry to the profile. A usage sketch follows the function body.
3868  */
3869 enum ice_status
3870 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3871                 const struct ice_rss_hash_cfg *cfg)
3872 {
3873         struct ice_rss_hash_cfg local_cfg;
3874         enum ice_status status;
3875
3876         if (!ice_is_vsi_valid(hw, vsi_handle) ||
3877             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3878             cfg->hash_flds == ICE_HASH_INVALID)
3879                 return ICE_ERR_PARAM;
3880
3881         local_cfg = *cfg;
3882         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3883                 ice_acquire_lock(&hw->rss_locks);
3884                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3885                 ice_release_lock(&hw->rss_locks);
3886         } else {
3887                 ice_acquire_lock(&hw->rss_locks);
3888                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3889                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3890                 if (!status) {
3891                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3892                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3893                                                       &local_cfg);
3894                 }
3895                 ice_release_lock(&hw->rss_locks);
3896         }
3897
3898         return status;
3899 }
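
/* Usage sketch (illustrative only, compiled out by default): a caller that
 * wants symmetric RSS on outer IPv4 source/destination addresses might fill
 * an ice_rss_hash_cfg as shown and pass it to ice_add_rss_cfg(). The guard
 * macro ICE_FLOW_USAGE_EXAMPLE and the helper name are hypothetical, not
 * part of the driver API.
 */
#ifdef ICE_FLOW_USAGE_EXAMPLE
static enum ice_status
ice_flow_example_add_ipv4_rss(struct ice_hw *hw, u16 vsi_handle)
{
        struct ice_rss_hash_cfg cfg = { 0 };

        cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4; /* match plain IPv4 packets */
        cfg.hash_flds = ICE_FLOW_HASH_IPV4;    /* hash on IPv4 SA and DA */
        cfg.hdr_type = ICE_RSS_OUTER_HEADERS;  /* outermost headers only */
        cfg.symm = true;                       /* request symmetric hashing */

        return ice_add_rss_cfg(hw, vsi_handle, &cfg);
}
#endif /* ICE_FLOW_USAGE_EXAMPLE */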
3900
3901 /**
3902  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3903  * @hw: pointer to the hardware structure
3904  * @vsi_handle: software VSI handle
3905  * @cfg: configure parameters
3906  *
3907  * Assumption: lock has already been acquired for RSS list
3908  */
3909 static enum ice_status
3910 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3911                      const struct ice_rss_hash_cfg *cfg)
3912 {
3913         const enum ice_block blk = ICE_BLK_RSS;
3914         struct ice_flow_seg_info *segs;
3915         struct ice_flow_prof *prof;
3916         enum ice_status status;
3917         u8 segs_cnt;
3918
3919         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3920                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3921         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3922                                                       sizeof(*segs));
3923         if (!segs)
3924                 return ICE_ERR_NO_MEMORY;
3925
3926         /* Construct the packet segment info from the hashed fields */
3927         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3928         if (status)
3929                 goto out;
3930
3931         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3932                                         vsi_handle,
3933                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3934         if (!prof) {
3935                 status = ICE_ERR_DOES_NOT_EXIST;
3936                 goto out;
3937         }
3938
3939         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3940         if (status)
3941                 goto out;
3942
3943         /* Remove RSS configuration from VSI context before deleting
3944          * the flow profile.
3945          */
3946         ice_rem_rss_list(hw, vsi_handle, prof);
3947
3948         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3949                 status = ice_flow_rem_prof(hw, blk, prof->id);
3950
3951 out:
3952         ice_free(hw, segs);
3953         return status;
3954 }
3955
3956 /**
3957  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3958  * @hw: pointer to the hardware structure
3959  * @vsi_handle: software VSI handle
3960  * @cfg: configure parameters
3961  *
3962  * This function will look up the flow profile based on the input
3963  * hash field bitmap, iterate through that profile's entry list, and
3964  * find the entry associated with the input VSI to be removed. Calls are
3965  * made to the underlying flow APIs, which will in turn build or update
3966  * buffers for the RSS XLT1 section. A usage sketch follows the function.
3967  */
3968 enum ice_status
3969 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3970                 const struct ice_rss_hash_cfg *cfg)
3971 {
3972         struct ice_rss_hash_cfg local_cfg;
3973         enum ice_status status;
3974
3975         if (!ice_is_vsi_valid(hw, vsi_handle) ||
3976             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3977             cfg->hash_flds == ICE_HASH_INVALID)
3978                 return ICE_ERR_PARAM;
3979
3980         ice_acquire_lock(&hw->rss_locks);
3981         local_cfg = *cfg;
3982         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3983                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3984         } else {
3985                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3986                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3987
3988                 if (!status) {
3989                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3990                         status = ice_rem_rss_cfg_sync(hw, vsi_handle,
3991                                                       &local_cfg);
3992                 }
3993         }
3994         ice_release_lock(&hw->rss_locks);
3995
3996         return status;
3997 }
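
/* Usage sketch (illustrative only, compiled out by default): removing a
 * configuration takes the same ice_rss_hash_cfg that was used when adding
 * it, so the hash fields and header types must match an existing profile.
 * The guard macro and helper name are hypothetical.
 */
#ifdef ICE_FLOW_USAGE_EXAMPLE
static enum ice_status
ice_flow_example_rem_ipv4_rss(struct ice_hw *hw, u16 vsi_handle)
{
        struct ice_rss_hash_cfg cfg = { 0 };

        cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4;
        cfg.hash_flds = ICE_FLOW_HASH_IPV4;
        cfg.hdr_type = ICE_RSS_OUTER_HEADERS;

        /* fails with ICE_ERR_DOES_NOT_EXIST if no matching profile is found */
        return ice_rem_rss_cfg(hw, vsi_handle, &cfg);
}
#endif /* ICE_FLOW_USAGE_EXAMPLE */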
3998
3999 /**
4000  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
4001  * @hw: pointer to the hardware structure
4002  * @vsi_handle: software VSI handle
4003  */
4004 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
4005 {
4006         enum ice_status status = ICE_SUCCESS;
4007         struct ice_rss_cfg *r;
4008
4009         if (!ice_is_vsi_valid(hw, vsi_handle))
4010                 return ICE_ERR_PARAM;
4011
4012         ice_acquire_lock(&hw->rss_locks);
4013         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4014                             ice_rss_cfg, l_entry) {
4015                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
4016                         status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
4017                         if (status)
4018                                 break;
4019                 }
4020         }
4021         ice_release_lock(&hw->rss_locks);
4022
4023         return status;
4024 }
4025
4026 /**
4027  * ice_get_rss_cfg - returns hashed fields for the given header types
4028  * @hw: pointer to the hardware structure
4029  * @vsi_handle: software VSI handle
4030  * @hdrs: protocol header type
4031  *
4032  * This function will return the match fields of the first instance of a
4033  * flow profile having the given header types and containing the input VSI.
4034  */
4035 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4036 {
4037         u64 rss_hash = ICE_HASH_INVALID;
4038         struct ice_rss_cfg *r;
4039
4040         /* verify that the protocol header is non-zero and the VSI is valid */
4041         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4042                 return ICE_HASH_INVALID;
4043
4044         ice_acquire_lock(&hw->rss_locks);
4045         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4046                             ice_rss_cfg, l_entry)
4047                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4048                     r->hash.addl_hdrs == hdrs) {
4049                         rss_hash = r->hash.hash_flds;
4050                         break;
4051                 }
4052         ice_release_lock(&hw->rss_locks);
4053
4054         return rss_hash;
4055 }
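
/* Usage sketch (illustrative only, compiled out by default): querying which
 * fields are currently hashed for plain IPv4 traffic on a VSI.
 * ICE_HASH_INVALID is returned when no matching configuration exists. The
 * guard macro and helper name are hypothetical.
 */
#ifdef ICE_FLOW_USAGE_EXAMPLE
static bool
ice_flow_example_ipv4_addrs_hashed(struct ice_hw *hw, u16 vsi_handle)
{
        u64 flds = ice_get_rss_cfg(hw, vsi_handle, ICE_FLOW_SEG_HDR_IPV4);

        if (flds == ICE_HASH_INVALID)
                return false;

        /* true only if both IPv4 source and destination addresses are hashed */
        return (flds & ICE_FLOW_HASH_IPV4) == ICE_FLOW_HASH_IPV4;
}
#endif /* ICE_FLOW_USAGE_EXAMPLE */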