net/ice/base: support eCPRI over MAC type 0 flow
[dpdk.git] drivers/net/ice/base/ice_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
17 #define ICE_FLOW_FLD_SZ_IP_TTL          1
18 #define ICE_FLOW_FLD_SZ_IP_PROT         1
19 #define ICE_FLOW_FLD_SZ_PORT            2
20 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
21 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
22 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
23 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
24 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
25 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
26 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
27 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
28 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
29 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
30 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
31 #define ICE_FLOW_FLD_SZ_AH_SPI  4
32 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
33 #define ICE_FLOW_FLD_SZ_VXLAN_VNI       4
34 #define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
35
36 /* Describe properties of a protocol header field */
37 struct ice_flow_field_info {
38         enum ice_flow_seg_hdr hdr;
39         s16 off;        /* Offset from start of a protocol header, in bits */
40         u16 size;       /* Size of field, in bits */
41         u16 mask;       /* 16-bit mask for field */
42 };
43
44 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
45         .hdr = _hdr, \
46         .off = (_offset_bytes) * BITS_PER_BYTE, \
47         .size = (_size_bytes) * BITS_PER_BYTE, \
48         .mask = 0, \
49 }
50
51 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
52         .hdr = _hdr, \
53         .off = (_offset_bytes) * BITS_PER_BYTE, \
54         .size = (_size_bytes) * BITS_PER_BYTE, \
55         .mask = _mask, \
56 }
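/* For illustration: the table below uses these helpers to describe fields in
 * byte terms, which the macros convert to bit offsets/sizes.  For example,
 * ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT) describes
 * the TCP destination port: a 2-byte field at byte offset 2 of the TCP
 * header, i.e. off = 16 bits, size = 16 bits, and no mask.
 */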
57
58 /* Table containing properties of supported protocol header fields */
59 static const
60 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
61         /* Ether */
62         /* ICE_FLOW_FIELD_IDX_ETH_DA */
63         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
64         /* ICE_FLOW_FIELD_IDX_ETH_SA */
65         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
66         /* ICE_FLOW_FIELD_IDX_S_VLAN */
67         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
68         /* ICE_FLOW_FIELD_IDX_C_VLAN */
69         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
70         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
71         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
72         /* IPv4 / IPv6 */
73         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
74         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
75                               0x00fc),
76         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
77         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
78                               0x0ff0),
79         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
80         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
81                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
82         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
83         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
84                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
85         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
86         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
87                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
88         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
89         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
90                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
91         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
92         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
94         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
95         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
96         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
99         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
100         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
101                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
102         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
103         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
104                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
105         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
106         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
107                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
108         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
109         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
110                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
111         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
112         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
113                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
114         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
115         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
116                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
117         /* Transport */
118         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
119         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
120         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
121         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
122         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
123         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
124         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
125         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
126         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
127         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
128         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
129         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
130         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
131         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
132         /* ARP */
133         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
134         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
136         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
137         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
138         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
139         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
140         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
141         /* ICE_FLOW_FIELD_IDX_ARP_OP */
142         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
143         /* ICMP */
144         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
145         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
146         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
147         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
148         /* GRE */
149         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
150         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
151         /* GTP */
152         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
153         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
154                           ICE_FLOW_FLD_SZ_GTP_TEID),
155         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
156         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
157                           ICE_FLOW_FLD_SZ_GTP_TEID),
158         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
159         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
160                           ICE_FLOW_FLD_SZ_GTP_TEID),
161         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
162         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
163                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
164         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
165         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
166                           ICE_FLOW_FLD_SZ_GTP_TEID),
167         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
168         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
169                           ICE_FLOW_FLD_SZ_GTP_TEID),
170         /* PPPOE */
171         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
172         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
173                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
174         /* PFCP */
175         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
176         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
177                           ICE_FLOW_FLD_SZ_PFCP_SEID),
178         /* L2TPV3 */
179         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
180         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
181                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
182         /* ESP */
183         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
184         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
185                           ICE_FLOW_FLD_SZ_ESP_SPI),
186         /* AH */
187         /* ICE_FLOW_FIELD_IDX_AH_SPI */
188         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
189                           ICE_FLOW_FLD_SZ_AH_SPI),
190         /* NAT_T_ESP */
191         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
192         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
193                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
194         /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
195         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
196                           ICE_FLOW_FLD_SZ_VXLAN_VNI),
197         /* ECPRI_TP0 */
198         /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
199         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
200                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
201 };
202
203 /* Bitmaps indicating relevant packet types for a particular protocol header
204  *
205  * Packet types for packets with an Outer/First/Single MAC header
206  */
207 static const u32 ice_ptypes_mac_ofos[] = {
208         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
209         0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
210         0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00000707,
211         0x00000000, 0x00000000, 0x00000000, 0x00000000,
212         0x00000000, 0x00000000, 0x00000000, 0x00000000,
213         0x00000000, 0x00000000, 0x00000000, 0x00000000,
214         0x00000000, 0x00000000, 0x00000000, 0x00000000,
215         0x00000000, 0x00000000, 0x00000000, 0x00000000,
216 };
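/* Note: each ptype table below is used as a bitmap over packet type indices
 * (one bit per PTYPE, stored as 32-bit words); a set bit marks that PTYPE as
 * relevant for the corresponding protocol header.  ice_flow_proc_seg_hdrs()
 * below starts from an all-ones bitmap, ANDs in one table per header present
 * in a segment, and AND-NOTs out the PPPoE/PFCP tables when those headers are
 * absent, so a profile's resulting PTYPE set is the intersection of the
 * tables for all of its headers.
 */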
217
218 /* Packet types for packets with an Innermost/Last MAC VLAN header */
219 static const u32 ice_ptypes_macvlan_il[] = {
220         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
221         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
222         0x00000000, 0x00000000, 0x00000000, 0x00000000,
223         0x00000000, 0x00000000, 0x00000000, 0x00000000,
224         0x00000000, 0x00000000, 0x00000000, 0x00000000,
225         0x00000000, 0x00000000, 0x00000000, 0x00000000,
226         0x00000000, 0x00000000, 0x00000000, 0x00000000,
227         0x00000000, 0x00000000, 0x00000000, 0x00000000,
228 };
229
230 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
231  * include IPV4 other PTYPEs
232  */
233 static const u32 ice_ptypes_ipv4_ofos[] = {
234         0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
235         0x00000000, 0x00000155, 0x00000000, 0x00000000,
236         0x00000000, 0x000FC000, 0x000002A0, 0x00000000,
237         0x00000000, 0x00000000, 0x00000000, 0x00000000,
238         0x00000000, 0x00000000, 0x00000000, 0x00000000,
239         0x00000000, 0x00000000, 0x00000000, 0x00000000,
240         0x00000000, 0x00000000, 0x00000000, 0x00000000,
241         0x00000000, 0x00000000, 0x00000000, 0x00000000,
242 };
243
244 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
245  * IPV4 other PTYPEs
246  */
247 static const u32 ice_ptypes_ipv4_ofos_all[] = {
248         0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
249         0x00000000, 0x00000155, 0x00000000, 0x00000000,
250         0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
251         0x00000000, 0x00000000, 0x00000000, 0x00000000,
252         0x00000000, 0x00000000, 0x00000000, 0x00000000,
253         0x00000000, 0x00000000, 0x00000000, 0x00000000,
254         0x00000000, 0x00000000, 0x00000000, 0x00000000,
255         0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 };
257
258 /* Packet types for packets with an Innermost/Last IPv4 header */
259 static const u32 ice_ptypes_ipv4_il[] = {
260         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
261         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
262         0x00000000, 0x00000000, 0x001FF800, 0x00000000,
263         0x00000000, 0x00000000, 0x00000000, 0x00000000,
264         0x00000000, 0x00000000, 0x00000000, 0x00000000,
265         0x00000000, 0x00000000, 0x00000000, 0x00000000,
266         0x00000000, 0x00000000, 0x00000000, 0x00000000,
267         0x00000000, 0x00000000, 0x00000000, 0x00000000,
268 };
269
270 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
271  * include IPV6 other PTYPEs
272  */
273 static const u32 ice_ptypes_ipv6_ofos[] = {
274         0x00000000, 0x00000000, 0x77000000, 0x10002000,
275         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
276         0x00000000, 0x03F00000, 0x00000540, 0x00000000,
277         0x00000000, 0x00000000, 0x00000000, 0x00000000,
278         0x00000000, 0x00000000, 0x00000000, 0x00000000,
279         0x00000000, 0x00000000, 0x00000000, 0x00000000,
280         0x00000000, 0x00000000, 0x00000000, 0x00000000,
281         0x00000000, 0x00000000, 0x00000000, 0x00000000,
282 };
283
284 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
285  * IPV6 other PTYPEs
286  */
287 static const u32 ice_ptypes_ipv6_ofos_all[] = {
288         0x00000000, 0x00000000, 0x77000000, 0x10002000,
289         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
290         0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
291         0x00000000, 0x00000000, 0x00000000, 0x00000000,
292         0x00000000, 0x00000000, 0x00000000, 0x00000000,
293         0x00000000, 0x00000000, 0x00000000, 0x00000000,
294         0x00000000, 0x00000000, 0x00000000, 0x00000000,
295         0x00000000, 0x00000000, 0x00000000, 0x00000000,
296 };
297
298 /* Packet types for packets with an Innermost/Last IPv6 header */
299 static const u32 ice_ptypes_ipv6_il[] = {
300         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
301         0x00000770, 0x00000000, 0x00000000, 0x00000000,
302         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
303         0x00000000, 0x00000000, 0x00000000, 0x00000000,
304         0x00000000, 0x00000000, 0x00000000, 0x00000000,
305         0x00000000, 0x00000000, 0x00000000, 0x00000000,
306         0x00000000, 0x00000000, 0x00000000, 0x00000000,
307         0x00000000, 0x00000000, 0x00000000, 0x00000000,
308 };
309
310 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
311 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
312         0x10C00000, 0x04000800, 0x00000000, 0x00000000,
313         0x00000000, 0x00000000, 0x00000000, 0x00000000,
314         0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
315         0x00000000, 0x00000000, 0x00000000, 0x00000000,
316         0x00000000, 0x00000000, 0x00000000, 0x00000000,
317         0x00000000, 0x00000000, 0x00000000, 0x00000000,
318         0x00000000, 0x00000000, 0x00000000, 0x00000000,
319         0x00000000, 0x00000000, 0x00000000, 0x00000000,
320 };
321
322 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
323 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
324         0x60000000, 0x18043008, 0x80000002, 0x6010c021,
325         0x00000008, 0x00000000, 0x00000000, 0x00000000,
326         0x00000000, 0x00000000, 0x00139800, 0x00000000,
327         0x00000000, 0x00000000, 0x00000000, 0x00000000,
328         0x00000000, 0x00000000, 0x00000000, 0x00000000,
329         0x00000000, 0x00000000, 0x00000000, 0x00000000,
330         0x00000000, 0x00000000, 0x00000000, 0x00000000,
331         0x00000000, 0x00000000, 0x00000000, 0x00000000,
332 };
333
334 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
335 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
336         0x00000000, 0x00000000, 0x43000000, 0x10002000,
337         0x00000000, 0x00000000, 0x00000000, 0x00000000,
338         0x00000000, 0x02300000, 0x00000540, 0x00000000,
339         0x00000000, 0x00000000, 0x00000000, 0x00000000,
340         0x00000000, 0x00000000, 0x00000000, 0x00000000,
341         0x00000000, 0x00000000, 0x00000000, 0x00000000,
342         0x00000000, 0x00000000, 0x00000000, 0x00000000,
343         0x00000000, 0x00000000, 0x00000000, 0x00000000,
344 };
345
346 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
347 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
348         0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
349         0x00000430, 0x00000000, 0x00000000, 0x00000000,
350         0x00000000, 0x00000000, 0x4e600000, 0x00000000,
351         0x00000000, 0x00000000, 0x00000000, 0x00000000,
352         0x00000000, 0x00000000, 0x00000000, 0x00000000,
353         0x00000000, 0x00000000, 0x00000000, 0x00000000,
354         0x00000000, 0x00000000, 0x00000000, 0x00000000,
355         0x00000000, 0x00000000, 0x00000000, 0x00000000,
356 };
357
358 /* Packet types for packets with an Outermost/First ARP header */
359 static const u32 ice_ptypes_arp_of[] = {
360         0x00000800, 0x00000000, 0x00000000, 0x00000000,
361         0x00000000, 0x00000000, 0x00000000, 0x00000000,
362         0x00000000, 0x00000000, 0x00000000, 0x00000000,
363         0x00000000, 0x00000000, 0x00000000, 0x00000000,
364         0x00000000, 0x00000000, 0x00000000, 0x00000000,
365         0x00000000, 0x00000000, 0x00000000, 0x00000000,
366         0x00000000, 0x00000000, 0x00000000, 0x00000000,
367         0x00000000, 0x00000000, 0x00000000, 0x00000000,
368 };
369
370 /* UDP Packet types for non-tunneled packets or tunneled
371  * packets with inner UDP.
372  */
373 static const u32 ice_ptypes_udp_il[] = {
374         0x81000000, 0x20204040, 0x04000010, 0x80810102,
375         0x00000040, 0x00000000, 0x00000000, 0x00000000,
376         0x00000000, 0x00410000, 0x908427E0, 0x00000007,
377         0x00000000, 0x00000000, 0x00000000, 0x00000000,
378         0x00000000, 0x00000000, 0x00000000, 0x00000000,
379         0x00000000, 0x00000000, 0x00000000, 0x00000000,
380         0x00000000, 0x00000000, 0x00000000, 0x00000000,
381         0x00000000, 0x00000000, 0x00000000, 0x00000000,
382 };
383
384 /* Packet types for packets with an Innermost/Last TCP header */
385 static const u32 ice_ptypes_tcp_il[] = {
386         0x04000000, 0x80810102, 0x10000040, 0x02040408,
387         0x00000102, 0x00000000, 0x00000000, 0x00000000,
388         0x00000000, 0x00820000, 0x21084000, 0x00000000,
389         0x00000000, 0x00000000, 0x00000000, 0x00000000,
390         0x00000000, 0x00000000, 0x00000000, 0x00000000,
391         0x00000000, 0x00000000, 0x00000000, 0x00000000,
392         0x00000000, 0x00000000, 0x00000000, 0x00000000,
393         0x00000000, 0x00000000, 0x00000000, 0x00000000,
394 };
395
396 /* Packet types for packets with an Innermost/Last SCTP header */
397 static const u32 ice_ptypes_sctp_il[] = {
398         0x08000000, 0x01020204, 0x20000081, 0x04080810,
399         0x00000204, 0x00000000, 0x00000000, 0x00000000,
400         0x00000000, 0x01040000, 0x00000000, 0x00000000,
401         0x00000000, 0x00000000, 0x00000000, 0x00000000,
402         0x00000000, 0x00000000, 0x00000000, 0x00000000,
403         0x00000000, 0x00000000, 0x00000000, 0x00000000,
404         0x00000000, 0x00000000, 0x00000000, 0x00000000,
405         0x00000000, 0x00000000, 0x00000000, 0x00000000,
406 };
407
408 /* Packet types for packets with an Outermost/First ICMP header */
409 static const u32 ice_ptypes_icmp_of[] = {
410         0x10000000, 0x00000000, 0x00000000, 0x00000000,
411         0x00000000, 0x00000000, 0x00000000, 0x00000000,
412         0x00000000, 0x00000000, 0x00000000, 0x00000000,
413         0x00000000, 0x00000000, 0x00000000, 0x00000000,
414         0x00000000, 0x00000000, 0x00000000, 0x00000000,
415         0x00000000, 0x00000000, 0x00000000, 0x00000000,
416         0x00000000, 0x00000000, 0x00000000, 0x00000000,
417         0x00000000, 0x00000000, 0x00000000, 0x00000000,
418 };
419
420 /* Packet types for packets with an Innermost/Last ICMP header */
421 static const u32 ice_ptypes_icmp_il[] = {
422         0x00000000, 0x02040408, 0x40000102, 0x08101020,
423         0x00000408, 0x00000000, 0x00000000, 0x00000000,
424         0x00000000, 0x00000000, 0x42108000, 0x00000000,
425         0x00000000, 0x00000000, 0x00000000, 0x00000000,
426         0x00000000, 0x00000000, 0x00000000, 0x00000000,
427         0x00000000, 0x00000000, 0x00000000, 0x00000000,
428         0x00000000, 0x00000000, 0x00000000, 0x00000000,
429         0x00000000, 0x00000000, 0x00000000, 0x00000000,
430 };
431
432 /* Packet types for packets with an Outermost/First GRE header */
433 static const u32 ice_ptypes_gre_of[] = {
434         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
435         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
436         0x00000000, 0x00000000, 0x00000000, 0x00000000,
437         0x00000000, 0x00000000, 0x00000000, 0x00000000,
438         0x00000000, 0x00000000, 0x00000000, 0x00000000,
439         0x00000000, 0x00000000, 0x00000000, 0x00000000,
440         0x00000000, 0x00000000, 0x00000000, 0x00000000,
441         0x00000000, 0x00000000, 0x00000000, 0x00000000,
442 };
443
444 /* Packet types for packets with an Innermost/Last MAC header */
445 static const u32 ice_ptypes_mac_il[] = {
446         0x00000000, 0x20000000, 0x00000000, 0x00000000,
447         0x00000000, 0x00000000, 0x00000000, 0x00000000,
448         0x00000000, 0x00000000, 0x00000000, 0x00000000,
449         0x00000000, 0x00000000, 0x00000000, 0x00000000,
450         0x00000000, 0x00000000, 0x00000000, 0x00000000,
451         0x00000000, 0x00000000, 0x00000000, 0x00000000,
452         0x00000000, 0x00000000, 0x00000000, 0x00000000,
453         0x00000000, 0x00000000, 0x00000000, 0x00000000,
454 };
455
456 /* Packet types for GTPC */
457 static const u32 ice_ptypes_gtpc[] = {
458         0x00000000, 0x00000000, 0x00000000, 0x00000000,
459         0x00000000, 0x00000000, 0x00000000, 0x00000000,
460         0x00000000, 0x00000000, 0x000001E0, 0x00000000,
461         0x00000000, 0x00000000, 0x00000000, 0x00000000,
462         0x00000000, 0x00000000, 0x00000000, 0x00000000,
463         0x00000000, 0x00000000, 0x00000000, 0x00000000,
464         0x00000000, 0x00000000, 0x00000000, 0x00000000,
465         0x00000000, 0x00000000, 0x00000000, 0x00000000,
466 };
467
468 /* Packet types for VXLAN with VNI */
469 static const u32 ice_ptypes_vxlan_vni[] = {
470         0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
471         0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
472         0x00000000, 0x00000000, 0x00000000, 0x00000000,
473         0x00000000, 0x00000000, 0x00000000, 0x00000000,
474         0x00000000, 0x00000000, 0x00000000, 0x00000000,
475         0x00000000, 0x00000000, 0x00000000, 0x00000000,
476         0x00000000, 0x00000000, 0x00000000, 0x00000000,
477         0x00000000, 0x00000000, 0x00000000, 0x00000000,
478 };
479
480 /* Packet types for GTPC with TEID */
481 static const u32 ice_ptypes_gtpc_tid[] = {
482         0x00000000, 0x00000000, 0x00000000, 0x00000000,
483         0x00000000, 0x00000000, 0x00000000, 0x00000000,
484         0x00000000, 0x00000000, 0x00000060, 0x00000000,
485         0x00000000, 0x00000000, 0x00000000, 0x00000000,
486         0x00000000, 0x00000000, 0x00000000, 0x00000000,
487         0x00000000, 0x00000000, 0x00000000, 0x00000000,
488         0x00000000, 0x00000000, 0x00000000, 0x00000000,
489         0x00000000, 0x00000000, 0x00000000, 0x00000000,
490 };
491
492 /* Packet types for GTPU */
493 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
494         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
495         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
496         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
497         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
498         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
499         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
500         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
501         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
502         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
503         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
504         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
505         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
506         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
507         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
508         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
509         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
510         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
511         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
512         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
513         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
514 };
515
516 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
517         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
518         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
519         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
520         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
521         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
522         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
523         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
524         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
525         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
526         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
527         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
528         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
529         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
530         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
531         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
532         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
533         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
534         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
535         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
536         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
537 };
538
539 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
540         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
541         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
542         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
543         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
544         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
545         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
546         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
547         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
548         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
549         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
550         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
551         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
552         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
553         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
554         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
555         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
556         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
557         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
558         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
559         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
560 };
561
562 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
563         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
564         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
565         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
566         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
567         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
568         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
569         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
570         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
571         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
572         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
573         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
574         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
575         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
576         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
577         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
578         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
579         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
580         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
581         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
582         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
583 };
584
585 static const u32 ice_ptypes_gtpu[] = {
586         0x00000000, 0x00000000, 0x00000000, 0x00000000,
587         0x00000000, 0x00000000, 0x00000000, 0x00000000,
588         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
589         0x00000000, 0x00000000, 0x00000000, 0x00000000,
590         0x00000000, 0x00000000, 0x00000000, 0x00000000,
591         0x00000000, 0x00000000, 0x00000000, 0x00000000,
592         0x00000000, 0x00000000, 0x00000000, 0x00000000,
593         0x00000000, 0x00000000, 0x00000000, 0x00000000,
594 };
595
596 /* Packet types for pppoe */
597 static const u32 ice_ptypes_pppoe[] = {
598         0x00000000, 0x00000000, 0x00000000, 0x00000000,
599         0x00000000, 0x00000000, 0x00000000, 0x00000000,
600         0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
601         0x00000000, 0x00000000, 0x00000000, 0x00000000,
602         0x00000000, 0x00000000, 0x00000000, 0x00000000,
603         0x00000000, 0x00000000, 0x00000000, 0x00000000,
604         0x00000000, 0x00000000, 0x00000000, 0x00000000,
605         0x00000000, 0x00000000, 0x00000000, 0x00000000,
606 };
607
608 /* Packet types for packets with PFCP NODE header */
609 static const u32 ice_ptypes_pfcp_node[] = {
610         0x00000000, 0x00000000, 0x00000000, 0x00000000,
611         0x00000000, 0x00000000, 0x00000000, 0x00000000,
612         0x00000000, 0x00000000, 0x80000000, 0x00000002,
613         0x00000000, 0x00000000, 0x00000000, 0x00000000,
614         0x00000000, 0x00000000, 0x00000000, 0x00000000,
615         0x00000000, 0x00000000, 0x00000000, 0x00000000,
616         0x00000000, 0x00000000, 0x00000000, 0x00000000,
617         0x00000000, 0x00000000, 0x00000000, 0x00000000,
618 };
619
620 /* Packet types for packets with PFCP SESSION header */
621 static const u32 ice_ptypes_pfcp_session[] = {
622         0x00000000, 0x00000000, 0x00000000, 0x00000000,
623         0x00000000, 0x00000000, 0x00000000, 0x00000000,
624         0x00000000, 0x00000000, 0x00000000, 0x00000005,
625         0x00000000, 0x00000000, 0x00000000, 0x00000000,
626         0x00000000, 0x00000000, 0x00000000, 0x00000000,
627         0x00000000, 0x00000000, 0x00000000, 0x00000000,
628         0x00000000, 0x00000000, 0x00000000, 0x00000000,
629         0x00000000, 0x00000000, 0x00000000, 0x00000000,
630 };
631
632 /* Packet types for l2tpv3 */
633 static const u32 ice_ptypes_l2tpv3[] = {
634         0x00000000, 0x00000000, 0x00000000, 0x00000000,
635         0x00000000, 0x00000000, 0x00000000, 0x00000000,
636         0x00000000, 0x00000000, 0x00000000, 0x00000300,
637         0x00000000, 0x00000000, 0x00000000, 0x00000000,
638         0x00000000, 0x00000000, 0x00000000, 0x00000000,
639         0x00000000, 0x00000000, 0x00000000, 0x00000000,
640         0x00000000, 0x00000000, 0x00000000, 0x00000000,
641         0x00000000, 0x00000000, 0x00000000, 0x00000000,
642 };
643
644 /* Packet types for esp */
645 static const u32 ice_ptypes_esp[] = {
646         0x00000000, 0x00000000, 0x00000000, 0x00000000,
647         0x00000000, 0x00000003, 0x00000000, 0x00000000,
648         0x00000000, 0x00000000, 0x00000000, 0x00000000,
649         0x00000000, 0x00000000, 0x00000000, 0x00000000,
650         0x00000000, 0x00000000, 0x00000000, 0x00000000,
651         0x00000000, 0x00000000, 0x00000000, 0x00000000,
652         0x00000000, 0x00000000, 0x00000000, 0x00000000,
653         0x00000000, 0x00000000, 0x00000000, 0x00000000,
654 };
655
656 /* Packet types for ah */
657 static const u32 ice_ptypes_ah[] = {
658         0x00000000, 0x00000000, 0x00000000, 0x00000000,
659         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
660         0x00000000, 0x00000000, 0x00000000, 0x00000000,
661         0x00000000, 0x00000000, 0x00000000, 0x00000000,
662         0x00000000, 0x00000000, 0x00000000, 0x00000000,
663         0x00000000, 0x00000000, 0x00000000, 0x00000000,
664         0x00000000, 0x00000000, 0x00000000, 0x00000000,
665         0x00000000, 0x00000000, 0x00000000, 0x00000000,
666 };
667
668 /* Packet types for packets with NAT_T ESP header */
669 static const u32 ice_ptypes_nat_t_esp[] = {
670         0x00000000, 0x00000000, 0x00000000, 0x00000000,
671         0x00000000, 0x00000030, 0x00000000, 0x00000000,
672         0x00000000, 0x00000000, 0x00000000, 0x00000000,
673         0x00000000, 0x00000000, 0x00000000, 0x00000000,
674         0x00000000, 0x00000000, 0x00000000, 0x00000000,
675         0x00000000, 0x00000000, 0x00000000, 0x00000000,
676         0x00000000, 0x00000000, 0x00000000, 0x00000000,
677         0x00000000, 0x00000000, 0x00000000, 0x00000000,
678 };
679
680 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
681         0x00000846, 0x00000000, 0x00000000, 0x00000000,
682         0x00000000, 0x00000000, 0x00000000, 0x00000000,
683         0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
684         0x00000000, 0x00000000, 0x00000000, 0x00000000,
685         0x00000000, 0x00000000, 0x00000000, 0x00000000,
686         0x00000000, 0x00000000, 0x00000000, 0x00000000,
687         0x00000000, 0x00000000, 0x00000000, 0x00000000,
688         0x00000000, 0x00000000, 0x00000000, 0x00000000,
689 };
690
691 static const u32 ice_ptypes_gtpu_no_ip[] = {
692         0x00000000, 0x00000000, 0x00000000, 0x00000000,
693         0x00000000, 0x00000000, 0x00000000, 0x00000000,
694         0x00000000, 0x00000000, 0x00000600, 0x00000000,
695         0x00000000, 0x00000000, 0x00000000, 0x00000000,
696         0x00000000, 0x00000000, 0x00000000, 0x00000000,
697         0x00000000, 0x00000000, 0x00000000, 0x00000000,
698         0x00000000, 0x00000000, 0x00000000, 0x00000000,
699         0x00000000, 0x00000000, 0x00000000, 0x00000000,
700 };
701
702 static const u32 ice_ptypes_ecpri_tp0[] = {
703         0x00000000, 0x00000000, 0x00000000, 0x00000000,
704         0x00000000, 0x00000000, 0x00000000, 0x00000000,
705         0x00000000, 0x00000000, 0x00000000, 0x00000400,
706         0x00000000, 0x00000000, 0x00000000, 0x00000000,
707         0x00000000, 0x00000000, 0x00000000, 0x00000000,
708         0x00000000, 0x00000000, 0x00000000, 0x00000000,
709         0x00000000, 0x00000000, 0x00000000, 0x00000000,
710         0x00000000, 0x00000000, 0x00000000, 0x00000000,
711 };
712
713 /* Manage parameters and info used during the creation of a flow profile */
714 struct ice_flow_prof_params {
715         enum ice_block blk;
716         u16 entry_length; /* # of bytes formatted entry will require */
717         u8 es_cnt;
718         struct ice_flow_prof *prof;
719
720         /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
721          * This will give us the direction flags.
722          */
723         struct ice_fv_word es[ICE_MAX_FV_WORDS];
724         /* attributes can be used to add attributes to a particular PTYPE */
725         const struct ice_ptype_attributes *attr;
726         u16 attr_cnt;
727
728         u16 mask[ICE_MAX_FV_WORDS];
729         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
730 };
731
732 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
733         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
734         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
735         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
736         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
737         ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
738         ICE_FLOW_SEG_HDR_ECPRI_TP0)
739
740 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
741         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
742 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
743         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
744          ICE_FLOW_SEG_HDR_ARP)
745 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
746         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
747          ICE_FLOW_SEG_HDR_SCTP)
748 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
749 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
750         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
751
752 /**
753  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
754  * @segs: array of one or more packet segments that describe the flow
755  * @segs_cnt: number of packet segments provided
756  */
757 static enum ice_status
758 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
759 {
760         u8 i;
761
762         for (i = 0; i < segs_cnt; i++) {
763                 /* Multiple L3 headers */
764                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
765                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
766                         return ICE_ERR_PARAM;
767
768                 /* Multiple L4 headers */
769                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
770                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
771                         return ICE_ERR_PARAM;
772         }
773
774         return ICE_SUCCESS;
775 }
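/* For example, a segment whose hdrs field has both ICE_FLOW_SEG_HDR_IPV4 and
 * ICE_FLOW_SEG_HDR_IPV6 set fails the power-of-two check against
 * ICE_FLOW_SEG_HDRS_L3_MASK above and is rejected with ICE_ERR_PARAM; the
 * same applies to two or more L4 headers (e.g. TCP together with UDP).
 */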
776
777 /* Sizes of fixed known protocol headers without header options */
778 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
779 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
780 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
781 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
782 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
783 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
784 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
785 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
786 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
787
788 /**
789  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
790  * @params: information about the flow to be processed
791  * @seg: index of packet segment whose header size is to be determined
792  */
793 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
794 {
795         u16 sz;
796
797         /* L2 headers */
798         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
799                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
800
801         /* L3 headers */
802         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
803                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
804         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
805                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
806         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
807                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
808         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
809                 /* An L3 header is required if L4 is specified */
810                 return 0;
811
812         /* L4 headers */
813         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
814                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
815         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
816                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
817         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
818                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
819         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
820                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
821
822         return sz;
823 }
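/* Worked example: a segment with ICE_FLOW_SEG_HDR_VLAN, ICE_FLOW_SEG_HDR_IPV6
 * and ICE_FLOW_SEG_HDR_UDP set yields 16 + 40 + 8 = 64 bytes
 * (ICE_FLOW_PROT_HDR_SZ_MAC_VLAN + ICE_FLOW_PROT_HDR_SZ_IPV6 +
 * ICE_FLOW_PROT_HDR_SZ_UDP), while specifying an L4 header without any L3
 * header makes the function return 0.
 */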
824
825 /**
826  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
827  * @params: information about the flow to be processed
828  *
829  * This function identifies the packet types associated with the protocol
830  * headers being present in packet segments of the specified flow profile.
831  */
832 static enum ice_status
833 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
834 {
835         struct ice_flow_prof *prof;
836         u8 i;
837
838         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
839                    ICE_NONDMA_MEM);
840
841         prof = params->prof;
842
843         for (i = 0; i < params->prof->segs_cnt; i++) {
844                 const ice_bitmap_t *src;
845                 u32 hdrs;
846
847                 hdrs = prof->segs[i].hdrs;
848
849                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
850                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
851                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
852                         ice_and_bitmap(params->ptypes, params->ptypes, src,
853                                        ICE_FLOW_PTYPE_MAX);
854                 }
855
856                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
857                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
858                         ice_and_bitmap(params->ptypes, params->ptypes, src,
859                                        ICE_FLOW_PTYPE_MAX);
860                 }
861
862                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
863                         ice_and_bitmap(params->ptypes, params->ptypes,
864                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
865                                        ICE_FLOW_PTYPE_MAX);
866                 }
867
868                 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
869                         src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
870                         ice_and_bitmap(params->ptypes, params->ptypes, src,
871                                        ICE_FLOW_PTYPE_MAX);
872                 }
873                 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
874                     (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
875                         src = i ?
876                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
877                                 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
878                         ice_and_bitmap(params->ptypes, params->ptypes, src,
879                                        ICE_FLOW_PTYPE_MAX);
880                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
881                            (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
882                         src = i ?
883                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
884                                 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
885                         ice_and_bitmap(params->ptypes, params->ptypes, src,
886                                        ICE_FLOW_PTYPE_MAX);
887                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
888                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
889                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
890                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
891                         ice_and_bitmap(params->ptypes, params->ptypes, src,
892                                        ICE_FLOW_PTYPE_MAX);
893                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
894                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
895                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
896                         ice_and_bitmap(params->ptypes, params->ptypes, src,
897                                        ICE_FLOW_PTYPE_MAX);
898                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
899                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
900                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
901                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
902                         ice_and_bitmap(params->ptypes, params->ptypes, src,
903                                        ICE_FLOW_PTYPE_MAX);
904                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
905                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
906                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
907                         ice_and_bitmap(params->ptypes, params->ptypes, src,
908                                        ICE_FLOW_PTYPE_MAX);
909                 }
910
911                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
912                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
913                         ice_and_bitmap(params->ptypes, params->ptypes,
914                                        src, ICE_FLOW_PTYPE_MAX);
915                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
916                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
917                         ice_and_bitmap(params->ptypes, params->ptypes, src,
918                                        ICE_FLOW_PTYPE_MAX);
919                 } else {
920                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
921                         ice_andnot_bitmap(params->ptypes, params->ptypes, src,
922                                           ICE_FLOW_PTYPE_MAX);
923                 }
924
925                 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
926                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
927                         ice_and_bitmap(params->ptypes, params->ptypes, src,
928                                        ICE_FLOW_PTYPE_MAX);
929                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
930                         ice_and_bitmap(params->ptypes, params->ptypes,
931                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
932                                        ICE_FLOW_PTYPE_MAX);
933                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
934                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
935                         ice_and_bitmap(params->ptypes, params->ptypes, src,
936                                        ICE_FLOW_PTYPE_MAX);
937                 }
938
939                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
940                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
941                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
942                         ice_and_bitmap(params->ptypes, params->ptypes, src,
943                                        ICE_FLOW_PTYPE_MAX);
944                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
945                         if (!i) {
946                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
947                                 ice_and_bitmap(params->ptypes, params->ptypes,
948                                                src, ICE_FLOW_PTYPE_MAX);
949                         }
950                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
951                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
952                         ice_and_bitmap(params->ptypes, params->ptypes,
953                                        src, ICE_FLOW_PTYPE_MAX);
954                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
955                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
956                         ice_and_bitmap(params->ptypes, params->ptypes,
957                                        src, ICE_FLOW_PTYPE_MAX);
958                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
959                         src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
960                         ice_and_bitmap(params->ptypes, params->ptypes,
961                                        src, ICE_FLOW_PTYPE_MAX);
962                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
963                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
964                         ice_and_bitmap(params->ptypes, params->ptypes,
965                                        src, ICE_FLOW_PTYPE_MAX);
966
967                         /* Attributes for GTP packet with downlink */
968                         params->attr = ice_attr_gtpu_down;
969                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
970                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
971                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
972                         ice_and_bitmap(params->ptypes, params->ptypes,
973                                        src, ICE_FLOW_PTYPE_MAX);
974
975                         /* Attributes for GTP packet with uplink */
976                         params->attr = ice_attr_gtpu_up;
977                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
978                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
979                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
980                         ice_and_bitmap(params->ptypes, params->ptypes,
981                                        src, ICE_FLOW_PTYPE_MAX);
982
983                         /* Attributes for GTP packet with Extension Header */
984                         params->attr = ice_attr_gtpu_eh;
985                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
986                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
987                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
988                         ice_and_bitmap(params->ptypes, params->ptypes,
989                                        src, ICE_FLOW_PTYPE_MAX);
990
991                         /* Attributes for GTP packet without Extension Header */
992                         params->attr = ice_attr_gtpu_session;
993                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
994                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
995                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
996                         ice_and_bitmap(params->ptypes, params->ptypes,
997                                        src, ICE_FLOW_PTYPE_MAX);
998                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
999                         src = (const ice_bitmap_t *)ice_ptypes_esp;
1000                         ice_and_bitmap(params->ptypes, params->ptypes,
1001                                        src, ICE_FLOW_PTYPE_MAX);
1002                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1003                         src = (const ice_bitmap_t *)ice_ptypes_ah;
1004                         ice_and_bitmap(params->ptypes, params->ptypes,
1005                                        src, ICE_FLOW_PTYPE_MAX);
1006                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1007                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1008                         ice_and_bitmap(params->ptypes, params->ptypes,
1009                                        src, ICE_FLOW_PTYPE_MAX);
1010                 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1011                         src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1012                         ice_and_bitmap(params->ptypes, params->ptypes,
1013                                        src, ICE_FLOW_PTYPE_MAX);
1014                 }
1015
1016                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1017                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1018                                 src =
1019                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1020                         else
1021                                 src =
1022                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1023
1024                         ice_and_bitmap(params->ptypes, params->ptypes,
1025                                        src, ICE_FLOW_PTYPE_MAX);
1026                 } else {
1027                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1028                         ice_andnot_bitmap(params->ptypes, params->ptypes,
1029                                           src, ICE_FLOW_PTYPE_MAX);
1030
1031                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1032                         ice_andnot_bitmap(params->ptypes, params->ptypes,
1033                                           src, ICE_FLOW_PTYPE_MAX);
1034                 }
1035         }
1036
1037         return ICE_SUCCESS;
1038 }
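/* Note: in addition to narrowing params->ptypes, the GTPU branches above also
 * record the matching attribute table (ice_attr_gtpu_session/_eh/_down/_up)
 * in params->attr and params->attr_cnt, so that the session/EH/uplink/
 * downlink PTYPE attributes can later be applied to the profile's PTYPEs.
 */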
1039
1040 /**
1041  * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
1042  * @hw: pointer to the HW struct
1043  * @params: information about the flow to be processed
1044  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1045  *
1046  * This function allocates an extraction sequence entry for a DWORD-sized
1047  * chunk of the packet flags.
1048  */
1049 static enum ice_status
1050 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1051                           struct ice_flow_prof_params *params,
1052                           enum ice_flex_mdid_pkt_flags flags)
1053 {
1054         u8 fv_words = hw->blk[params->blk].es.fvw;
1055         u8 idx;
1056
1057         /* Make sure the number of extraction sequence entries required does not
1058          * exceed the block's capacity.
1059          */
1060         if (params->es_cnt >= fv_words)
1061                 return ICE_ERR_MAX_LIMIT;
1062
1063         /* some blocks require a reversed field vector layout */
1064         if (hw->blk[params->blk].es.reverse)
1065                 idx = fv_words - params->es_cnt - 1;
1066         else
1067                 idx = params->es_cnt;
1068
1069         params->es[idx].prot_id = ICE_PROT_META_ID;
1070         params->es[idx].off = flags;
1071         params->es_cnt++;
1072
1073         return ICE_SUCCESS;
1074 }
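/* For illustration (the value 48 is hypothetical): with fv_words == 48 and a
 * reversed field vector layout, the first entry added (es_cnt == 0) lands at
 * index 47, the next at 46, and so on; with a non-reversed layout the same
 * entries would occupy indices 0, 1, ...
 */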
1075
1076 /**
1077  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1078  * @hw: pointer to the HW struct
1079  * @params: information about the flow to be processed
1080  * @seg: packet segment index of the field to be extracted
1081  * @fld: ID of field to be extracted
1082  * @match: bitfield of all fields
1083  *
1084  * This function determines the protocol ID, offset, and size of the given
1085  * field. It then allocates one or more extraction sequence entries for the
1086  * given field, and fills the entries with protocol ID and offset information.
1087  */
1088 static enum ice_status
1089 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1090                     u8 seg, enum ice_flow_field fld, u64 match)
1091 {
1092         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1093         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1094         u8 fv_words = hw->blk[params->blk].es.fvw;
1095         struct ice_flow_fld_info *flds;
1096         u16 cnt, ese_bits, i;
1097         u16 sib_mask = 0;
1098         u16 mask;
1099         u16 off;
1100
1101         flds = params->prof->segs[seg].fields;
1102
1103         switch (fld) {
1104         case ICE_FLOW_FIELD_IDX_ETH_DA:
1105         case ICE_FLOW_FIELD_IDX_ETH_SA:
1106         case ICE_FLOW_FIELD_IDX_S_VLAN:
1107         case ICE_FLOW_FIELD_IDX_C_VLAN:
1108                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1109                 break;
1110         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1111                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1112                 break;
1113         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1114                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1115                 break;
1116         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1117                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1118                 break;
1119         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1120         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1121                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1122
1123                 /* TTL and PROT share the same extraction sequence entry,
1124                  * so each is considered the other's sibling; only one
1125                  * entry is consumed for both fields.
1126                  */
1127                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1128                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1129                 else
1130                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1131
1132                 /* If the sibling field is also included, that field's
1133                  * mask needs to be included.
1134                  */
1135                 if (match & BIT(sib))
1136                         sib_mask = ice_flds_info[sib].mask;
1137                 break;
1138         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1139         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1140                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1141
1142                 /* TTL and PROT share the same extraction sequence entry,
1143                  * so each is considered the other's sibling; only one
1144                  * entry is consumed for both fields.
1145                  */
1146                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1147                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1148                 else
1149                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1150
1151                 /* If the sibling field is also included, that field's
1152                  * mask needs to be included.
1153                  */
1154                 if (match & BIT(sib))
1155                         sib_mask = ice_flds_info[sib].mask;
1156                 break;
1157         case ICE_FLOW_FIELD_IDX_IPV4_SA:
1158         case ICE_FLOW_FIELD_IDX_IPV4_DA:
1159                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1160                 break;
1161         case ICE_FLOW_FIELD_IDX_IPV6_SA:
1162         case ICE_FLOW_FIELD_IDX_IPV6_DA:
1163         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1164         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1165         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1166         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1167         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1168         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1169                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1170                 break;
1171         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1172         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1173         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1174                 prot_id = ICE_PROT_TCP_IL;
1175                 break;
1176         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1177         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1178                 prot_id = ICE_PROT_UDP_IL_OR_S;
1179                 break;
1180         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1181         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1182                 prot_id = ICE_PROT_SCTP_IL;
1183                 break;
1184         case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1185         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1186         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1187         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1188         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1189         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1190         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1191                 /* GTP is accessed through UDP OF protocol */
1192                 prot_id = ICE_PROT_UDP_OF;
1193                 break;
1194         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1195                 prot_id = ICE_PROT_PPPOE;
1196                 break;
1197         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1198                 prot_id = ICE_PROT_UDP_IL_OR_S;
1199                 break;
1200         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1201                 prot_id = ICE_PROT_L2TPV3;
1202                 break;
1203         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1204                 prot_id = ICE_PROT_ESP_F;
1205                 break;
1206         case ICE_FLOW_FIELD_IDX_AH_SPI:
1207                 prot_id = ICE_PROT_ESP_2;
1208                 break;
1209         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1210                 prot_id = ICE_PROT_UDP_IL_OR_S;
1211                 break;
1212         case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1213                 prot_id = ICE_PROT_ECPRI;
1214                 break;
1215         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1216         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1217         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1218         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1219         case ICE_FLOW_FIELD_IDX_ARP_OP:
1220                 prot_id = ICE_PROT_ARP_OF;
1221                 break;
1222         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1223         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1224                 /* ICMP type and code share the same extraction seq. entry */
1225                 prot_id = (params->prof->segs[seg].hdrs &
1226                            ICE_FLOW_SEG_HDR_IPV4) ?
1227                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1228                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1229                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1230                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1231                 break;
1232         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1233                 prot_id = ICE_PROT_GRE_OF;
1234                 break;
1235         default:
1236                 return ICE_ERR_NOT_IMPL;
1237         }
1238
1239         /* Each extraction sequence entry is a word in size, and extracts a
1240          * word-aligned offset from a protocol header.
1241          */
1242         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1243
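        /* Record extraction info for this field: the word-aligned byte offset
         * of the field within its protocol header, the remaining bit
         * displacement within that word, and the HW mask to apply.
         */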
1244         flds[fld].xtrct.prot_id = prot_id;
1245         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1246                 ICE_FLOW_FV_EXTRACT_SZ;
1247         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1248         flds[fld].xtrct.idx = params->es_cnt;
1249         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1250
1251         /* Adjust the next field-entry index after accommodating the number of
1252          * entries this field consumes
1253          */
1254         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1255                                   ice_flds_info[fld].size, ese_bits);
1256
1257         /* Fill in the extraction sequence entries needed for this field */
1258         off = flds[fld].xtrct.off;
1259         mask = flds[fld].xtrct.mask;
1260         for (i = 0; i < cnt; i++) {
1261                 /* Only consume an extraction sequence entry if there is no
1262                  * sibling field associated with this field or the sibling entry
1263                  * already extracts the word shared with this field.
1264                  */
1265                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1266                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1267                     flds[sib].xtrct.off != off) {
1268                         u8 idx;
1269
1270                         /* Make sure the number of extraction sequence
1271                          * entries required does not exceed the block's capacity
1272                          */
1273                         if (params->es_cnt >= fv_words)
1274                                 return ICE_ERR_MAX_LIMIT;
1275
1276                         /* some blocks require a reversed field vector layout */
1277                         if (hw->blk[params->blk].es.reverse)
1278                                 idx = fv_words - params->es_cnt - 1;
1279                         else
1280                                 idx = params->es_cnt;
1281
1282                         params->es[idx].prot_id = prot_id;
1283                         params->es[idx].off = off;
1284                         params->mask[idx] = mask | sib_mask;
1285                         params->es_cnt++;
1286                 }
1287
1288                 off += ICE_FLOW_FV_EXTRACT_SZ;
1289         }
1290
1291         return ICE_SUCCESS;
1292 }
1293
1294 /**
1295  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1296  * @hw: pointer to the HW struct
1297  * @params: information about the flow to be processed
1298  * @seg: index of packet segment whose raw fields are to be extracted
1299  */
1300 static enum ice_status
1301 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1302                      u8 seg)
1303 {
1304         u16 fv_words;
1305         u16 hdrs_sz;
1306         u8 i;
1307
1308         if (!params->prof->segs[seg].raws_cnt)
1309                 return ICE_SUCCESS;
1310
1311         if (params->prof->segs[seg].raws_cnt >
1312             ARRAY_SIZE(params->prof->segs[seg].raws))
1313                 return ICE_ERR_MAX_LIMIT;
1314
1315         /* Offsets within the segment headers are not supported */
1316         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1317         if (!hdrs_sz)
1318                 return ICE_ERR_PARAM;
1319
1320         fv_words = hw->blk[params->blk].es.fvw;
1321
1322         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1323                 struct ice_flow_seg_fld_raw *raw;
1324                 u16 off, cnt, j;
1325
1326                 raw = &params->prof->segs[seg].raws[i];
1327
1328                 /* Storing extraction information */
1329                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1330                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1331                         ICE_FLOW_FV_EXTRACT_SZ;
1332                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1333                         BITS_PER_BYTE;
1334                 raw->info.xtrct.idx = params->es_cnt;
1335
1336                 /* Determine the number of field vector entries this raw field
1337                  * consumes.
1338                  */
1339                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1340                                           (raw->info.src.last * BITS_PER_BYTE),
1341                                           (ICE_FLOW_FV_EXTRACT_SZ *
1342                                            BITS_PER_BYTE));
1343                 off = raw->info.xtrct.off;
1344                 for (j = 0; j < cnt; j++) {
1345                         u16 idx;
1346
1347                         /* Make sure the number of extraction sequence
1348                          * entries required does not exceed the block's capacity
1349                          */
1350                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1351                             params->es_cnt >= ICE_MAX_FV_WORDS)
1352                                 return ICE_ERR_MAX_LIMIT;
1353
1354                         /* some blocks require a reversed field vector layout */
1355                         if (hw->blk[params->blk].es.reverse)
1356                                 idx = fv_words - params->es_cnt - 1;
1357                         else
1358                                 idx = params->es_cnt;
1359
1360                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1361                         params->es[idx].off = off;
1362                         params->es_cnt++;
1363                         off += ICE_FLOW_FV_EXTRACT_SZ;
1364                 }
1365         }
1366
1367         return ICE_SUCCESS;
1368 }
1369
1370 /**
1371  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1372  * @hw: pointer to the HW struct
1373  * @params: information about the flow to be processed
1374  *
1375  * This function iterates through all matched fields in the given segments, and
1376  * creates an extraction sequence for the fields.
1377  */
1378 static enum ice_status
1379 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1380                           struct ice_flow_prof_params *params)
1381 {
1382         enum ice_status status = ICE_SUCCESS;
1383         u8 i;
1384
1385         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1386          * packet flags
1387          */
1388         if (params->blk == ICE_BLK_ACL) {
1389                 status = ice_flow_xtract_pkt_flags(hw, params,
1390                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1391                 if (status)
1392                         return status;
1393         }
1394
1395         for (i = 0; i < params->prof->segs_cnt; i++) {
1396                 u64 match = params->prof->segs[i].match;
1397                 enum ice_flow_field j;
1398
1399                 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1400                                      ICE_FLOW_FIELD_IDX_MAX) {
1401                         status = ice_flow_xtract_fld(hw, params, i, j, match);
1402                         if (status)
1403                                 return status;
1404                         ice_clear_bit(j, (ice_bitmap_t *)&match);
1405                 }
1406
1407                 /* Process raw matching bytes */
1408                 status = ice_flow_xtract_raws(hw, params, i);
1409                 if (status)
1410                         return status;
1411         }
1412
1413         return status;
1414 }
1415
1416 /**
1417  * ice_flow_sel_acl_scen - Select an ACL scenario for the flow profile
1418  * @hw: pointer to the hardware structure
1419  * @params: information about the flow to be processed
1420  *
1421  * This function selects the best-fit ACL scenario: the narrowest scenario
1422  * whose effective width can accommodate the profile's entry length.
1423  */
1424 static enum ice_status
1425 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1426 {
1427         /* Find the best-fit scenario for the provided match width */
1428         struct ice_acl_scen *cand_scen = NULL, *scen;
1429
1430         if (!hw->acl_tbl)
1431                 return ICE_ERR_DOES_NOT_EXIST;
1432
1433         /* Loop through each scenario and match against the scenario width
1434          * to select the specific scenario
1435          */
1436         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1437                 if (scen->eff_width >= params->entry_length &&
1438                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1439                         cand_scen = scen;
1440         if (!cand_scen)
1441                 return ICE_ERR_DOES_NOT_EXIST;
1442
1443         params->prof->cfg.scen = cand_scen;
1444
1445         return ICE_SUCCESS;
1446 }
1447
1448 /**
1449  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1450  * @params: information about the flow to be processed
1451  */
1452 static enum ice_status
1453 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1454 {
1455         u16 index, i, range_idx = 0;
1456
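        /* Lay out the ACL entry: regular and raw fields are assigned
         * consecutive byte-selection indices starting at the base index,
         * while range-checked fields are assigned range-checker slots instead.
         */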
1457         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1458
1459         for (i = 0; i < params->prof->segs_cnt; i++) {
1460                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1461                 u8 j;
1462
1463                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1464                                      ICE_FLOW_FIELD_IDX_MAX) {
1465                         struct ice_flow_fld_info *fld = &seg->fields[j];
1466
1467                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1468
1469                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1470                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1471
1472                                 /* Range checking only supported for single
1473                                  * words
1474                                  */
1475                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1476                                                         fld->xtrct.disp,
1477                                                         BITS_PER_BYTE * 2) > 1)
1478                                         return ICE_ERR_PARAM;
1479
1480                                 /* Ranges must define low and high values */
1481                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1482                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1483                                         return ICE_ERR_PARAM;
1484
1485                                 fld->entry.val = range_idx++;
1486                         } else {
1487                                 /* Store adjusted byte-length of field for later
1488                                  * use, taking into account potential
1489                                  * non-byte-aligned displacement
1490                                  */
1491                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1492                                         (ice_flds_info[j].size +
1493                                          (fld->xtrct.disp % BITS_PER_BYTE),
1494                                          BITS_PER_BYTE);
1495                                 fld->entry.val = index;
1496                                 index += fld->entry.last;
1497                         }
1498                 }
1499
1500                 for (j = 0; j < seg->raws_cnt; j++) {
1501                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1502
1503                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1504                         raw->info.entry.val = index;
1505                         raw->info.entry.last = raw->info.src.last;
1506                         index += raw->info.entry.last;
1507                 }
1508         }
1509
1510         /* Currently, only the byte selection base is supported, which only
1511          * allows for an effective entry size of 30 bytes. Reject anything
1512          * larger.
1513          */
1514         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1515                 return ICE_ERR_PARAM;
1516
1517         /* Only 8 range checkers per profile, reject anything trying to use
1518          * more
1519          */
1520         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1521                 return ICE_ERR_PARAM;
1522
1523         /* Store # bytes required for entry for later use */
1524         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1525
1526         return ICE_SUCCESS;
1527 }
1528
1529 /**
1530  * ice_flow_proc_segs - process all packet segments associated with a profile
1531  * @hw: pointer to the HW struct
1532  * @params: information about the flow to be processed
1533  */
1534 static enum ice_status
1535 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1536 {
1537         enum ice_status status;
1538
1539         status = ice_flow_proc_seg_hdrs(params);
1540         if (status)
1541                 return status;
1542
1543         status = ice_flow_create_xtrct_seq(hw, params);
1544         if (status)
1545                 return status;
1546
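        /* Perform block-specific processing of the extraction sequence: FD and
         * RSS need nothing further, while ACL also defines the entry layout
         * and selects a matching scenario.
         */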
1547         switch (params->blk) {
1548         case ICE_BLK_FD:
1549         case ICE_BLK_RSS:
1550                 status = ICE_SUCCESS;
1551                 break;
1552         case ICE_BLK_ACL:
1553                 status = ice_flow_acl_def_entry_frmt(params);
1554                 if (status)
1555                         return status;
1556                 status = ice_flow_sel_acl_scen(hw, params);
1557                 if (status)
1558                         return status;
1559                 break;
1560         default:
1561                 return ICE_ERR_NOT_IMPL;
1562         }
1563
1564         return status;
1565 }
1566
1567 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1568 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1569 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1570
1571 /**
1572  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1573  * @hw: pointer to the HW struct
1574  * @blk: classification stage
1575  * @dir: flow direction
1576  * @segs: array of one or more packet segments that describe the flow
1577  * @segs_cnt: number of packet segments provided
1578  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1579  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1580  */
1581 static struct ice_flow_prof *
1582 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1583                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1584                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1585 {
1586         struct ice_flow_prof *p, *prof = NULL;
1587
1588         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1589         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1590                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1591                     segs_cnt && segs_cnt == p->segs_cnt) {
1592                         u8 i;
1593
1594                         /* Check for profile-VSI association if specified */
1595                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1596                             ice_is_vsi_valid(hw, vsi_handle) &&
1597                             !ice_is_bit_set(p->vsis, vsi_handle))
1598                                 continue;
1599
1600                         /* Protocol headers must be checked. Matched fields are
1601                          * checked if specified.
1602                          */
1603                         for (i = 0; i < segs_cnt; i++)
1604                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1605                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1606                                      segs[i].match != p->segs[i].match))
1607                                         break;
1608
1609                         /* A match is found if all segments are matched */
1610                         if (i == segs_cnt) {
1611                                 prof = p;
1612                                 break;
1613                         }
1614                 }
1615         ice_release_lock(&hw->fl_profs_locks[blk]);
1616
1617         return prof;
1618 }
1619
1620 /**
1621  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1622  * @hw: pointer to the HW struct
1623  * @blk: classification stage
1624  * @dir: flow direction
1625  * @segs: array of one or more packet segments that describe the flow
1626  * @segs_cnt: number of packet segments provided
1627  */
1628 u64
1629 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1630                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1631 {
1632         struct ice_flow_prof *p;
1633
1634         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1635                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1636
1637         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1638 }
1639
1640 /**
1641  * ice_flow_find_prof_id - Look up a profile with given profile ID
1642  * @hw: pointer to the HW struct
1643  * @blk: classification stage
1644  * @prof_id: unique ID to identify this flow profile
1645  */
1646 static struct ice_flow_prof *
1647 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1648 {
1649         struct ice_flow_prof *p;
1650
1651         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1652                 if (p->id == prof_id)
1653                         return p;
1654
1655         return NULL;
1656 }
1657
1658 /**
1659  * ice_dealloc_flow_entry - Deallocate flow entry memory
1660  * @hw: pointer to the HW struct
1661  * @entry: flow entry to be removed
1662  */
1663 static void
1664 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1665 {
1666         if (!entry)
1667                 return;
1668
1669         if (entry->entry)
1670                 ice_free(hw, entry->entry);
1671
1672         if (entry->range_buf) {
1673                 ice_free(hw, entry->range_buf);
1674                 entry->range_buf = NULL;
1675         }
1676
1677         if (entry->acts) {
1678                 ice_free(hw, entry->acts);
1679                 entry->acts = NULL;
1680                 entry->acts_cnt = 0;
1681         }
1682
1683         ice_free(hw, entry);
1684 }
1685
1686 /**
1687  * ice_flow_get_hw_prof - return the HW profile ID for a specific profile ID handle
1688  * @hw: pointer to the HW struct
1689  * @blk: classification stage
1690  * @prof_id: the profile ID handle
1691  * @hw_prof_id: pointer to variable to receive the HW profile ID
1692  */
1693 enum ice_status
1694 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1695                      u8 *hw_prof_id)
1696 {
1697         enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1698         struct ice_prof_map *map;
1699
1700         ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1701         map = ice_search_prof_id(hw, blk, prof_id);
1702         if (map) {
1703                 *hw_prof_id = map->prof_id;
1704                 status = ICE_SUCCESS;
1705         }
1706         ice_release_lock(&hw->blk[blk].es.prof_map_lock);
1707         return status;
1708 }
1709
1710 #define ICE_ACL_INVALID_SCEN    0x3f
1711
1712 /**
1713  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
1714  * @hw: pointer to the hardware structure
1715  * @prof: pointer to flow profile
1716  * @buf: destination buffer the function writes the partial extraction sequence to
1717  *
1718  * Returns ICE_SUCCESS if no PF is associated with the given profile,
1719  * ICE_ERR_IN_USE if at least one PF is associated with the given profile, or
1720  * another error code on a real error.
1721  */
1722 static enum ice_status
1723 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1724                             struct ice_aqc_acl_prof_generic_frmt *buf)
1725 {
1726         enum ice_status status;
1727         u8 prof_id = 0;
1728
1729         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1730         if (status)
1731                 return status;
1732
1733         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1734         if (status)
1735                 return status;
1736
1737         /* If all PFs' scenarios for the given profile are either all 0 or all
1738          * ICE_ACL_INVALID_SCEN (63), then the profile has not been configured
1739          * yet.
1740          */
1741         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1742             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1743             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1744             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1745                 return ICE_SUCCESS;
1746
1747         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1748             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1749             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1750             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1751             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1752             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1753             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1754             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1755                 return ICE_SUCCESS;
1756
1757         return ICE_ERR_IN_USE;
1758 }
1759
1760 /**
1761  * ice_flow_acl_free_act_cntr - Deallocate ACL counters used by the rule's actions
1762  * @hw: pointer to the hardware structure
1763  * @acts: array of actions to be performed on a match
1764  * @acts_cnt: number of actions
1765  */
1766 static enum ice_status
1767 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1768                            u8 acts_cnt)
1769 {
1770         int i;
1771
1772         for (i = 0; i < acts_cnt; i++) {
1773                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1774                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1775                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1776                         struct ice_acl_cntrs cntrs;
1777                         enum ice_status status;
1778
1779                         cntrs.bank = 0; /* Only bank0 for the moment */
1780                         cntrs.first_cntr =
1781                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1782                         cntrs.last_cntr =
1783                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1784
1785                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1786                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1787                         else
1788                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1789
1790                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1791                         if (status)
1792                                 return status;
1793                 }
1794         }
1795         return ICE_SUCCESS;
1796 }
1797
1798 /**
1799  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1800  * @hw: pointer to the hardware structure
1801  * @prof: pointer to flow profile
1802  *
1803  * Disassociate the scenario from the profile for the current PF.
1804  */
1805 static enum ice_status
1806 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1807 {
1808         struct ice_aqc_acl_prof_generic_frmt buf;
1809         enum ice_status status = ICE_SUCCESS;
1810         u8 prof_id = 0;
1811
1812         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1813
1814         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1815         if (status)
1816                 return status;
1817
1818         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1819         if (status)
1820                 return status;
1821
1822         /* Clear scenario for this PF */
1823         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1824         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
1825
1826         return status;
1827 }
1828
1829 /**
1830  * ice_flow_rem_entry_sync - Remove a flow entry
1831  * @hw: pointer to the HW struct
1832  * @blk: classification stage
1833  * @entry: flow entry to be removed
1834  */
1835 static enum ice_status
1836 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1837                         struct ice_flow_entry *entry)
1838 {
1839         if (!entry)
1840                 return ICE_ERR_BAD_PTR;
1841
1842         if (blk == ICE_BLK_ACL) {
1843                 enum ice_status status;
1844
1845                 if (!entry->prof)
1846                         return ICE_ERR_BAD_PTR;
1847
1848                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1849                                            entry->scen_entry_idx);
1850                 if (status)
1851                         return status;
1852
1853                 /* Checks if we need to release an ACL counter. */
1854                 if (entry->acts_cnt && entry->acts)
1855                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1856                                                    entry->acts_cnt);
1857         }
1858
1859         LIST_DEL(&entry->l_entry);
1860
1861         ice_dealloc_flow_entry(hw, entry);
1862
1863         return ICE_SUCCESS;
1864 }
1865
1866 /**
1867  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1868  * @hw: pointer to the HW struct
1869  * @blk: classification stage
1870  * @dir: flow direction
1871  * @prof_id: unique ID to identify this flow profile
1872  * @segs: array of one or more packet segments that describe the flow
1873  * @segs_cnt: number of packet segments provided
1874  * @acts: array of default actions
1875  * @acts_cnt: number of default actions
1876  * @prof: stores the returned flow profile added
1877  *
1878  * Assumption: the caller has acquired the lock to the profile list
1879  */
1880 static enum ice_status
1881 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1882                        enum ice_flow_dir dir, u64 prof_id,
1883                        struct ice_flow_seg_info *segs, u8 segs_cnt,
1884                        struct ice_flow_action *acts, u8 acts_cnt,
1885                        struct ice_flow_prof **prof)
1886 {
1887         struct ice_flow_prof_params *params;
1888         enum ice_status status;
1889         u8 i;
1890
1891         if (!prof || (acts_cnt && !acts))
1892                 return ICE_ERR_BAD_PTR;
1893
1894         params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
1895         if (!params)
1896                 return ICE_ERR_NO_MEMORY;
1897
1898         params->prof = (struct ice_flow_prof *)
1899                 ice_malloc(hw, sizeof(*params->prof));
1900         if (!params->prof) {
1901                 status = ICE_ERR_NO_MEMORY;
1902                 goto free_params;
1903         }
1904
1905         /* initialize extraction sequence to all invalid (0xff) */
1906         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1907                 params->es[i].prot_id = ICE_PROT_INVALID;
1908                 params->es[i].off = ICE_FV_OFFSET_INVAL;
1909         }
1910
1911         params->blk = blk;
1912         params->prof->id = prof_id;
1913         params->prof->dir = dir;
1914         params->prof->segs_cnt = segs_cnt;
1915
1916         /* Make a copy of the segments that need to be persistent in the flow
1917          * profile instance
1918          */
1919         for (i = 0; i < segs_cnt; i++)
1920                 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
1921                            ICE_NONDMA_TO_NONDMA);
1922
1923         /* Make a copy of the actions that need to be persistent in the flow
1924          * profile instance.
1925          */
1926         if (acts_cnt) {
1927                 params->prof->acts = (struct ice_flow_action *)
1928                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1929                                    ICE_NONDMA_TO_NONDMA);
1930
1931                 if (!params->prof->acts) {
1932                         status = ICE_ERR_NO_MEMORY;
1933                         goto out;
1934                 }
1935         }
1936
1937         status = ice_flow_proc_segs(hw, params);
1938         if (status) {
1939                 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
1940                 goto out;
1941         }
1942
1943         /* Add a HW profile for this flow profile */
1944         status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
1945                               params->attr, params->attr_cnt, params->es,
1946                               params->mask);
1947         if (status) {
1948                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1949                 goto out;
1950         }
1951
1952         INIT_LIST_HEAD(&params->prof->entries);
1953         ice_init_lock(&params->prof->entries_lock);
1954         *prof = params->prof;
1955
1956 out:
1957         if (status) {
1958                 if (params->prof->acts)
1959                         ice_free(hw, params->prof->acts);
1960                 ice_free(hw, params->prof);
1961         }
1962 free_params:
1963         ice_free(hw, params);
1964
1965         return status;
1966 }
1967
1968 /**
1969  * ice_flow_rem_prof_sync - remove a flow profile
1970  * @hw: pointer to the hardware structure
1971  * @blk: classification stage
1972  * @prof: pointer to flow profile to remove
1973  *
1974  * Assumption: the caller has acquired the lock to the profile list
1975  */
1976 static enum ice_status
1977 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1978                        struct ice_flow_prof *prof)
1979 {
1980         enum ice_status status;
1981
1982         /* Remove all remaining flow entries before removing the flow profile */
1983         if (!LIST_EMPTY(&prof->entries)) {
1984                 struct ice_flow_entry *e, *t;
1985
1986                 ice_acquire_lock(&prof->entries_lock);
1987
1988                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1989                                          l_entry) {
1990                         status = ice_flow_rem_entry_sync(hw, blk, e);
1991                         if (status)
1992                                 break;
1993                 }
1994
1995                 ice_release_lock(&prof->entries_lock);
1996         }
1997
1998         if (blk == ICE_BLK_ACL) {
1999                 struct ice_aqc_acl_profile_ranges query_rng_buf;
2000                 struct ice_aqc_acl_prof_generic_frmt buf;
2001                 u8 prof_id = 0;
2002
2003                 /* Disassociate the scenario from the profile for the PF */
2004                 status = ice_flow_acl_disassoc_scen(hw, prof);
2005                 if (status)
2006                         return status;
2007
2008                 /* Clear the range-checker if the profile ID is no longer
2009                  * used by any PF
2010                  */
2011                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2012                 if (status && status != ICE_ERR_IN_USE) {
2013                         return status;
2014                 } else if (!status) {
2015                         /* Clear the range-checker value for profile ID */
2016                         ice_memset(&query_rng_buf, 0,
2017                                    sizeof(struct ice_aqc_acl_profile_ranges),
2018                                    ICE_NONDMA_MEM);
2019
2020                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
2021                                                       &prof_id);
2022                         if (status)
2023                                 return status;
2024
2025                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2026                                                           &query_rng_buf, NULL);
2027                         if (status)
2028                                 return status;
2029                 }
2030         }
2031
2032         /* Remove all hardware profiles associated with this flow profile */
2033         status = ice_rem_prof(hw, blk, prof->id);
2034         if (!status) {
2035                 LIST_DEL(&prof->l_entry);
2036                 ice_destroy_lock(&prof->entries_lock);
2037                 if (prof->acts)
2038                         ice_free(hw, prof->acts);
2039                 ice_free(hw, prof);
2040         }
2041
2042         return status;
2043 }
2044
2045 /**
2046  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2047  * @buf: Destination buffer the function writes the partial extraction sequence to
2048  * @info: Info about field
2049  */
2050 static void
2051 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2052                                struct ice_flow_fld_info *info)
2053 {
2054         u16 dst, i;
2055         u8 src;
2056
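        /* Compute the byte offset of the field within the extracted field
         * vector: the extraction index selects an ICE_FLOW_FV_EXTRACT_SZ-byte
         * word and the bit displacement selects the byte within that word.
         */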
2057         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2058                 info->xtrct.disp / BITS_PER_BYTE;
2059         dst = info->entry.val;
2060         for (i = 0; i < info->entry.last; i++)
2061                 /* HW stores field vector words in LE, convert words back to BE
2062                  * so constructed entries will end up in network order
2063                  */
2064                 buf->byte_selection[dst++] = src++ ^ 1;
2065 }
2066
2067 /**
2068  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2069  * @hw: pointer to the hardware structure
2070  * @prof: pointer to flow profile
2071  */
2072 static enum ice_status
2073 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2074 {
2075         struct ice_aqc_acl_prof_generic_frmt buf;
2076         struct ice_flow_fld_info *info;
2077         enum ice_status status;
2078         u8 prof_id = 0;
2079         u16 i;
2080
2081         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2082
2083         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2084         if (status)
2085                 return status;
2086
2087         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2088         if (status && status != ICE_ERR_IN_USE)
2089                 return status;
2090
2091         if (!status) {
2092                 /* Program the profile dependent configuration. This is done
2093                  * only once regardless of the number of PFs using that profile
2094                  */
2095                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2096
2097                 for (i = 0; i < prof->segs_cnt; i++) {
2098                         struct ice_flow_seg_info *seg = &prof->segs[i];
2099                         u16 j;
2100
2101                         ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2102                                              ICE_FLOW_FIELD_IDX_MAX) {
2103                                 info = &seg->fields[j];
2104
2105                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2106                                         buf.word_selection[info->entry.val] =
2107                                                 info->xtrct.idx;
2108                                 else
2109                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
2110                                                                        info);
2111                         }
2112
2113                         for (j = 0; j < seg->raws_cnt; j++) {
2114                                 info = &seg->raws[j].info;
2115                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2116                         }
2117                 }
2118
2119                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2120                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2121                            ICE_NONDMA_MEM);
2122         }
2123
2124         /* Update the current PF */
2125         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2126         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2127
2128         return status;
2129 }
2130
2131 /**
2132  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2133  * @hw: pointer to the hardware structure
2134  * @blk: classification stage
2135  * @vsi_handle: software VSI handle
2136  * @vsig: target VSI group
2137  *
2138  * Assumption: the caller has already verified that the VSI to
2139  * be added has the same characteristics as the VSIG and will
2140  * thereby have access to all resources added to that VSIG.
2141  */
2142 enum ice_status
2143 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2144                         u16 vsig)
2145 {
2146         enum ice_status status;
2147
2148         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2149                 return ICE_ERR_PARAM;
2150
2151         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2152         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2153                                   vsig);
2154         ice_release_lock(&hw->fl_profs_locks[blk]);
2155
2156         return status;
2157 }
2158
2159 /**
2160  * ice_flow_assoc_prof - associate a VSI with a flow profile
2161  * @hw: pointer to the hardware structure
2162  * @blk: classification stage
2163  * @prof: pointer to flow profile
2164  * @vsi_handle: software VSI handle
2165  *
2166  * Assumption: the caller has acquired the lock to the profile list
2167  * and the software VSI handle has been validated
2168  */
2169 enum ice_status
2170 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2171                     struct ice_flow_prof *prof, u16 vsi_handle)
2172 {
2173         enum ice_status status = ICE_SUCCESS;
2174
2175         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2176                 if (blk == ICE_BLK_ACL) {
2177                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2178                         if (status)
2179                                 return status;
2180                 }
2181                 status = ice_add_prof_id_flow(hw, blk,
2182                                               ice_get_hw_vsi_num(hw,
2183                                                                  vsi_handle),
2184                                               prof->id);
2185                 if (!status)
2186                         ice_set_bit(vsi_handle, prof->vsis);
2187                 else
2188                         ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2189                                   status);
2190         }
2191
2192         return status;
2193 }
2194
2195 /**
2196  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2197  * @hw: pointer to the hardware structure
2198  * @blk: classification stage
2199  * @prof: pointer to flow profile
2200  * @vsi_handle: software VSI handle
2201  *
2202  * Assumption: the caller has acquired the lock to the profile list
2203  * and the software VSI handle has been validated
2204  */
2205 static enum ice_status
2206 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2207                        struct ice_flow_prof *prof, u16 vsi_handle)
2208 {
2209         enum ice_status status = ICE_SUCCESS;
2210
2211         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2212                 status = ice_rem_prof_id_flow(hw, blk,
2213                                               ice_get_hw_vsi_num(hw,
2214                                                                  vsi_handle),
2215                                               prof->id);
2216                 if (!status)
2217                         ice_clear_bit(vsi_handle, prof->vsis);
2218                 else
2219                         ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2220                                   status);
2221         }
2222
2223         return status;
2224 }
2225
2226 /**
2227  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2228  * @hw: pointer to the HW struct
2229  * @blk: classification stage
2230  * @dir: flow direction
2231  * @prof_id: unique ID to identify this flow profile
2232  * @segs: array of one or more packet segments that describe the flow
2233  * @segs_cnt: number of packet segments provided
2234  * @acts: array of default actions
2235  * @acts_cnt: number of default actions
2236  * @prof: stores the returned flow profile added
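 *
 * Illustrative usage sketch only (the profile ID, block, and field choices
 * below are arbitrary examples, not values required by this API); see
 * ice_flow.h for the segment/field helpers:
 *
 *	struct ice_flow_seg_info seg = { 0 };
 *	struct ice_flow_prof *prof;
 *
 *	ICE_FLOW_SET_HDRS(&seg, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP);
 *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 *	ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, 0x1234, &seg, 1,
 *			  NULL, 0, &prof);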
2237  */
2238 enum ice_status
2239 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2240                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2241                   struct ice_flow_action *acts, u8 acts_cnt,
2242                   struct ice_flow_prof **prof)
2243 {
2244         enum ice_status status;
2245
2246         if (segs_cnt > ICE_FLOW_SEG_MAX)
2247                 return ICE_ERR_MAX_LIMIT;
2248
2249         if (!segs_cnt)
2250                 return ICE_ERR_PARAM;
2251
2252         if (!segs)
2253                 return ICE_ERR_BAD_PTR;
2254
2255         status = ice_flow_val_hdrs(segs, segs_cnt);
2256         if (status)
2257                 return status;
2258
2259         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2260
2261         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2262                                         acts, acts_cnt, prof);
2263         if (!status)
2264                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2265
2266         ice_release_lock(&hw->fl_profs_locks[blk]);
2267
2268         return status;
2269 }
2270
2271 /**
2272  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2273  * @hw: pointer to the HW struct
2274  * @blk: the block for which the flow profile is to be removed
2275  * @prof_id: unique ID of the flow profile to be removed
2276  */
2277 enum ice_status
2278 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2279 {
2280         struct ice_flow_prof *prof;
2281         enum ice_status status;
2282
2283         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2284
2285         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2286         if (!prof) {
2287                 status = ICE_ERR_DOES_NOT_EXIST;
2288                 goto out;
2289         }
2290
2291         /* prof becomes invalid after the call */
2292         status = ice_flow_rem_prof_sync(hw, blk, prof);
2293
2294 out:
2295         ice_release_lock(&hw->fl_profs_locks[blk]);
2296
2297         return status;
2298 }
2299
2300 /**
2301  * ice_flow_find_entry - look for a flow entry using its unique ID
2302  * @hw: pointer to the HW struct
2303  * @blk: classification stage
2304  * @entry_id: unique ID to identify this flow entry
2305  *
2306  * This function looks for the flow entry with the specified unique ID in all
2307  * flow profiles of the specified classification stage. If the entry is found,
2308  * it returns the handle to the flow entry. Otherwise, it returns
2309  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2310  */
2311 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2312 {
2313         struct ice_flow_entry *found = NULL;
2314         struct ice_flow_prof *p;
2315
2316         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2317
2318         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2319                 struct ice_flow_entry *e;
2320
2321                 ice_acquire_lock(&p->entries_lock);
2322                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2323                         if (e->id == entry_id) {
2324                                 found = e;
2325                                 break;
2326                         }
2327                 ice_release_lock(&p->entries_lock);
2328
2329                 if (found)
2330                         break;
2331         }
2332
2333         ice_release_lock(&hw->fl_profs_locks[blk]);
2334
2335         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2336 }
2337
2338 /**
2339  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2340  * @hw: pointer to the hardware structure
2341  * @acts: array of actions to be performed on a match
2342  * @acts_cnt: number of actions
2343  * @cnt_alloc: indicates if an ACL counter has been allocated.
2344  */
2345 static enum ice_status
2346 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2347                            u8 acts_cnt, bool *cnt_alloc)
2348 {
2349         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2350         int i;
2351
2352         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2353         *cnt_alloc = false;
2354
2355         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2356                 return ICE_ERR_OUT_OF_RANGE;
2357
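        /* Only NOP, drop, packet-counter, and forward-to-queue actions are
         * valid for ACL entries, and each action type may appear at most once.
         */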
2358         for (i = 0; i < acts_cnt; i++) {
2359                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2360                     acts[i].type != ICE_FLOW_ACT_DROP &&
2361                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2362                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2363                         return ICE_ERR_CFG;
2364
2365                 /* If the caller wants to add two actions of the same type,
2366                  * it is considered an invalid configuration.
2367                  */
2368                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2369                         return ICE_ERR_PARAM;
2370         }
2371
2372         /* Checks if ACL counters are needed. */
2373         for (i = 0; i < acts_cnt; i++) {
2374                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2375                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2376                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2377                         struct ice_acl_cntrs cntrs;
2378                         enum ice_status status;
2379
2380                         cntrs.amount = 1;
2381                         cntrs.bank = 0; /* Only bank0 for the moment */
2382
2383                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2384                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2385                         else
2386                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2387
2388                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2389                         if (status)
2390                                 return status;
2391                         /* Counter index within the bank */
2392                         acts[i].data.acl_act.value =
2393                                                 CPU_TO_LE16(cntrs.first_cntr);
2394                         *cnt_alloc = true;
2395                 }
2396         }
2397
2398         return ICE_SUCCESS;
2399 }
2400
2401 /**
2402  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2403  * @fld: number of the given field
2404  * @info: info about field
2405  * @range_buf: range checker configuration buffer
2406  * @data: pointer to a data buffer containing flow entry's match values/masks
2407  * @range: Input/output param indicating which range checkers are being used
2408  */
2409 static void
2410 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2411                               struct ice_aqc_acl_profile_ranges *range_buf,
2412                               u8 *data, u8 *range)
2413 {
2414         u16 new_mask;
2415
2416         /* If not specified, default mask is all bits in field */
2417         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2418                     BIT(ice_flds_info[fld].size) - 1 :
2419                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2420
2421         /* If the mask is 0, then we don't need to worry about this input
2422          * range checker value.
2423          */
2424         if (new_mask) {
2425                 u16 new_high =
2426                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2427                 u16 new_low =
2428                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2429                 u8 range_idx = info->entry.val;
2430
2431                 range_buf->checker_cfg[range_idx].low_boundary =
2432                         CPU_TO_BE16(new_low);
2433                 range_buf->checker_cfg[range_idx].high_boundary =
2434                         CPU_TO_BE16(new_high);
2435                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2436
2437                 /* Indicate which range checker is being used */
2438                 *range |= BIT(range_idx);
2439         }
2440 }
2441
2442 /**
2443  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2444  * @fld: number of the given field
2445  * @info: info about the field
2446  * @buf: buffer containing the entry
2447  * @dontcare: buffer containing don't care mask for entry
2448  * @data: pointer to a data buffer containing flow entry's match values/masks
2449  */
2450 static void
2451 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2452                             u8 *dontcare, u8 *data)
2453 {
2454         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2455         bool use_mask = false;
2456         u8 disp;
2457
2458         src = info->src.val;
2459         mask = info->src.mask;
2460         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2461         disp = info->xtrct.disp % BITS_PER_BYTE;
2462
2463         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2464                 use_mask = true;
2465
2466         for (k = 0; k < info->entry.last; k++, dst++) {
2467                 /* Add overflow bits from previous byte */
2468                 buf[dst] = (tmp_s & 0xff00) >> 8;
2469
2470                 /* If the mask is not valid, tmp_m is always zero, so this just
2471                  * sets dontcare to 0 (no masked bits). If the mask is valid, this
2472                  * pulls in the overflow bits of the mask from the previous byte
2473                  */
2474                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2475
2476                 /* If there is displacement, the last byte will only contain
2477                  * displaced data, but there is no more data to read from the user
2478                  * buffer, so skip it to avoid potentially reading beyond the end
2479                  * of the user buffer
2480                  */
2481                 if (!disp || k < info->entry.last - 1) {
2482                         /* Store shifted data to use in next byte */
2483                         tmp_s = data[src++] << disp;
2484
2485                         /* Add current (shifted) byte */
2486                         buf[dst] |= tmp_s & 0xff;
2487
2488                         /* Handle mask if valid */
2489                         if (use_mask) {
2490                                 tmp_m = (~data[mask++] & 0xff) << disp;
2491                                 dontcare[dst] |= tmp_m & 0xff;
2492                         }
2493                 }
2494         }
2495
2496         /* Fill in don't care bits at beginning of field */
2497         if (disp) {
2498                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2499                 for (k = 0; k < disp; k++)
2500                         dontcare[dst] |= BIT(k);
2501         }
2502
2503         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2504
2505         /* Fill in don't care bits at end of field */
2506         if (end_disp) {
2507                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2508                       info->entry.last - 1;
2509                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2510                         dontcare[dst] |= BIT(k);
2511         }
2512 }
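
/* Worked example (illustrative): assume a 2-byte field whose extracted data
 * is displaced by 3 bits (disp = 3) and whose info->entry.last is 3. The
 * loop above then produces:
 *   byte 0: data[src] << 3 (low 8 bits)
 *   byte 1: overflow bits of byte 0 | low 8 bits of (data[src + 1] << 3)
 *   byte 2: overflow bits of byte 1 only; no further user data is read
 * Afterwards the low 3 bits of byte 0 and bits 3..7 of byte 2 are flagged
 * as don't care, since they carry no field data.
 */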
2513
2514 /**
2515  * ice_flow_acl_frmt_entry - Format ACL entry
2516  * @hw: pointer to the hardware structure
2517  * @prof: pointer to flow profile
2518  * @e: pointer to the flow entry
2519  * @data: pointer to a data buffer containing flow entry's match values/masks
2520  * @acts: array of actions to be performed on a match
2521  * @acts_cnt: number of actions
2522  *
2523  * Formats the key (and key_inverse) to be matched from the data passed in,
2524  * along with data from the flow profile. This key/key_inverse pair makes up
2525  * the 'entry' for an ACL flow entry.
2526  */
2527 static enum ice_status
2528 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2529                         struct ice_flow_entry *e, u8 *data,
2530                         struct ice_flow_action *acts, u8 acts_cnt)
2531 {
2532         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2533         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2534         enum ice_status status;
2535         bool cnt_alloc;
2536         u8 prof_id = 0;
2537         u16 i, buf_sz;
2538
2539         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2540         if (status)
2541                 return status;
2542
2543         /* Format the result action */
2544
2545         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2546         if (status)
2547                 return status;
2548
2549         status = ICE_ERR_NO_MEMORY;
2550
2551         e->acts = (struct ice_flow_action *)
2552                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2553                            ICE_NONDMA_TO_NONDMA);
2554         if (!e->acts)
2555                 goto out;
2556
2557         e->acts_cnt = acts_cnt;
2558
2559         /* Format the matching data */
2560         buf_sz = prof->cfg.scen->width;
2561         buf = (u8 *)ice_malloc(hw, buf_sz);
2562         if (!buf)
2563                 goto out;
2564
2565         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2566         if (!dontcare)
2567                 goto out;
2568
2569         /* The 'key' buffer will store both key and key_inverse, so it must be
2570          * twice the size of buf
2571          */
2572         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2573         if (!key)
2574                 goto out;
2575
2576         range_buf = (struct ice_aqc_acl_profile_ranges *)
2577                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2578         if (!range_buf)
2579                 goto out;
2580
2581         /* Set don't care mask to all 1's to start, will zero out used bytes */
2582         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2583
2584         for (i = 0; i < prof->segs_cnt; i++) {
2585                 struct ice_flow_seg_info *seg = &prof->segs[i];
2586                 u8 j;
2587
2588                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2589                                      ICE_FLOW_FIELD_IDX_MAX) {
2590                         struct ice_flow_fld_info *info = &seg->fields[j];
2591
2592                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2593                                 ice_flow_acl_frmt_entry_range(j, info,
2594                                                               range_buf, data,
2595                                                               &range);
2596                         else
2597                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2598                                                             dontcare, data);
2599                 }
2600
2601                 for (j = 0; j < seg->raws_cnt; j++) {
2602                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2603                         u16 dst, src, mask, k;
2604                         bool use_mask = false;
2605
2606                         src = info->src.val;
2607                         dst = info->entry.val -
2608                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2609                         mask = info->src.mask;
2610
2611                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2612                                 use_mask = true;
2613
2614                         for (k = 0; k < info->entry.last; k++, dst++) {
2615                                 buf[dst] = data[src++];
2616                                 if (use_mask)
2617                                         dontcare[dst] = ~data[mask++];
2618                                 else
2619                                         dontcare[dst] = 0;
2620                         }
2621                 }
2622         }
2623
2624         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2625         dontcare[prof->cfg.scen->pid_idx] = 0;
2626
2627         /* Format the buffer for direction flags */
2628         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2629
2630         if (prof->dir == ICE_FLOW_RX)
2631                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2632
2633         if (range) {
2634                 buf[prof->cfg.scen->rng_chk_idx] = range;
2635                 /* Mark any unused range checkers as don't care */
2636                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2637                 e->range_buf = range_buf;
2638         } else {
2639                 ice_free(hw, range_buf);
2640         }
2641
2642         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2643                              buf_sz);
2644         if (status)
2645                 goto out;
2646
2647         e->entry = key;
2648         e->entry_sz = buf_sz * 2;
2649
2650 out:
2651         if (buf)
2652                 ice_free(hw, buf);
2653
2654         if (dontcare)
2655                 ice_free(hw, dontcare);
2656
2657         if (status && key)
2658                 ice_free(hw, key);
2659
2660         if (status && range_buf) {
2661                 ice_free(hw, range_buf);
2662                 e->range_buf = NULL;
2663         }
2664
2665         if (status && e->acts) {
2666                 ice_free(hw, e->acts);
2667                 e->acts = NULL;
2668                 e->acts_cnt = 0;
2669         }
2670
2671         if (status && cnt_alloc)
2672                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2673
2674         return status;
2675 }
2676
2677 /**
2678  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2679  *                                     the compared data.
2680  * @prof: pointer to flow profile
2681  * @e: pointer to the comparing flow entry
2682  * @do_chg_action: decide if we want to change the ACL action
2683  * @do_add_entry: decide if we want to add the new ACL entry
2684  * @do_rem_entry: decide if we want to remove the current ACL entry
2685  *
2686  * Find an ACL scenario entry that matches the compared data. At the same time,
2687  * this function also figures out:
2688  * a/ If we want to change the ACL action
2689  * b/ If we want to add the new ACL entry
2690  * c/ If we want to remove the current ACL entry
2691  */
2692 static struct ice_flow_entry *
2693 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2694                                   struct ice_flow_entry *e, bool *do_chg_action,
2695                                   bool *do_add_entry, bool *do_rem_entry)
2696 {
2697         struct ice_flow_entry *p, *return_entry = NULL;
2698         u8 i, j;
2699
2700         /* Check if:
2701          * a/ There exists an entry with same matching data, but different
2702          *    priority, then we remove this existing ACL entry. Then, we
2703          *    will add the new entry to the ACL scenario.
2704          * b/ There exists an entry with same matching data, priority, and
2705          *    result action, then we do nothing
2706          * c/ There exists an entry with same matching data, priority, but
2707          *    different action, then we only change the entry's action.
2708          * d/ Else, we add this new entry to the ACL scenario.
2709          */
2710         *do_chg_action = false;
2711         *do_add_entry = true;
2712         *do_rem_entry = false;
2713         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2714                 if (memcmp(p->entry, e->entry, p->entry_sz))
2715                         continue;
2716
2717                 /* From this point, we have the same matching_data. */
2718                 *do_add_entry = false;
2719                 return_entry = p;
2720
2721                 if (p->priority != e->priority) {
2722                         /* matching data && !priority */
2723                         *do_add_entry = true;
2724                         *do_rem_entry = true;
2725                         break;
2726                 }
2727
2728                 /* From this point, we will have matching_data && priority */
2729                 if (p->acts_cnt != e->acts_cnt)
2730                         *do_chg_action = true;
2731                 for (i = 0; i < p->acts_cnt; i++) {
2732                         bool found_not_match = false;
2733
2734                         for (j = 0; j < e->acts_cnt; j++)
2735                                 if (memcmp(&p->acts[i], &e->acts[j],
2736                                            sizeof(struct ice_flow_action))) {
2737                                         found_not_match = true;
2738                                         break;
2739                                 }
2740
2741                         if (found_not_match) {
2742                                 *do_chg_action = true;
2743                                 break;
2744                         }
2745                 }
2746
2747                 /* (do_chg_action = true) means:
2748                  *    matching_data && priority && !result_action
2749                  * (do_chg_action = false) means:
2750                  *    matching_data && priority && result_action
2751                  */
2752                 break;
2753         }
2754
2755         return return_entry;
2756 }
2757
2758 /**
2759  * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
2760  * @p: flow priority
2761  */
2762 static enum ice_acl_entry_prio
2763 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
2764 {
2765         enum ice_acl_entry_prio acl_prio;
2766
2767         switch (p) {
2768         case ICE_FLOW_PRIO_LOW:
2769                 acl_prio = ICE_ACL_PRIO_LOW;
2770                 break;
2771         case ICE_FLOW_PRIO_NORMAL:
2772                 acl_prio = ICE_ACL_PRIO_NORMAL;
2773                 break;
2774         case ICE_FLOW_PRIO_HIGH:
2775                 acl_prio = ICE_ACL_PRIO_HIGH;
2776                 break;
2777         default:
2778                 acl_prio = ICE_ACL_PRIO_NORMAL;
2779                 break;
2780         }
2781
2782         return acl_prio;
2783 }
2784
2785 /**
2786  * ice_flow_acl_union_rng_chk - Perform union operation between two
2787  *                              range checker buffers
2788  * @dst_buf: pointer to destination range checker buffer
2789  * @src_buf: pointer to source range checker buffer
2790  *
2791  * This function computes the union of the dst_buf and src_buf range checker
2792  * buffers and saves the result back to dst_buf
2793  */
2794 static enum ice_status
2795 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2796                            struct ice_aqc_acl_profile_ranges *src_buf)
2797 {
2798         u8 i, j;
2799
2800         if (!dst_buf || !src_buf)
2801                 return ICE_ERR_BAD_PTR;
2802
2803         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2804                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2805                 bool will_populate = false;
2806
2807                 in_data = &src_buf->checker_cfg[i];
2808
2809                 if (!in_data->mask)
2810                         break;
2811
2812                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2813                         cfg_data = &dst_buf->checker_cfg[j];
2814
2815                         if (!cfg_data->mask ||
2816                             !memcmp(cfg_data, in_data,
2817                                     sizeof(struct ice_acl_rng_data))) {
2818                                 will_populate = true;
2819                                 break;
2820                         }
2821                 }
2822
2823                 if (will_populate) {
2824                         ice_memcpy(cfg_data, in_data,
2825                                    sizeof(struct ice_acl_rng_data),
2826                                    ICE_NONDMA_TO_NONDMA);
2827                 } else {
2828                         /* No available slot left to program range checker */
2829                         return ICE_ERR_MAX_LIMIT;
2830                 }
2831         }
2832
2833         return ICE_SUCCESS;
2834 }
2835
2836 /**
2837  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2838  * @hw: pointer to the hardware structure
2839  * @prof: pointer to flow profile
2840  * @entry: double pointer to the flow entry
2841  *
2842  * This function looks at the entries currently added to the corresponding
2843  * ACL scenario. Then, it performs matching logic to decide whether to add,
2844  * modify, or do nothing with this new entry.
2845  */
2846 static enum ice_status
2847 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2848                                  struct ice_flow_entry **entry)
2849 {
2850         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2851         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2852         struct ice_acl_act_entry *acts = NULL;
2853         struct ice_flow_entry *exist;
2854         enum ice_status status = ICE_SUCCESS;
2855         struct ice_flow_entry *e;
2856         u8 i;
2857
2858         if (!entry || !(*entry) || !prof)
2859                 return ICE_ERR_BAD_PTR;
2860
2861         e = *entry;
2862
2863         do_chg_rng_chk = false;
2864         if (e->range_buf) {
2865                 u8 prof_id = 0;
2866
2867                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2868                                               &prof_id);
2869                 if (status)
2870                         return status;
2871
2872                 /* Query the current range-checker value in FW */
2873                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2874                                                    NULL);
2875                 if (status)
2876                         return status;
2877                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2878                            sizeof(struct ice_aqc_acl_profile_ranges),
2879                            ICE_NONDMA_TO_NONDMA);
2880
2881                 /* Generate the new range-checker value */
2882                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2883                 if (status)
2884                         return status;
2885
2886                 /* Reconfigure the range check if the buffer is changed. */
2887                 do_chg_rng_chk = false;
2888                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2889                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2890                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2891                                                           &cfg_rng_buf, NULL);
2892                         if (status)
2893                                 return status;
2894
2895                         do_chg_rng_chk = true;
2896                 }
2897         }
2898
2899         /* Figure out if we want to (change the ACL action) and/or
2900          * (add the new ACL entry) and/or (remove the current ACL entry)
2901          */
2902         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2903                                                   &do_add_entry, &do_rem_entry);
2904         if (do_rem_entry) {
2905                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2906                 if (status)
2907                         return status;
2908         }
2909
2910         /* Prepare the result action buffer */
2911         acts = (struct ice_acl_act_entry *)
2912                 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2913         if (!acts)
2914                 return ICE_ERR_NO_MEMORY;
2915
2916         for (i = 0; i < e->acts_cnt; i++)
2917                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2918                            sizeof(struct ice_acl_act_entry),
2919                            ICE_NONDMA_TO_NONDMA);
2920
2921         if (do_add_entry) {
2922                 enum ice_acl_entry_prio prio;
2923                 u8 *keys, *inverts;
2924                 u16 entry_idx;
2925
2926                 keys = (u8 *)e->entry;
2927                 inverts = keys + (e->entry_sz / 2);
2928                 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
2929
2930                 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
2931                                            inverts, acts, e->acts_cnt,
2932                                            &entry_idx);
2933                 if (status)
2934                         goto out;
2935
2936                 e->scen_entry_idx = entry_idx;
2937                 LIST_ADD(&e->l_entry, &prof->entries);
2938         } else {
2939                 if (do_chg_action) {
2940                         /* For the action memory info, update the SW's copy of the
2941                          * existing entry with e's action memory info
2942                          */
2943                         ice_free(hw, exist->acts);
2944                         exist->acts_cnt = e->acts_cnt;
2945                         exist->acts = (struct ice_flow_action *)
2946                                 ice_calloc(hw, exist->acts_cnt,
2947                                            sizeof(struct ice_flow_action));
2948                         if (!exist->acts) {
2949                                 status = ICE_ERR_NO_MEMORY;
2950                                 goto out;
2951                         }
2952
2953                         ice_memcpy(exist->acts, e->acts,
2954                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2955                                    ICE_NONDMA_TO_NONDMA);
2956
2957                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2958                                                   e->acts_cnt,
2959                                                   exist->scen_entry_idx);
2960                         if (status)
2961                                 goto out;
2962                 }
2963
2964                 if (do_chg_rng_chk) {
2965                         /* In this case, we want to update the range checker
2966                          * information of the existing entry
2967                          */
2968                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2969                                                             e->range_buf);
2970                         if (status)
2971                                 goto out;
2972                 }
2973
2974                 /* As we don't add the new entry to our SW DB, deallocate its
2975                          * memory, and return the existing entry to the caller
2976                  */
2977                 ice_dealloc_flow_entry(hw, e);
2978                 *(entry) = exist;
2979         }
2980 out:
2981         ice_free(hw, acts);
2982
2983         return status;
2984 }
2985
2986 /**
2987  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2988  * @hw: pointer to the hardware structure
2989  * @prof: pointer to flow profile
2990  * @e: double pointer to the flow entry
2991  */
2992 static enum ice_status
2993 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2994                             struct ice_flow_entry **e)
2995 {
2996         enum ice_status status;
2997
2998         ice_acquire_lock(&prof->entries_lock);
2999         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3000         ice_release_lock(&prof->entries_lock);
3001
3002         return status;
3003 }
3004
3005 /**
3006  * ice_flow_add_entry - Add a flow entry
3007  * @hw: pointer to the HW struct
3008  * @blk: classification stage
3009  * @prof_id: ID of the profile to add a new flow entry to
3010  * @entry_id: unique ID to identify this flow entry
3011  * @vsi_handle: software VSI handle for the flow entry
3012  * @prio: priority of the flow entry
3013  * @data: pointer to a data buffer containing flow entry's match values/masks
3014  * @acts: array of actions to be performed on a match
3015  * @acts_cnt: number of actions
3016  * @entry_h: pointer to buffer that receives the new flow entry's handle
3017  */
3018 enum ice_status
3019 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3020                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3021                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
3022                    u64 *entry_h)
3023 {
3024         struct ice_flow_entry *e = NULL;
3025         struct ice_flow_prof *prof;
3026         enum ice_status status = ICE_SUCCESS;
3027
3028         /* ACL entries must indicate an action */
3029         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3030                 return ICE_ERR_PARAM;
3031
3032         /* No flow entry data is expected for RSS */
3033         if (!entry_h || (!data && blk != ICE_BLK_RSS))
3034                 return ICE_ERR_BAD_PTR;
3035
3036         if (!ice_is_vsi_valid(hw, vsi_handle))
3037                 return ICE_ERR_PARAM;
3038
3039         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3040
3041         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3042         if (!prof) {
3043                 status = ICE_ERR_DOES_NOT_EXIST;
3044         } else {
3045                 /* Allocate memory for the entry being added and associate
3046                  * the VSI to the found flow profile
3047                  */
3048                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3049                 if (!e)
3050                         status = ICE_ERR_NO_MEMORY;
3051                 else
3052                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3053         }
3054
3055         ice_release_lock(&hw->fl_profs_locks[blk]);
3056         if (status)
3057                 goto out;
3058
3059         e->id = entry_id;
3060         e->vsi_handle = vsi_handle;
3061         e->prof = prof;
3062         e->priority = prio;
3063
3064         switch (blk) {
3065         case ICE_BLK_FD:
3066         case ICE_BLK_RSS:
3067                 break;
3068         case ICE_BLK_ACL:
3069                 /* ACL will handle the entry management */
3070                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3071                                                  acts_cnt);
3072                 if (status)
3073                         goto out;
3074
3075                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3076                 if (status)
3077                         goto out;
3078
3079                 break;
3080         default:
3081                 status = ICE_ERR_NOT_IMPL;
3082                 goto out;
3083         }
3084
3085         if (blk != ICE_BLK_ACL) {
3086                 /* ACL will handle the entry management */
3087                 ice_acquire_lock(&prof->entries_lock);
3088                 LIST_ADD(&e->l_entry, &prof->entries);
3089                 ice_release_lock(&prof->entries_lock);
3090         }
3091
3092         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3093
3094 out:
3095         if (status && e) {
3096                 if (e->entry)
3097                         ice_free(hw, e->entry);
3098                 ice_free(hw, e);
3099         }
3100
3101         return status;
3102 }
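
/* Usage sketch (illustrative only; hw, prof_id, entry_id and vsi_handle are
 * assumed to be valid): add an ACL entry with a packet counter action. For
 * counter actions the counter index in data.acl_act is allocated and filled
 * in by ice_flow_acl_check_actions(); the data buffer layout is defined by
 * the offsets the caller passed to ice_flow_set_fld() when building the
 * profile.
 *
 *      struct ice_flow_action acts[1] = {
 *              { .type = ICE_FLOW_ACT_CNTR_PKT },
 *      };
 *      u8 data[32] = { 0 };
 *      u64 entry_h;
 *      enum ice_status status;
 *
 *      status = ice_flow_add_entry(hw, ICE_BLK_ACL, prof_id, entry_id,
 *                                  vsi_handle, ICE_FLOW_PRIO_NORMAL,
 *                                  data, acts, 1, &entry_h);
 *      if (!status)
 *              status = ice_flow_rem_entry(hw, ICE_BLK_ACL, entry_h);
 */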
3103
3104 /**
3105  * ice_flow_rem_entry - Remove a flow entry
3106  * @hw: pointer to the HW struct
3107  * @blk: classification stage
3108  * @entry_h: handle to the flow entry to be removed
3109  */
3110 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3111                                    u64 entry_h)
3112 {
3113         struct ice_flow_entry *entry;
3114         struct ice_flow_prof *prof;
3115         enum ice_status status = ICE_SUCCESS;
3116
3117         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3118                 return ICE_ERR_PARAM;
3119
3120         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
3121
3122         /* Retain the pointer to the flow profile as the entry will be freed */
3123         prof = entry->prof;
3124
3125         if (prof) {
3126                 ice_acquire_lock(&prof->entries_lock);
3127                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3128                 ice_release_lock(&prof->entries_lock);
3129         }
3130
3131         return status;
3132 }
3133
3134 /**
3135  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3136  * @seg: packet segment the field being set belongs to
3137  * @fld: field to be set
3138  * @field_type: type of the field
3139  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3140  *           entry's input buffer
3141  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3142  *            input buffer
3143  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3144  *            entry's input buffer
3145  *
3146  * This helper function stores information of a field being matched, including
3147  * the type of the field and the locations of the value to match, the mask, and
3148  * the upper-bound value in the start of the input buffer for a flow entry.
3149  * the upper-bound value relative to the start of the input buffer for a flow entry.
3150  *
3151  * This function also opportunistically determines the protocol headers to be
3152  * present based on the fields being set. Some fields cannot be used alone to
3153  * determine the protocol headers present. Sometimes, fields for particular
3154  * protocol headers are not matched. In those cases, the protocol headers
3155  * must be explicitly set.
3156  */
3157 static void
3158 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3159                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3160                      u16 mask_loc, u16 last_loc)
3161 {
3162         u64 bit = BIT_ULL(fld);
3163
3164         seg->match |= bit;
3165         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3166                 seg->range |= bit;
3167
3168         seg->fields[fld].type = field_type;
3169         seg->fields[fld].src.val = val_loc;
3170         seg->fields[fld].src.mask = mask_loc;
3171         seg->fields[fld].src.last = last_loc;
3172
3173         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3174 }
3175
3176 /**
3177  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3178  * @seg: packet segment the field being set belongs to
3179  * @fld: field to be set
3180  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3181  *           entry's input buffer
3182  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3183  *            input buffer
3184  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3185  *            entry's input buffer
3186  * @range: indicate if field being matched is to be in a range
3187  *
3188  * This function specifies the locations, in the form of byte offsets from the
3189  * start of the input buffer for a flow entry, from where the value to match,
3190  * the mask value, and upper value can be extracted. These locations are then
3191  * stored in the flow profile. When adding a flow entry associated with the
3192  * flow profile, these locations will be used to quickly extract the values and
3193  * create the content of a match entry. This function should only be used for
3194  * fixed-size data structures.
3195  */
3196 void
3197 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3198                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3199 {
3200         enum ice_flow_fld_match_type t = range ?
3201                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3202
3203         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3204 }
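
/* Usage sketch (illustrative only): the byte offsets below are hypothetical
 * locations within the caller's entry data buffer, not values defined by
 * this file.
 *
 *      Exact match, TCP destination port value at offset 0, mask at offset 2:
 *      ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
 *                       0, 2, ICE_FLOW_FLD_OFF_INVAL, false);
 *
 *      Range match, lower bound at offset 4, upper bound at offset 6:
 *      ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
 *                       4, ICE_FLOW_FLD_OFF_INVAL, 6, true);
 */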
3205
3206 /**
3207  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3208  * @seg: packet segment the field being set belongs to
3209  * @fld: field to be set
3210  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3211  *           entry's input buffer
3212  * @pref_loc: location of prefix value from entry's input buffer
3213  * @pref_sz: size of the location holding the prefix value
3214  *
3215  * This function specifies the locations, in the form of byte offsets from the
3216  * start of the input buffer for a flow entry, from where the value to match
3217  * and the IPv4 prefix value can be extracted. These locations are then stored
3218  * in the flow profile. When adding flow entries to the associated flow profile,
3219  * these locations can be used to quickly extract the values to create the
3220  * content of a match entry. This function should only be used for fixed-size
3221  * data structures.
3222  */
3223 void
3224 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3225                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3226 {
3227         /* For this type of field, the "mask" location is for the prefix value's
3228          * location and the "last" location is for the size of the location of
3229          * the prefix value.
3230          */
3231         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3232                              pref_loc, (u16)pref_sz);
3233 }
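
/* Usage sketch (illustrative only): match an IPv4 destination address whose
 * value sits at a hypothetical offset 0 of the entry data buffer and whose
 * one-byte prefix length (e.g. 24 for a /24) is stored at offset 4:
 *
 *      ice_flow_set_fld_prefix(seg, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 4, 1);
 */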
3234
3235 /**
3236  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3237  * @seg: packet segment the field being set belongs to
3238  * @off: offset of the raw field from the beginning of the segment in bytes
3239  * @len: length of the raw pattern to be matched
3240  * @val_loc: location of the value to match from entry's input buffer
3241  * @mask_loc: location of mask value from entry's input buffer
3242  *
3243  * This function specifies the offset of the raw field to be matched from the
3244  * beginning of the specified packet segment, and the locations, in the form of
3245  * byte offsets from the start of the input buffer for a flow entry, from where
3246  * the value to match and the mask value are to be extracted. These locations are
3247  * then stored in the flow profile. When adding flow entries to the associated
3248  * flow profile, these locations can be used to quickly extract the values to
3249  * create the content of a match entry. This function should only be used for
3250  * fixed-size data structures.
3251  */
3252 void
3253 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3254                      u16 val_loc, u16 mask_loc)
3255 {
3256         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3257                 seg->raws[seg->raws_cnt].off = off;
3258                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3259                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3260                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3261                 /* The "last" field is used to store the length of the field */
3262                 seg->raws[seg->raws_cnt].info.src.last = len;
3263         }
3264
3265         /* Overflows of "raws" will be handled as an error condition later in
3266          * the flow when this information is processed.
3267          */
3268         seg->raws_cnt++;
3269 }
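
/* Usage sketch (illustrative only): match two raw bytes located 4 bytes into
 * the packet segment, with the pattern stored at a hypothetical offset 8 of
 * the entry data buffer and its mask at offset 10:
 *
 *      ice_flow_add_fld_raw(seg, 4, 2, 8, 10);
 */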
3270
3271 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3272 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3273
3274 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3275         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3276
3277 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3278         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3279
3280 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3281         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3282          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3283          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3284
3285 /**
3286  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3287  * @segs: pointer to the flow field segment(s)
3288  * @seg_cnt: segment count
3289  * @cfg: configure parameters
3290  *
3291  * Helper function to extract fields from the hash bitmap and use the flow
3292  * header value to set the flow field segment, for further use in flow
3293  * profile entry addition or removal.
3294  */
3295 static enum ice_status
3296 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3297                           const struct ice_rss_hash_cfg *cfg)
3298 {
3299         struct ice_flow_seg_info *seg;
3300         u64 val;
3301         u8 i;
3302
3303         /* set inner most segment */
3304         seg = &segs[seg_cnt - 1];
3305
3306         ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3307                              ICE_FLOW_FIELD_IDX_MAX)
3308                 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3309                                  ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3310                                  ICE_FLOW_FLD_OFF_INVAL, false);
3311
3312         ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3313
3314         /* set outer most header */
3315         if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3316                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3317                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3318         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3319                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3320                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3321
3322         if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3323             ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
3324                 return ICE_ERR_PARAM;
3325
3326         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3327         if (val && !ice_is_pow2(val))
3328                 return ICE_ERR_CFG;
3329
3330         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3331         if (val && !ice_is_pow2(val))
3332                 return ICE_ERR_CFG;
3333
3334         return ICE_SUCCESS;
3335 }
3336
3337 /**
3338  * ice_rem_vsi_rss_list - remove VSI from RSS list
3339  * @hw: pointer to the hardware structure
3340  * @vsi_handle: software VSI handle
3341  *
3342  * Remove the VSI from all RSS configurations in the list.
3343  */
3344 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3345 {
3346         struct ice_rss_cfg *r, *tmp;
3347
3348         if (LIST_EMPTY(&hw->rss_list_head))
3349                 return;
3350
3351         ice_acquire_lock(&hw->rss_locks);
3352         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3353                                  ice_rss_cfg, l_entry)
3354                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3355                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3356                                 LIST_DEL(&r->l_entry);
3357                                 ice_free(hw, r);
3358                         }
3359         ice_release_lock(&hw->rss_locks);
3360 }
3361
3362 /**
3363  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3364  * @hw: pointer to the hardware structure
3365  * @vsi_handle: software VSI handle
3366  *
3367  * This function will iterate through all flow profiles and disassociate
3368  * the VSI from each profile. If a flow profile has no VSIs left, it will
3369  * be removed.
3370  */
3371 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3372 {
3373         const enum ice_block blk = ICE_BLK_RSS;
3374         struct ice_flow_prof *p, *t;
3375         enum ice_status status = ICE_SUCCESS;
3376
3377         if (!ice_is_vsi_valid(hw, vsi_handle))
3378                 return ICE_ERR_PARAM;
3379
3380         if (LIST_EMPTY(&hw->fl_profs[blk]))
3381                 return ICE_SUCCESS;
3382
3383         ice_acquire_lock(&hw->rss_locks);
3384         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3385                                  l_entry)
3386                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3387                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3388                         if (status)
3389                                 break;
3390
3391                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3392                                 status = ice_flow_rem_prof(hw, blk, p->id);
3393                                 if (status)
3394                                         break;
3395                         }
3396                 }
3397         ice_release_lock(&hw->rss_locks);
3398
3399         return status;
3400 }
3401
3402 /**
3403  * ice_get_rss_hdr_type - get an RSS profile's header type
3404  * @prof: RSS flow profile
3405  */
3406 static enum ice_rss_cfg_hdr_type
3407 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3408 {
3409         enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3410
3411         if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3412                 hdr_type = ICE_RSS_OUTER_HEADERS;
3413         } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3414                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3415                         hdr_type = ICE_RSS_INNER_HEADERS;
3416                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3417                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3418                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3419                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3420         }
3421
3422         return hdr_type;
3423 }
3424
3425 /**
3426  * ice_rem_rss_list - remove RSS configuration from list
3427  * @hw: pointer to the hardware structure
3428  * @vsi_handle: software VSI handle
3429  * @prof: pointer to flow profile
3430  *
3431  * Assumption: lock has already been acquired for RSS list
3432  */
3433 static void
3434 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3435 {
3436         enum ice_rss_cfg_hdr_type hdr_type;
3437         struct ice_rss_cfg *r, *tmp;
3438
3439         /* Search for RSS hash fields associated with the VSI that match the
3440          * hash configuration associated with the flow profile. If found,
3441          * remove it from the VSI context's RSS entry list and delete the entry.
3442          */
3443         hdr_type = ice_get_rss_hdr_type(prof);
3444         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3445                                  ice_rss_cfg, l_entry)
3446                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3447                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3448                     r->hash.hdr_type == hdr_type) {
3449                         ice_clear_bit(vsi_handle, r->vsis);
3450                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3451                                 LIST_DEL(&r->l_entry);
3452                                 ice_free(hw, r);
3453                         }
3454                         return;
3455                 }
3456 }
3457
3458 /**
3459  * ice_add_rss_list - add RSS configuration to list
3460  * @hw: pointer to the hardware structure
3461  * @vsi_handle: software VSI handle
3462  * @prof: pointer to flow profile
3463  *
3464  * Assumption: lock has already been acquired for RSS list
3465  */
3466 static enum ice_status
3467 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3468 {
3469         enum ice_rss_cfg_hdr_type hdr_type;
3470         struct ice_rss_cfg *r, *rss_cfg;
3471
3472         hdr_type = ice_get_rss_hdr_type(prof);
3473         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3474                             ice_rss_cfg, l_entry)
3475                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3476                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3477                     r->hash.hdr_type == hdr_type) {
3478                         ice_set_bit(vsi_handle, r->vsis);
3479                         return ICE_SUCCESS;
3480                 }
3481
3482         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3483         if (!rss_cfg)
3484                 return ICE_ERR_NO_MEMORY;
3485
3486         rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3487         rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3488         rss_cfg->hash.hdr_type = hdr_type;
3489         rss_cfg->hash.symm = prof->cfg.symm;
3490         ice_set_bit(vsi_handle, rss_cfg->vsis);
3491
3492         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3493
3494         return ICE_SUCCESS;
3495 }
3496
3497 #define ICE_FLOW_PROF_HASH_S    0
3498 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3499 #define ICE_FLOW_PROF_HDR_S     32
3500 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3501 #define ICE_FLOW_PROF_ENCAP_S   62
3502 #define ICE_FLOW_PROF_ENCAP_M   (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3503
3504 /* Flow profile ID format:
3505  * [0:31] - Packet match fields
3506  * [32:61] - Protocol header
3507  * [62:63] - Encapsulation flag:
3508  *           0 if non-tunneled
3509  *           1 if tunneled
3510  *           2 if tunneled with outer IPv4
3511  *           3 if tunneled with outer IPv6
3512  */
3513 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3514         (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3515               (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3516               (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M))
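
/* Illustrative example: a caller hashing on the IPv4 source and destination
 * addresses of a non-tunneled flow could generate a profile ID of the form
 *
 *      ICE_FLOW_GEN_PROFID(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *                          BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA),
 *                          ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER,
 *                          ICE_RSS_OUTER_HEADERS);
 *
 * packing the hash fields into bits [0:31], the protocol header bitmap into
 * bits [32:61] and the encapsulation type into bits [62:63].
 */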
3517
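/**
 * ice_rss_config_xor_word - program one word of a profile's GLQF_HSYMM config
 * @hw: pointer to the hardware structure
 * @prof_id: hardware profile ID of the RSS profile
 * @src: index of the field vector word to program
 * @dst: index of the field vector word to pair it with
 *
 * Read-modify-writes byte (src % 4) of GLQF_HSYMM(prof_id, src / 4) with the
 * value (dst | 0x80), i.e. the paired word index with the enable bit set.
 * ice_rss_update_symm() uses this to make source/destination field pairs
 * hash symmetrically.
 */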
3518 static void
3519 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3520 {
3521         u32 s = ((src % 4) << 3); /* byte shift */
3522         u32 v = dst | 0x80; /* value to program */
3523         u8 i = src / 4; /* register index */
3524         u32 reg;
3525
3526         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3527         reg = (reg & ~(0xff << s)) | (v << s);
3528         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3529 }
3530
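/**
 * ice_rss_config_xor - set up symmetric hashing for a src/dst field pair
 * @hw: pointer to the hardware structure
 * @prof_id: hardware profile ID of the RSS profile
 * @src: first field vector word index of the source field
 * @dst: first field vector word index of the destination field
 * @len: number of field vector words occupied by each field
 *
 * Programs GLQF_HSYMM for all 'len' words of the pair in both directions
 * (src paired with dst and dst paired with src), converting the indices to
 * the reversed word ordering expected by the register.
 */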
3531 static void
3532 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3533 {
3534         int fv_last_word =
3535                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3536         int i;
3537
3538         for (i = 0; i < len; i++) {
3539                 ice_rss_config_xor_word(hw, prof_id,
3540                                         /* Yes, field vector in GLQF_HSYMM and
3541                                          * GLQF_HINSET is reversed!
3542                                          */
3543                                         fv_last_word - (src + i),
3544                                         fv_last_word - (dst + i));
3545                 ice_rss_config_xor_word(hw, prof_id,
3546                                         fv_last_word - (dst + i),
3547                                         fv_last_word - (src + i));
3548         }
3549 }
3550
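/**
 * ice_rss_update_symm - update a profile's symmetric RSS configuration
 * @hw: pointer to the hardware structure
 * @prof: flow profile being updated
 *
 * Looks up the hardware profile ID for the RSS block and clears the
 * profile's GLQF_HSYMM registers. If symmetric hashing is enabled for the
 * profile, the IPv4/IPv6 source and destination addresses and the
 * TCP/UDP/SCTP source and destination ports are paired so that both
 * directions of a flow produce the same hash value.
 */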
3551 static void
3552 ice_rss_update_symm(struct ice_hw *hw,
3553                     struct ice_flow_prof *prof)
3554 {
3555         struct ice_prof_map *map;
3556         u8 prof_id, m;
3557
3558         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3559         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3560         if (map)
3561                 prof_id = map->prof_id;
3562         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3563         if (!map)
3564                 return;
3565         /* clear to default */
3566         for (m = 0; m < 6; m++)
3567                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3568         if (prof->cfg.symm) {
3569                 struct ice_flow_seg_info *seg =
3570                         &prof->segs[prof->segs_cnt - 1];
3571
3572                 struct ice_flow_seg_xtrct *ipv4_src =
3573                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3574                 struct ice_flow_seg_xtrct *ipv4_dst =
3575                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3576                 struct ice_flow_seg_xtrct *ipv6_src =
3577                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3578                 struct ice_flow_seg_xtrct *ipv6_dst =
3579                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3580
3581                 struct ice_flow_seg_xtrct *tcp_src =
3582                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3583                 struct ice_flow_seg_xtrct *tcp_dst =
3584                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3585
3586                 struct ice_flow_seg_xtrct *udp_src =
3587                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3588                 struct ice_flow_seg_xtrct *udp_dst =
3589                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3590
3591                 struct ice_flow_seg_xtrct *sctp_src =
3592                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3593                 struct ice_flow_seg_xtrct *sctp_dst =
3594                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3595
3596                 /* xor IPv4 */
3597                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3598                         ice_rss_config_xor(hw, prof_id,
3599                                            ipv4_src->idx, ipv4_dst->idx, 2);
3600
3601                 /* xor IPv6 */
3602                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3603                         ice_rss_config_xor(hw, prof_id,
3604                                            ipv6_src->idx, ipv6_dst->idx, 8);
3605
3606                 /* xor TCP */
3607                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3608                         ice_rss_config_xor(hw, prof_id,
3609                                            tcp_src->idx, tcp_dst->idx, 1);
3610
3611                 /* xor UDP */
3612                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3613                         ice_rss_config_xor(hw, prof_id,
3614                                            udp_src->idx, udp_dst->idx, 1);
3615
3616                 /* xor SCTP */
3617                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3618                         ice_rss_config_xor(hw, prof_id,
3619                                            sctp_src->idx, sctp_dst->idx, 1);
3620         }
3621 }
3622
3623 /**
3624  * ice_add_rss_cfg_sync - add an RSS configuration
3625  * @hw: pointer to the hardware structure
3626  * @vsi_handle: software VSI handle
3627  * @cfg: configure parameters
3628  *
3629  * Assumption: lock has already been acquired for RSS list
3630  */
3631 static enum ice_status
3632 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3633                      const struct ice_rss_hash_cfg *cfg)
3634 {
3635         const enum ice_block blk = ICE_BLK_RSS;
3636         struct ice_flow_prof *prof = NULL;
3637         struct ice_flow_seg_info *segs;
3638         enum ice_status status;
3639         u8 segs_cnt;
3640
3641         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3642                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3643
3644         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3645                                                       sizeof(*segs));
3646         if (!segs)
3647                 return ICE_ERR_NO_MEMORY;
3648
3649         /* Construct the packet segment info from the hashed fields */
3650         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3651         if (status)
3652                 goto exit;
3653
3654         /* Don't do RSS for GTPU Outer */
3655         if (segs_cnt == ICE_FLOW_SEG_SINGLE &&
3656             segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3657                 status = ICE_SUCCESS;
3658                 goto exit;
3659         }
3660
3661         /* Search for a flow profile that has matching headers, hash fields
3662          * and has the input VSI associated with it. If found, no further
3663          * operations are required, so exit.
3664          */
3665         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3666                                         vsi_handle,
3667                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
3668                                         ICE_FLOW_FIND_PROF_CHK_VSI);
3669         if (prof) {
3670                 if (prof->cfg.symm == cfg->symm)
3671                         goto exit;
3672                 prof->cfg.symm = cfg->symm;
3673                 goto update_symm;
3674         }
3675
3676         /* Check if a flow profile exists with the same protocol headers and
3677          * associated with the input VSI. If so disassociate the VSI from
3678          * associated with the input VSI. If so, disassociate the VSI from
3679          * the protocol header and new hash field configuration.
3680          */
3681         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3682                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3683         if (prof) {
3684                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3685                 if (!status)
3686                         ice_rem_rss_list(hw, vsi_handle, prof);
3687                 else
3688                         goto exit;
3689
3690                 /* Remove profile if it has no VSIs associated */
3691                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3692                         status = ice_flow_rem_prof(hw, blk, prof->id);
3693                         if (status)
3694                                 goto exit;
3695                 }
3696         }
3697
3698         /* Search for a profile that has the same match fields only. If one
3699          * exists, then associate the VSI with this profile.
3700          */
3701         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3702                                         vsi_handle,
3703                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3704         if (prof) {
3705                 if (prof->cfg.symm == cfg->symm) {
3706                         status = ice_flow_assoc_prof(hw, blk, prof,
3707                                                      vsi_handle);
3708                         if (!status)
3709                                 status = ice_add_rss_list(hw, vsi_handle,
3710                                                           prof);
3711                 } else {
3712                         /* if a profile exists but with a different symmetric
3713                          * requirement, just return an error.
3714                          */
3715                         status = ICE_ERR_NOT_SUPPORTED;
3716                 }
3717                 goto exit;
3718         }
3719
3720         /* Create a new flow profile with generated profile and packet
3721          * segment information.
3722          */
3723         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3724                                    ICE_FLOW_GEN_PROFID(cfg->hash_flds,
3725                                                        segs[segs_cnt - 1].hdrs,
3726                                                        cfg->hdr_type),
3727                                    segs, segs_cnt, NULL, 0, &prof);
3728         if (status)
3729                 goto exit;
3730
3731         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3732         /* If association to a new flow profile failed then this profile can
3733          * be removed.
3734          */
3735         if (status) {
3736                 ice_flow_rem_prof(hw, blk, prof->id);
3737                 goto exit;
3738         }
3739
3740         status = ice_add_rss_list(hw, vsi_handle, prof);
3741
3742         prof->cfg.symm = cfg->symm;
3743 update_symm:
3744         ice_rss_update_symm(hw, prof);
3745
3746 exit:
3747         ice_free(hw, segs);
3748         return status;
3749 }

/**
 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @cfg: configuration parameters
 *
 * This function generates a flow profile from the fields to hash on and the
 * flow type, then uses the VSI number to add a flow entry to that profile;
 * see the usage sketch after this function.
 */
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
		const struct ice_rss_hash_cfg *cfg)
{
	struct ice_rss_hash_cfg local_cfg;
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle) ||
	    !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
	    cfg->hash_flds == ICE_HASH_INVALID)
		return ICE_ERR_PARAM;

	local_cfg = *cfg;
	if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
		ice_acquire_lock(&hw->rss_locks);
		status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
		ice_release_lock(&hw->rss_locks);
	} else {
		ice_acquire_lock(&hw->rss_locks);
		local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
		status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
		if (!status) {
			local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
						      &local_cfg);
		}
		ice_release_lock(&hw->rss_locks);
	}

	return status;
}
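
/* Usage sketch for ice_add_rss_cfg() (illustrative only): hash IPv4 packets
 * on source/destination address for a VSI. ICE_FLOW_SEG_HDR_IPV4 and
 * ICE_FLOW_HASH_IPV4 are assumed to be the IPv4 header flag and hash-field
 * bitmap from ice_flow.h.
 *
 *	struct ice_rss_hash_cfg cfg = { 0 };
 *	enum ice_status status;
 *
 *	cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4;
 *	cfg.hash_flds = ICE_FLOW_HASH_IPV4;
 *	cfg.hdr_type = ICE_RSS_ANY_HEADERS;	(program outer and inner)
 *	cfg.symm = false;
 *
 *	status = ice_add_rss_cfg(hw, vsi_handle, &cfg);
 *
 * ICE_ERR_NOT_SUPPORTED is returned when a profile with the same match
 * fields but a different symmetry requirement already exists.
 */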

/**
 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @cfg: configuration parameters
 *
 * Assumption: the RSS list lock has already been acquired
 */
static enum ice_status
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
		     const struct ice_rss_hash_cfg *cfg)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_seg_info *segs;
	struct ice_flow_prof *prof;
	enum ice_status status;
	u8 segs_cnt;

	segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
			ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
						      sizeof(*segs));
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
	if (status)
		goto out;

	/* Don't do RSS for outer GTPU */
	if (segs_cnt == ICE_FLOW_SEG_SINGLE &&
	    segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
		status = ICE_SUCCESS;
		goto out;
	}

	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto out;
	}

	status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
	if (status)
		goto out;

	/* Remove the RSS configuration from the VSI context before deleting
	 * the flow profile.
	 */
	ice_rem_rss_list(hw, vsi_handle, prof);

	if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
		status = ice_flow_rem_prof(hw, blk, prof->id);

out:
	ice_free(hw, segs);
	return status;
}

/**
 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @cfg: configuration parameters
 *
 * This function looks up the flow profile based on the input hash field
 * bitmap, iterates through that profile's entry list, and finds the entry
 * associated with the input VSI to be removed. Calls are made to the
 * underlying flow APIs, which will in turn build or update buffers for the
 * RSS XLT1 section. A usage sketch follows the function body.
 */
enum ice_status
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
		const struct ice_rss_hash_cfg *cfg)
{
	struct ice_rss_hash_cfg local_cfg;
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle) ||
	    !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
	    cfg->hash_flds == ICE_HASH_INVALID)
		return ICE_ERR_PARAM;

	ice_acquire_lock(&hw->rss_locks);
	local_cfg = *cfg;
	if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
		status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
	} else {
		local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
		status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);

		if (!status) {
			local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
			status = ice_rem_rss_cfg_sync(hw, vsi_handle,
						      &local_cfg);
		}
	}
	ice_release_lock(&hw->rss_locks);

	return status;
}
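
/* Usage sketch for ice_rem_rss_cfg() (illustrative only): removal takes the
 * same ice_rss_hash_cfg that was used when the configuration was added:
 *
 *	status = ice_rem_rss_cfg(hw, vsi_handle, &cfg);
 *
 * ICE_ERR_DOES_NOT_EXIST indicates that no flow profile with the requested
 * match fields is associated with this VSI.
 */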

/**
 * ice_replay_rss_cfg - replay RSS configurations associated with a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 */
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status = ICE_SUCCESS;
	struct ice_rss_cfg *r;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
			    ice_rss_cfg, l_entry) {
		if (ice_is_bit_set(r->vsis, vsi_handle)) {
			status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
			if (status)
				break;
		}
	}
	ice_release_lock(&hw->rss_locks);

	return status;
}
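
/* Usage sketch for ice_replay_rss_cfg() (illustrative only): once a VSI has
 * been rebuilt, e.g. as part of reset recovery, the RSS configurations
 * recorded for it in hw->rss_list_head can be re-programmed in one call:
 *
 *	status = ice_replay_rss_cfg(hw, vsi_handle);
 */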

/**
 * ice_get_rss_cfg - returns hashed fields for the given header types
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hdrs: protocol header type
 *
 * This function returns the match fields of the first flow profile instance
 * that has the given header types and contains the input VSI.
 */
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
{
	u64 rss_hash = ICE_HASH_INVALID;
	struct ice_rss_cfg *r;

	/* verify that the protocol header is non-zero and the VSI is valid */
	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_HASH_INVALID;

	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
			    ice_rss_cfg, l_entry)
		if (ice_is_bit_set(r->vsis, vsi_handle) &&
		    r->hash.addl_hdrs == hdrs) {
			rss_hash = r->hash.hash_flds;
			break;
		}
	ice_release_lock(&hw->rss_locks);

	return rss_hash;
}
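
/* Usage sketch for ice_get_rss_cfg() (illustrative only): query the hashed
 * fields programmed for a given protocol header bitmap. The @hdrs argument
 * must match the addl_hdrs value used when the configuration was added;
 * ICE_FLOW_SEG_HDR_IPV4 below is assumed to be that value for a plain IPv4
 * configuration.
 *
 *	u64 hash_flds;
 *
 *	hash_flds = ice_get_rss_cfg(hw, vsi_handle, ICE_FLOW_SEG_HDR_IPV4);
 *
 * ICE_HASH_INVALID is returned when no matching configuration exists for
 * this VSI.
 */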