net/ice/base: align macro names to specification
drivers/net/ice/base/ice_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */

#include "ice_common.h"
#include "ice_flow.h"

/* Size of known protocol header fields */
#define ICE_FLOW_FLD_SZ_ETH_TYPE        2
#define ICE_FLOW_FLD_SZ_VLAN            2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
#define ICE_FLOW_FLD_SZ_IP_DSCP         1
#define ICE_FLOW_FLD_SZ_IP_TTL          1
#define ICE_FLOW_FLD_SZ_IP_PROT         1
#define ICE_FLOW_FLD_SZ_PORT            2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
#define ICE_FLOW_FLD_SZ_ICMP_CODE       1
#define ICE_FLOW_FLD_SZ_ARP_OPER        2
#define ICE_FLOW_FLD_SZ_GRE_KEYID       4
#define ICE_FLOW_FLD_SZ_GTP_TEID        4
#define ICE_FLOW_FLD_SZ_GTP_QFI         2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
#define ICE_FLOW_FLD_SZ_PFCP_SEID       8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
#define ICE_FLOW_FLD_SZ_ESP_SPI         4
#define ICE_FLOW_FLD_SZ_AH_SPI          4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
#define ICE_FLOW_FLD_SZ_VXLAN_VNI       4

/* Describe properties of a protocol header field */
struct ice_flow_field_info {
        enum ice_flow_seg_hdr hdr;
        s16 off;        /* Offset from start of a protocol header, in bits */
        u16 size;       /* Size of fields in bits */
        u16 mask;       /* 16-bit mask for field */
};

#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
        .hdr = _hdr, \
        .off = (_offset_bytes) * BITS_PER_BYTE, \
        .size = (_size_bytes) * BITS_PER_BYTE, \
        .mask = 0, \
}

#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
        .hdr = _hdr, \
        .off = (_offset_bytes) * BITS_PER_BYTE, \
        .size = (_size_bytes) * BITS_PER_BYTE, \
        .mask = _mask, \
}

/* Table containing properties of supported protocol header fields */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
        /* Ether */
        /* ICE_FLOW_FIELD_IDX_ETH_DA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
        /* ICE_FLOW_FIELD_IDX_ETH_SA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
        /* ICE_FLOW_FIELD_IDX_S_VLAN */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
        /* ICE_FLOW_FIELD_IDX_C_VLAN */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
        /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
        /* IPv4 / IPv6 */
        /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
                              0x00fc),
        /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
                              0x0ff0),
        /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
                              ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
        /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
                              ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
        /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
                              ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
        /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
                              ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
        /* ICE_FLOW_FIELD_IDX_IPV4_SA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV4_DA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV6_SA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV6_DA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
                          ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
                          ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
                          ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
                          ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
                          ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
                          ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
        /* Transport */
        /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
        /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
        /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
        /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
        /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
        /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
        /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
        /* ARP */
        /* ICE_FLOW_FIELD_IDX_ARP_SIP */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
        /* ICE_FLOW_FIELD_IDX_ARP_DIP */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
        /* ICE_FLOW_FIELD_IDX_ARP_SHA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
        /* ICE_FLOW_FIELD_IDX_ARP_DHA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
        /* ICE_FLOW_FIELD_IDX_ARP_OP */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
        /* ICMP */
        /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
        /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
        /* GRE */
        /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
        /* GTP */
        /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
                          ICE_FLOW_FLD_SZ_GTP_TEID),
        /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
                          ICE_FLOW_FLD_SZ_GTP_TEID),
        /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
                          ICE_FLOW_FLD_SZ_GTP_TEID),
        /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
                              ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
        /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
                          ICE_FLOW_FLD_SZ_GTP_TEID),
        /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
                          ICE_FLOW_FLD_SZ_GTP_TEID),
        /* PPPOE */
        /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
                          ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
        /* PFCP */
        /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
                          ICE_FLOW_FLD_SZ_PFCP_SEID),
        /* L2TPV3 */
        /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
                          ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
        /* ESP */
        /* ICE_FLOW_FIELD_IDX_ESP_SPI */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
                          ICE_FLOW_FLD_SZ_ESP_SPI),
        /* AH */
        /* ICE_FLOW_FIELD_IDX_AH_SPI */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
                          ICE_FLOW_FLD_SZ_AH_SPI),
        /* NAT_T_ESP */
        /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
                          ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
        /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
                          ICE_FLOW_FLD_SZ_VXLAN_VNI),
};

/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
        0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
        0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
        0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00000307,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

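/* Layout note (illustrative, assuming the flat little-endian bitmap layout
 * used by ice_bitmap_t): each table in this group is a 32 x 32-bit bitmap in
 * which bit N, i.e. bit (N % 32) of word (N / 32), marks packet type N as
 * relevant for the protocol header in question. For example, the low word of
 * ice_ptypes_mac_ofos above, 0xFDC00846, has bits 1, 2, 6 and 11 set, so
 * packet types 1, 2, 6 and 11 (among others) carry an Outer/First/Single MAC
 * header. ice_flow_proc_seg_hdrs() below ANDs these tables together to narrow
 * the set of packet types that can match a flow profile.
 */
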
/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
        0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
        0x0000077E, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
 * include IPV4 other PTYPEs
 */
static const u32 ice_ptypes_ipv4_ofos[] = {
        0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
        0x00000000, 0x00000155, 0x00000000, 0x00000000,
        0x00000000, 0x000FC000, 0x000002A0, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header, includes
 * IPV4 other PTYPEs
 */
static const u32 ice_ptypes_ipv4_ofos_all[] = {
        0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
        0x00000000, 0x00000155, 0x00000000, 0x00000000,
        0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
        0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
        0x0000000E, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x001FF800, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
 * include IPV6 other PTYPEs
 */
static const u32 ice_ptypes_ipv6_ofos[] = {
        0x00000000, 0x00000000, 0x77000000, 0x10002000,
        0x00000000, 0x000002AA, 0x00000000, 0x00000000,
        0x00000000, 0x03F00000, 0x00000540, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header, includes
 * IPV6 other PTYPEs
 */
static const u32 ice_ptypes_ipv6_ofos_all[] = {
        0x00000000, 0x00000000, 0x77000000, 0x10002000,
        0x00000000, 0x000002AA, 0x00000000, 0x00000000,
        0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
        0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
        0x00000770, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
        0x10C00000, 0x04000800, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
static const u32 ice_ptypes_ipv4_il_no_l4[] = {
        0x60000000, 0x18043008, 0x80000002, 0x6010c021,
        0x00000008, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00139800, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
        0x00000000, 0x00000000, 0x43000000, 0x10002000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x02300000, 0x00000540, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
static const u32 ice_ptypes_ipv6_il_no_l4[] = {
        0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
        0x00000430, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x4e600000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
        0x00000800, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
        0x81000000, 0x20204040, 0x04000010, 0x80810102,
        0x00000040, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00410000, 0x908427E0, 0x00000007,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
        0x04000000, 0x80810102, 0x10000040, 0x02040408,
        0x00000102, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00820000, 0x21084000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
        0x08000000, 0x01020204, 0x20000081, 0x04080810,
        0x00000204, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x01040000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
        0x10000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
        0x00000000, 0x02040408, 0x40000102, 0x08101020,
        0x00000408, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x42108000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
        0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
        0x0000017E, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC header */
static const u32 ice_ptypes_mac_il[] = {
        0x00000000, 0x20000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC */
static const u32 ice_ptypes_gtpc[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x000001E0, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for VXLAN with VNI */
static const u32 ice_ptypes_vxlan_vni[] = {
        0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
        0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC with TEID */
static const u32 ice_ptypes_gtpc_tid[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000060, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPU */
static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
        { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
};

static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
        { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
};

static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
        { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
};

static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
        { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
};

static const u32 ice_ptypes_gtpu[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

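/* Note (illustrative): the ice_attr_gtpu_* tables above pair GTPU packet
 * types with a per-PTYPE attribute (session, PDU extension header, downlink
 * or uplink). When a segment requests e.g. ICE_FLOW_SEG_HDR_GTPU_DWN,
 * ice_flow_proc_seg_hdrs() below ANDs ice_ptypes_gtpu into the PTYPE bitmap
 * and records ice_attr_gtpu_down / ARRAY_SIZE(ice_attr_gtpu_down) in
 * params->attr / params->attr_cnt, so the attributes can later be applied to
 * the matching PTYPEs.
 */
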
/* Packet types for pppoe */
static const u32 ice_ptypes_pppoe[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with PFCP NODE header */
static const u32 ice_ptypes_pfcp_node[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x80000000, 0x00000002,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with PFCP SESSION header */
static const u32 ice_ptypes_pfcp_session[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000005,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for l2tpv3 */
static const u32 ice_ptypes_l2tpv3[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000300,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for esp */
static const u32 ice_ptypes_esp[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000003, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for ah */
static const u32 ice_ptypes_ah[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x0000000C, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with NAT_T ESP header */
static const u32 ice_ptypes_nat_t_esp[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000030, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_mac_non_ip_ofos[] = {
        0x00000846, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_gtpu_no_ip[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000600, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
        enum ice_block blk;
        u16 entry_length; /* # of bytes formatted entry will require */
        u8 es_cnt;
        struct ice_flow_prof *prof;

        /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0.
         * This will give us the direction flags.
         */
        struct ice_fv_word es[ICE_MAX_FV_WORDS];
        /* attributes can be used to add attributes to a particular PTYPE */
        const struct ice_ptype_attributes *attr;
        u16 attr_cnt;

        u16 mask[ICE_MAX_FV_WORDS];
        ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
};

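/* Illustrative example (not used directly): each extraction sequence entry
 * in es[] is an ice_fv_word naming a protocol ID and an offset. For instance,
 * ice_flow_xtract_pkt_flags() below fills one entry as
 *
 *     params->es[idx].prot_id = ICE_PROT_META_ID;
 *     params->es[idx].off = flags;
 *
 * while ice_flow_xtract_fld() derives the protocol ID and offset of a match
 * field from ice_flds_info[].
 */
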
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
        (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
        ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
        ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
        ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
        ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP)

#define ICE_FLOW_SEG_HDRS_L2_MASK       \
        (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK       \
        (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
         ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK       \
        (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
         ICE_FLOW_SEG_HDR_SCTP)
/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
        (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)

/**
 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 */
static enum ice_status
ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
{
        u8 i;

        for (i = 0; i < segs_cnt; i++) {
                /* Multiple L3 headers */
                if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
                    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
                        return ICE_ERR_PARAM;

                /* Multiple L4 headers */
                if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
                    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
                        return ICE_ERR_PARAM;
        }

        return ICE_SUCCESS;
}

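/* Example of the checks above (illustrative): a segment with
 * hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 leaves two bits set in
 * (hdrs & ICE_FLOW_SEG_HDRS_L3_MASK), so ice_is_pow2() fails and
 * ice_flow_val_hdrs() returns ICE_ERR_PARAM. A segment with at most one L3
 * and one L4 header, e.g. ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
 * passes.
 */
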
/* Sizes of fixed known protocol headers without header options */
#define ICE_FLOW_PROT_HDR_SZ_MAC        14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4       20
#define ICE_FLOW_PROT_HDR_SZ_IPV6       40
#define ICE_FLOW_PROT_HDR_SZ_ARP        28
#define ICE_FLOW_PROT_HDR_SZ_ICMP       8
#define ICE_FLOW_PROT_HDR_SZ_TCP        20
#define ICE_FLOW_PROT_HDR_SZ_UDP        8
#define ICE_FLOW_PROT_HDR_SZ_SCTP       12

/**
 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose header size is to be determined
 */
static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
{
        u16 sz;

        /* L2 headers */
        sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
                ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;

        /* L3 headers */
        if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
                sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
                sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
                sz += ICE_FLOW_PROT_HDR_SZ_ARP;
        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
                /* An L3 header is required if L4 is specified */
                return 0;

        /* L4 headers */
        if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
                sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
                sz += ICE_FLOW_PROT_HDR_SZ_TCP;
        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
                sz += ICE_FLOW_PROT_HDR_SZ_UDP;
        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
                sz += ICE_FLOW_PROT_HDR_SZ_SCTP;

        return sz;
}

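/* Worked example (illustrative): a segment with
 * hdrs = ICE_FLOW_SEG_HDR_VLAN | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP
 * yields 16 (MAC + VLAN) + 40 (IPv6) + 20 (TCP) = 76 bytes, while a segment
 * that specifies an L4 header without any L3 header returns 0.
 */
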
/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers being present in packet segments of the specified flow profile.
 */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
        struct ice_flow_prof *prof;
        u8 i;

        ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
                   ICE_NONDMA_MEM);

        prof = params->prof;

        for (i = 0; i < params->prof->segs_cnt; i++) {
                const ice_bitmap_t *src;
                u32 hdrs;

                hdrs = prof->segs[i].hdrs;

                if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
                        src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
                                (const ice_bitmap_t *)ice_ptypes_mac_il;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                }

                if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
                        src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                }

                if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       (const ice_bitmap_t *)ice_ptypes_arp_of,
                                       ICE_FLOW_PTYPE_MAX);
                }

                if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
                        src = (const ice_bitmap_t *)ice_ptypes_pppoe;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                }
                if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
                    (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
                        src = i ?
                                (const ice_bitmap_t *)ice_ptypes_ipv4_il :
                                (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
                           (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
                        src = i ?
                                (const ice_bitmap_t *)ice_ptypes_ipv6_il :
                                (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
                           !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
                        src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
                                (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
                        src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
                                (const ice_bitmap_t *)ice_ptypes_ipv4_il;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
                           !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
                        src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
                                (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
                        src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
                                (const ice_bitmap_t *)ice_ptypes_ipv6_il;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                }

                if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
                        src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
                        src = (const ice_bitmap_t *)ice_ptypes_pppoe;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else {
                        src = (const ice_bitmap_t *)ice_ptypes_pppoe;
                        ice_andnot_bitmap(params->ptypes, params->ptypes, src,
                                          ICE_FLOW_PTYPE_MAX);
                }

                if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
                        src = (const ice_bitmap_t *)ice_ptypes_udp_il;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       (const ice_bitmap_t *)ice_ptypes_tcp_il,
                                       ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
                        src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                }

                if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
                        src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
                                (const ice_bitmap_t *)ice_ptypes_icmp_il;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
                        if (!i) {
                                src = (const ice_bitmap_t *)ice_ptypes_gre_of;
                                ice_and_bitmap(params->ptypes, params->ptypes,
                                               src, ICE_FLOW_PTYPE_MAX);
                        }
                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
                        src = (const ice_bitmap_t *)ice_ptypes_gtpc;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
                        src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
                        src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
                        src = (const ice_bitmap_t *)ice_ptypes_gtpu;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);

                        /* Attributes for GTP packet with downlink */
                        params->attr = ice_attr_gtpu_down;
                        params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
                        src = (const ice_bitmap_t *)ice_ptypes_gtpu;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);

                        /* Attributes for GTP packet with uplink */
                        params->attr = ice_attr_gtpu_up;
                        params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
                        src = (const ice_bitmap_t *)ice_ptypes_gtpu;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);

                        /* Attributes for GTP packet with Extension Header */
                        params->attr = ice_attr_gtpu_eh;
                        params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
                        src = (const ice_bitmap_t *)ice_ptypes_gtpu;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);

                        /* Attributes for GTP packet without Extension Header */
                        params->attr = ice_attr_gtpu_session;
                        params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
                } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
                        src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
                        src = (const ice_bitmap_t *)ice_ptypes_esp;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
                        src = (const ice_bitmap_t *)ice_ptypes_ah;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
                        src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
                        src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                }

                if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
                        if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
                                src =
                                (const ice_bitmap_t *)ice_ptypes_pfcp_node;
                        else
                                src =
                                (const ice_bitmap_t *)ice_ptypes_pfcp_session;

                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else {
                        src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
                        ice_andnot_bitmap(params->ptypes, params->ptypes,
                                          src, ICE_FLOW_PTYPE_MAX);

                        src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
                        ice_andnot_bitmap(params->ptypes, params->ptypes,
                                          src, ICE_FLOW_PTYPE_MAX);
                }
        }

        return ICE_SUCCESS;
}

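/* Illustrative walk-through: for a single-segment profile with
 * hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP, the PTYPE bitmap
 * starts as all ones, is ANDed with ice_ptypes_ipv4_ofos (outer IPv4 for
 * segment index 0) and then with ice_ptypes_udp_il, leaving only the packet
 * types that carry an outer IPv4 header with UDP. PPPoE and PFCP packet
 * types are additionally masked out because neither header was requested.
 */
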
/**
 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
 *
 * This function allocates an extraction sequence entry for a DWORD-size
 * chunk of the packet flags.
 */
static enum ice_status
ice_flow_xtract_pkt_flags(struct ice_hw *hw,
                          struct ice_flow_prof_params *params,
                          enum ice_flex_mdid_pkt_flags flags)
{
        u8 fv_words = hw->blk[params->blk].es.fvw;
        u8 idx;

        /* Make sure the number of extraction sequence entries required does not
         * exceed the block's capacity.
         */
        if (params->es_cnt >= fv_words)
                return ICE_ERR_MAX_LIMIT;

        /* some blocks require a reversed field vector layout */
        if (hw->blk[params->blk].es.reverse)
                idx = fv_words - params->es_cnt - 1;
        else
                idx = params->es_cnt;

        params->es[idx].prot_id = ICE_PROT_META_ID;
        params->es[idx].off = flags;
        params->es_cnt++;

        return ICE_SUCCESS;
}

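/* Illustrative example: with a hypothetical field vector width of
 * fv_words == 48 and es_cnt == 0, a block with a reversed layout places this
 * entry at index 47 (fv_words - es_cnt - 1), whereas a non-reversed block
 * places it at index 0.
 */
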
/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 * @match: bitfield of all fields
 *
 * This function determines the protocol ID, offset, and size of the given
 * field. It then allocates one or more extraction sequence entries for the
 * given field, and fills the entries with protocol ID and offset information.
 */
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
                    u8 seg, enum ice_flow_field fld, u64 match)
{
        enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
        enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
        u8 fv_words = hw->blk[params->blk].es.fvw;
        struct ice_flow_fld_info *flds;
        u16 cnt, ese_bits, i;
        u16 sib_mask = 0;
        u16 mask;
        u16 off;

        flds = params->prof->segs[seg].fields;

        switch (fld) {
        case ICE_FLOW_FIELD_IDX_ETH_DA:
        case ICE_FLOW_FIELD_IDX_ETH_SA:
        case ICE_FLOW_FIELD_IDX_S_VLAN:
        case ICE_FLOW_FIELD_IDX_C_VLAN:
                prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
                break;
        case ICE_FLOW_FIELD_IDX_ETH_TYPE:
                prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
                break;
        case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
                prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
                break;
        case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
                prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
                break;
        case ICE_FLOW_FIELD_IDX_IPV4_TTL:
        case ICE_FLOW_FIELD_IDX_IPV4_PROT:
                prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;

                /* TTL and PROT share the same extraction seq. entry.
                 * Each is considered a sibling to the other in terms of sharing
                 * the same extraction sequence entry.
                 */
                if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
                        sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
                else
                        sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;

                /* If the sibling field is also included, that field's
                 * mask needs to be included.
                 */
                if (match & BIT(sib))
                        sib_mask = ice_flds_info[sib].mask;
                break;
        case ICE_FLOW_FIELD_IDX_IPV6_TTL:
        case ICE_FLOW_FIELD_IDX_IPV6_PROT:
                prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;

                /* TTL and PROT share the same extraction seq. entry.
                 * Each is considered a sibling to the other in terms of sharing
                 * the same extraction sequence entry.
                 */
                if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
                        sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
                else
                        sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;

                /* If the sibling field is also included, that field's
                 * mask needs to be included.
                 */
                if (match & BIT(sib))
                        sib_mask = ice_flds_info[sib].mask;
                break;
        case ICE_FLOW_FIELD_IDX_IPV4_SA:
        case ICE_FLOW_FIELD_IDX_IPV4_DA:
                prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
                break;
        case ICE_FLOW_FIELD_IDX_IPV6_SA:
        case ICE_FLOW_FIELD_IDX_IPV6_DA:
        case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
        case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
        case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
        case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
        case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
        case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
                prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
                break;
        case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
        case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
        case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
                prot_id = ICE_PROT_TCP_IL;
                break;
        case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
        case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
                prot_id = ICE_PROT_UDP_IL_OR_S;
                break;
        case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
        case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
                prot_id = ICE_PROT_SCTP_IL;
                break;
        case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1168         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1169         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1170         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1171         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1172         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1173         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1174                 /* GTP is accessed through the UDP OF protocol */
1175                 prot_id = ICE_PROT_UDP_OF;
1176                 break;
1177         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1178                 prot_id = ICE_PROT_PPPOE;
1179                 break;
1180         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1181                 prot_id = ICE_PROT_UDP_IL_OR_S;
1182                 break;
1183         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1184                 prot_id = ICE_PROT_L2TPV3;
1185                 break;
1186         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1187                 prot_id = ICE_PROT_ESP_F;
1188                 break;
1189         case ICE_FLOW_FIELD_IDX_AH_SPI:
1190                 prot_id = ICE_PROT_ESP_2;
1191                 break;
1192         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1193                 prot_id = ICE_PROT_UDP_IL_OR_S;
1194                 break;
1195         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1196         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1197         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1198         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1199         case ICE_FLOW_FIELD_IDX_ARP_OP:
1200                 prot_id = ICE_PROT_ARP_OF;
1201                 break;
1202         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1203         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1204                 /* ICMP type and code share the same extraction seq. entry */
1205                 prot_id = (params->prof->segs[seg].hdrs &
1206                            ICE_FLOW_SEG_HDR_IPV4) ?
1207                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1208                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1209                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1210                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1211                 break;
1212         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1213                 prot_id = ICE_PROT_GRE_OF;
1214                 break;
1215         default:
1216                 return ICE_ERR_NOT_IMPL;
1217         }
1218
1219         /* Each extraction sequence entry is a word in size, and extracts a
1220          * word-aligned offset from a protocol header.
1221          */
1222         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1223
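             /* Decompose the field's bit offset into a word-aligned byte offset
              * (off) and a bit displacement within that word (disp). For example,
              * a one-byte field starting at byte offset 1 of its header (bit
              * offset 8) yields off = 0 and disp = 8 with 16-bit extraction words.
              */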
1224         flds[fld].xtrct.prot_id = prot_id;
1225         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1226                 ICE_FLOW_FV_EXTRACT_SZ;
1227         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1228         flds[fld].xtrct.idx = params->es_cnt;
1229         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1230
1231         /* Adjust the next field-entry index after accommodating the number of
1232          * entries this field consumes
1233          */
1234         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1235                                   ice_flds_info[fld].size, ese_bits);
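             /* For example, a 4-byte (32-bit) field with no displacement consumes
              * DIVIDE_AND_ROUND_UP(32, 16) = 2 extraction sequence entries.
              */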
1236
1237         /* Fill in the extraction sequence entries needed for this field */
1238         off = flds[fld].xtrct.off;
1239         mask = flds[fld].xtrct.mask;
1240         for (i = 0; i < cnt; i++) {
1241                 /* Only consume an extraction sequence entry if there is no
1242                  * sibling field associated with this field or the sibling entry
1243                  * already extracts the word shared with this field.
1244                  */
1245                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1246                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1247                     flds[sib].xtrct.off != off) {
1248                         u8 idx;
1249
1250                         /* Make sure the number of extraction sequence entries
1251                          * required does not exceed the block's capacity
1252                          */
1253                         if (params->es_cnt >= fv_words)
1254                                 return ICE_ERR_MAX_LIMIT;
1255
1256                         /* some blocks require a reversed field vector layout */
1257                         if (hw->blk[params->blk].es.reverse)
1258                                 idx = fv_words - params->es_cnt - 1;
1259                         else
1260                                 idx = params->es_cnt;
1261
1262                         params->es[idx].prot_id = prot_id;
1263                         params->es[idx].off = off;
1264                         params->mask[idx] = mask | sib_mask;
1265                         params->es_cnt++;
1266                 }
1267
1268                 off += ICE_FLOW_FV_EXTRACT_SZ;
1269         }
1270
1271         return ICE_SUCCESS;
1272 }
1273
1274 /**
1275  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1276  * @hw: pointer to the HW struct
1277  * @params: information about the flow to be processed
1278  * @seg: index of packet segment whose raw fields are to be extracted
1279  */
1280 static enum ice_status
1281 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1282                      u8 seg)
1283 {
1284         u16 fv_words;
1285         u16 hdrs_sz;
1286         u8 i;
1287
1288         if (!params->prof->segs[seg].raws_cnt)
1289                 return ICE_SUCCESS;
1290
1291         if (params->prof->segs[seg].raws_cnt >
1292             ARRAY_SIZE(params->prof->segs[seg].raws))
1293                 return ICE_ERR_MAX_LIMIT;
1294
1295         /* Offsets within the segment headers are not supported */
1296         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1297         if (!hdrs_sz)
1298                 return ICE_ERR_PARAM;
1299
1300         fv_words = hw->blk[params->blk].es.fvw;
1301
1302         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1303                 struct ice_flow_seg_fld_raw *raw;
1304                 u16 off, cnt, j;
1305
1306                 raw = &params->prof->segs[seg].raws[i];
1307
1308                 /* Storing extraction information */
1309                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1310                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1311                         ICE_FLOW_FV_EXTRACT_SZ;
1312                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1313                         BITS_PER_BYTE;
1314                 raw->info.xtrct.idx = params->es_cnt;
1315
1316                 /* Determine the number of field vector entries this raw field
1317                  * consumes.
1318                  */
1319                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1320                                           (raw->info.src.last * BITS_PER_BYTE),
1321                                           (ICE_FLOW_FV_EXTRACT_SZ *
1322                                            BITS_PER_BYTE));
1323                 off = raw->info.xtrct.off;
1324                 for (j = 0; j < cnt; j++) {
1325                         u16 idx;
1326
1327                         /* Make sure the number of extraction sequence entries
1328                          * required does not exceed the block's capacity
1329                          */
1330                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1331                             params->es_cnt >= ICE_MAX_FV_WORDS)
1332                                 return ICE_ERR_MAX_LIMIT;
1333
1334                         /* some blocks require a reversed field vector layout */
1335                         if (hw->blk[params->blk].es.reverse)
1336                                 idx = fv_words - params->es_cnt - 1;
1337                         else
1338                                 idx = params->es_cnt;
1339
1340                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1341                         params->es[idx].off = off;
1342                         params->es_cnt++;
1343                         off += ICE_FLOW_FV_EXTRACT_SZ;
1344                 }
1345         }
1346
1347         return ICE_SUCCESS;
1348 }
1349
1350 /**
1351  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1352  * @hw: pointer to the HW struct
1353  * @params: information about the flow to be processed
1354  *
1355  * This function iterates through all matched fields in the given segments, and
1356  * creates an extraction sequence for the fields.
1357  */
1358 static enum ice_status
1359 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1360                           struct ice_flow_prof_params *params)
1361 {
1362         enum ice_status status = ICE_SUCCESS;
1363         u8 i;
1364
1365         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1366          * packet flags
1367          */
1368         if (params->blk == ICE_BLK_ACL) {
1369                 status = ice_flow_xtract_pkt_flags(hw, params,
1370                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1371                 if (status)
1372                         return status;
1373         }
1374
1375         for (i = 0; i < params->prof->segs_cnt; i++) {
1376                 u64 match = params->prof->segs[i].match;
1377                 enum ice_flow_field j;
1378
1379                 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1380                                      ICE_FLOW_FIELD_IDX_MAX) {
1381                         status = ice_flow_xtract_fld(hw, params, i, j, match);
1382                         if (status)
1383                                 return status;
1384                         ice_clear_bit(j, (ice_bitmap_t *)&match);
1385                 }
1386
1387                 /* Process raw matching bytes */
1388                 status = ice_flow_xtract_raws(hw, params, i);
1389                 if (status)
1390                         return status;
1391         }
1392
1393         return status;
1394 }
1395
1396 /**
1397  * ice_flow_sel_acl_scen - Select the ACL scenario for a flow profile
1398  * @hw: pointer to the hardware structure
1399  * @params: information about the flow to be processed
1400  *
1401  * This function selects the ACL scenario whose effective width best fits
1402  * the entry length computed for the profile described by @params.
1403  */
1404 static enum ice_status
1405 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1406 {
1407         /* Find the best-fit scenario for the provided match width */
1408         struct ice_acl_scen *cand_scen = NULL, *scen;
1409
1410         if (!hw->acl_tbl)
1411                 return ICE_ERR_DOES_NOT_EXIST;
1412
1413         /* Loop through the scenarios and pick the narrowest one whose
1414          * effective width can accommodate the required entry length
1415          */
1416         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1417                 if (scen->eff_width >= params->entry_length &&
1418                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1419                         cand_scen = scen;
1420         if (!cand_scen)
1421                 return ICE_ERR_DOES_NOT_EXIST;
1422
1423         params->prof->cfg.scen = cand_scen;
1424
1425         return ICE_SUCCESS;
1426 }
1427
1428 /**
1429  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1430  * @params: information about the flow to be processed
1431  */
1432 static enum ice_status
1433 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1434 {
1435         u16 index, i, range_idx = 0;
1436
1437         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1438
1439         for (i = 0; i < params->prof->segs_cnt; i++) {
1440                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1441                 u8 j;
1442
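                     /* Give each matched field in this segment either a range-checker
                      * index or a run of byte-selection indices.
                      */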
1443                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1444                                      ICE_FLOW_FIELD_IDX_MAX) {
1445                         struct ice_flow_fld_info *fld = &seg->fields[j];
1446
1447                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1448
1449                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1450                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1451
1452                                 /* Range checking only supported for single
1453                                  * words
1454                                  */
1455                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1456                                                         fld->xtrct.disp,
1457                                                         BITS_PER_BYTE * 2) > 1)
1458                                         return ICE_ERR_PARAM;
1459
1460                                 /* Ranges must define low and high values */
1461                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1462                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1463                                         return ICE_ERR_PARAM;
1464
1465                                 fld->entry.val = range_idx++;
1466                         } else {
1467                                 /* Store adjusted byte-length of field for later
1468                                  * use, taking into account potential
1469                                  * non-byte-aligned displacement
1470                                  */
1471                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1472                                         (ice_flds_info[j].size +
1473                                          (fld->xtrct.disp % BITS_PER_BYTE),
1474                                          BITS_PER_BYTE);
1475                                 fld->entry.val = index;
1476                                 index += fld->entry.last;
1477                         }
1478                 }
1479
1480                 for (j = 0; j < seg->raws_cnt; j++) {
1481                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1482
1483                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1484                         raw->info.entry.val = index;
1485                         raw->info.entry.last = raw->info.src.last;
1486                         index += raw->info.entry.last;
1487                 }
1488         }
1489
1490         /* Currently only the byte selection base is supported, which allows
1491          * an effective entry size of at most 30 bytes. Reject anything
1492          * larger.
1493          */
1494         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1495                 return ICE_ERR_PARAM;
1496
1497         /* Only eight range checkers are available per profile; reject anything
1498          * that tries to use more
1499          */
1500         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1501                 return ICE_ERR_PARAM;
1502
1503         /* Store # bytes required for entry for later use */
1504         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1505
1506         return ICE_SUCCESS;
1507 }
1508
1509 /**
1510  * ice_flow_proc_segs - process all packet segments associated with a profile
1511  * @hw: pointer to the HW struct
1512  * @params: information about the flow to be processed
1513  */
1514 static enum ice_status
1515 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1516 {
1517         enum ice_status status;
1518
1519         status = ice_flow_proc_seg_hdrs(params);
1520         if (status)
1521                 return status;
1522
1523         status = ice_flow_create_xtrct_seq(hw, params);
1524         if (status)
1525                 return status;
1526
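             /* FD and RSS profiles need no further processing here; ACL profiles
              * additionally define the entry layout and select a scenario.
              */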
1527         switch (params->blk) {
1528         case ICE_BLK_FD:
1529         case ICE_BLK_RSS:
1530                 status = ICE_SUCCESS;
1531                 break;
1532         case ICE_BLK_ACL:
1533                 status = ice_flow_acl_def_entry_frmt(params);
1534                 if (status)
1535                         return status;
1536                 status = ice_flow_sel_acl_scen(hw, params);
1537                 if (status)
1538                         return status;
1539                 break;
1540         default:
1541                 return ICE_ERR_NOT_IMPL;
1542         }
1543
1544         return status;
1545 }
1546
1547 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1548 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1549 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1550
1551 /**
1552  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1553  * @hw: pointer to the HW struct
1554  * @blk: classification stage
1555  * @dir: flow direction
1556  * @segs: array of one or more packet segments that describe the flow
1557  * @segs_cnt: number of packet segments provided
1558  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1559  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1560  */
1561 static struct ice_flow_prof *
1562 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1563                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1564                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1565 {
1566         struct ice_flow_prof *p, *prof = NULL;
1567
1568         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1569         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1570                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1571                     segs_cnt && segs_cnt == p->segs_cnt) {
1572                         u8 i;
1573
1574                         /* Check for profile-VSI association if specified */
1575                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1576                             ice_is_vsi_valid(hw, vsi_handle) &&
1577                             !ice_is_bit_set(p->vsis, vsi_handle))
1578                                 continue;
1579
1580                         /* Protocol headers must be checked. Matched fields are
1581                          * checked if specified.
1582                          */
1583                         for (i = 0; i < segs_cnt; i++)
1584                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1585                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1586                                      segs[i].match != p->segs[i].match))
1587                                         break;
1588
1589                         /* A match is found if all segments are matched */
1590                         if (i == segs_cnt) {
1591                                 prof = p;
1592                                 break;
1593                         }
1594                 }
1595         ice_release_lock(&hw->fl_profs_locks[blk]);
1596
1597         return prof;
1598 }
1599
1600 /**
1601  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1602  * @hw: pointer to the HW struct
1603  * @blk: classification stage
1604  * @dir: flow direction
1605  * @segs: array of one or more packet segments that describe the flow
1606  * @segs_cnt: number of packet segments provided
1607  */
1608 u64
1609 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1610                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1611 {
1612         struct ice_flow_prof *p;
1613
1614         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1615                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1616
1617         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1618 }
1619
1620 /**
1621  * ice_flow_find_prof_id - Look up a profile with given profile ID
1622  * @hw: pointer to the HW struct
1623  * @blk: classification stage
1624  * @prof_id: unique ID to identify this flow profile
1625  */
1626 static struct ice_flow_prof *
1627 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1628 {
1629         struct ice_flow_prof *p;
1630
1631         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1632                 if (p->id == prof_id)
1633                         return p;
1634
1635         return NULL;
1636 }
1637
1638 /**
1639  * ice_dealloc_flow_entry - Deallocate flow entry memory
1640  * @hw: pointer to the HW struct
1641  * @entry: flow entry to be removed
1642  */
1643 static void
1644 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1645 {
1646         if (!entry)
1647                 return;
1648
1649         if (entry->entry)
1650                 ice_free(hw, entry->entry);
1651
1652         if (entry->range_buf) {
1653                 ice_free(hw, entry->range_buf);
1654                 entry->range_buf = NULL;
1655         }
1656
1657         if (entry->acts) {
1658                 ice_free(hw, entry->acts);
1659                 entry->acts = NULL;
1660                 entry->acts_cnt = 0;
1661         }
1662
1663         ice_free(hw, entry);
1664 }
1665
1666 /**
1667  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1668  * @hw: pointer to the HW struct
1669  * @blk: classification stage
1670  * @prof_id: the profile ID handle
1671  * @hw_prof_id: pointer to variable to receive the HW profile ID
1672  */
1673 enum ice_status
1674 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1675                      u8 *hw_prof_id)
1676 {
1677         enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1678         struct ice_prof_map *map;
1679
1680         ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1681         map = ice_search_prof_id(hw, blk, prof_id);
1682         if (map) {
1683                 *hw_prof_id = map->prof_id;
1684                 status = ICE_SUCCESS;
1685         }
1686         ice_release_lock(&hw->blk[blk].es.prof_map_lock);
1687         return status;
1688 }
1689
1690 #define ICE_ACL_INVALID_SCEN    0x3f
1691
1692 /**
1693  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
1694  * @hw: pointer to the hardware structure
1695  * @prof: pointer to flow profile
1696  * @buf: destination buffer the function writes the partial extraction sequence to
1697  *
1698  * Returns ICE_SUCCESS if no PF is associated with the given profile,
1699  * ICE_ERR_IN_USE if at least one PF is associated with it, or another
1700  * error code on a real failure.
1701  */
1702 static enum ice_status
1703 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1704                             struct ice_aqc_acl_prof_generic_frmt *buf)
1705 {
1706         enum ice_status status;
1707         u8 prof_id = 0;
1708
1709         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1710         if (status)
1711                 return status;
1712
1713         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1714         if (status)
1715                 return status;
1716
1717         /* If the scenarios associated with all PFs are all 0 or all
1718          * ICE_ACL_INVALID_SCEN (63) for the given profile, then the profile
1719          * has not been configured yet.
1720          */
1721         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1722             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1723             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1724             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1725                 return ICE_SUCCESS;
1726
1727         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1728             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1729             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1730             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1731             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1732             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1733             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1734             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1735                 return ICE_SUCCESS;
1736
1737         return ICE_ERR_IN_USE;
1738 }
1739
1740 /**
1741  * ice_flow_acl_free_act_cntr - Deallocate the ACL counters used by a rule's counter actions
1742  * @hw: pointer to the hardware structure
1743  * @acts: array of actions to be performed on a match
1744  * @acts_cnt: number of actions
1745  */
1746 static enum ice_status
1747 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1748                            u8 acts_cnt)
1749 {
1750         int i;
1751
1752         for (i = 0; i < acts_cnt; i++) {
1753                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1754                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1755                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1756                         struct ice_acl_cntrs cntrs;
1757                         enum ice_status status;
1758
1759                         cntrs.bank = 0; /* Only bank0 for the moment */
1760                         cntrs.first_cntr =
1761                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1762                         cntrs.last_cntr =
1763                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1764
1765                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1766                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1767                         else
1768                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1769
1770                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1771                         if (status)
1772                                 return status;
1773                 }
1774         }
1775         return ICE_SUCCESS;
1776 }
1777
1778 /**
1779  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1780  * @hw: pointer to the hardware structure
1781  * @prof: pointer to flow profile
1782  *
1783  * Disassociate the scenario from the profile for the PF of the VSI.
1784  */
1785 static enum ice_status
1786 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1787 {
1788         struct ice_aqc_acl_prof_generic_frmt buf;
1789         enum ice_status status = ICE_SUCCESS;
1790         u8 prof_id = 0;
1791
1792         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1793
1794         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1795         if (status)
1796                 return status;
1797
1798         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1799         if (status)
1800                 return status;
1801
1802         /* Clear scenario for this PF */
1803         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1804         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
1805
1806         return status;
1807 }
1808
1809 /**
1810  * ice_flow_rem_entry_sync - Remove a flow entry
1811  * @hw: pointer to the HW struct
1812  * @blk: classification stage
1813  * @entry: flow entry to be removed
1814  */
1815 static enum ice_status
1816 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1817                         struct ice_flow_entry *entry)
1818 {
1819         if (!entry)
1820                 return ICE_ERR_BAD_PTR;
1821
1822         if (blk == ICE_BLK_ACL) {
1823                 enum ice_status status;
1824
1825                 if (!entry->prof)
1826                         return ICE_ERR_BAD_PTR;
1827
1828                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1829                                            entry->scen_entry_idx);
1830                 if (status)
1831                         return status;
1832
1833                 /* Checks if we need to release an ACL counter. */
1834                 if (entry->acts_cnt && entry->acts)
1835                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1836                                                    entry->acts_cnt);
1837         }
1838
1839         LIST_DEL(&entry->l_entry);
1840
1841         ice_dealloc_flow_entry(hw, entry);
1842
1843         return ICE_SUCCESS;
1844 }
1845
1846 /**
1847  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1848  * @hw: pointer to the HW struct
1849  * @blk: classification stage
1850  * @dir: flow direction
1851  * @prof_id: unique ID to identify this flow profile
1852  * @segs: array of one or more packet segments that describe the flow
1853  * @segs_cnt: number of packet segments provided
1854  * @acts: array of default actions
1855  * @acts_cnt: number of default actions
1856  * @prof: stores the returned flow profile added
1857  *
1858  * Assumption: the caller has acquired the lock to the profile list
1859  */
1860 static enum ice_status
1861 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1862                        enum ice_flow_dir dir, u64 prof_id,
1863                        struct ice_flow_seg_info *segs, u8 segs_cnt,
1864                        struct ice_flow_action *acts, u8 acts_cnt,
1865                        struct ice_flow_prof **prof)
1866 {
1867         struct ice_flow_prof_params *params;
1868         enum ice_status status;
1869         u8 i;
1870
1871         if (!prof || (acts_cnt && !acts))
1872                 return ICE_ERR_BAD_PTR;
1873
1874         params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
1875         if (!params)
1876                 return ICE_ERR_NO_MEMORY;
1877
1878         params->prof = (struct ice_flow_prof *)
1879                 ice_malloc(hw, sizeof(*params->prof));
1880         if (!params->prof) {
1881                 status = ICE_ERR_NO_MEMORY;
1882                 goto free_params;
1883         }
1884
1885         /* initialize extraction sequence to all invalid (0xff) */
1886         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1887                 params->es[i].prot_id = ICE_PROT_INVALID;
1888                 params->es[i].off = ICE_FV_OFFSET_INVAL;
1889         }
1890
1891         params->blk = blk;
1892         params->prof->id = prof_id;
1893         params->prof->dir = dir;
1894         params->prof->segs_cnt = segs_cnt;
1895
1896         /* Make a copy of the segments that need to be persistent in the flow
1897          * profile instance
1898          */
1899         for (i = 0; i < segs_cnt; i++)
1900                 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
1901                            ICE_NONDMA_TO_NONDMA);
1902
1903         /* Make a copy of the actions that need to be persistent in the flow
1904          * profile instance.
1905          */
1906         if (acts_cnt) {
1907                 params->prof->acts = (struct ice_flow_action *)
1908                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1909                                    ICE_NONDMA_TO_NONDMA);
1910
1911                 if (!params->prof->acts) {
1912                         status = ICE_ERR_NO_MEMORY;
1913                         goto out;
1914                 }
1915         }
1916
1917         status = ice_flow_proc_segs(hw, params);
1918         if (status) {
1919                 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
1920                 goto out;
1921         }
1922
1923         /* Add a HW profile for this flow profile */
1924         status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
1925                               params->attr, params->attr_cnt, params->es,
1926                               params->mask);
1927         if (status) {
1928                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1929                 goto out;
1930         }
1931
1932         INIT_LIST_HEAD(&params->prof->entries);
1933         ice_init_lock(&params->prof->entries_lock);
1934         *prof = params->prof;
1935
1936 out:
1937         if (status) {
1938                 if (params->prof->acts)
1939                         ice_free(hw, params->prof->acts);
1940                 ice_free(hw, params->prof);
1941         }
1942 free_params:
1943         ice_free(hw, params);
1944
1945         return status;
1946 }
1947
1948 /**
1949  * ice_flow_rem_prof_sync - remove a flow profile
1950  * @hw: pointer to the hardware structure
1951  * @blk: classification stage
1952  * @prof: pointer to flow profile to remove
1953  *
1954  * Assumption: the caller has acquired the lock to the profile list
1955  */
1956 static enum ice_status
1957 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1958                        struct ice_flow_prof *prof)
1959 {
1960         enum ice_status status;
1961
1962         /* Remove all remaining flow entries before removing the flow profile */
1963         if (!LIST_EMPTY(&prof->entries)) {
1964                 struct ice_flow_entry *e, *t;
1965
1966                 ice_acquire_lock(&prof->entries_lock);
1967
1968                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1969                                          l_entry) {
1970                         status = ice_flow_rem_entry_sync(hw, blk, e);
1971                         if (status)
1972                                 break;
1973                 }
1974
1975                 ice_release_lock(&prof->entries_lock);
1976         }
1977
1978         if (blk == ICE_BLK_ACL) {
1979                 struct ice_aqc_acl_profile_ranges query_rng_buf;
1980                 struct ice_aqc_acl_prof_generic_frmt buf;
1981                 u8 prof_id = 0;
1982
1983                 /* Disassociate the scenario from the profile for the PF */
1984                 status = ice_flow_acl_disassoc_scen(hw, prof);
1985                 if (status)
1986                         return status;
1987
1988                 /* Clear the range-checker if the profile ID is no longer
1989                  * used by any PF
1990                  */
1991                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1992                 if (status && status != ICE_ERR_IN_USE) {
1993                         return status;
1994                 } else if (!status) {
1995                         /* Clear the range-checker value for profile ID */
1996                         ice_memset(&query_rng_buf, 0,
1997                                    sizeof(struct ice_aqc_acl_profile_ranges),
1998                                    ICE_NONDMA_MEM);
1999
2000                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
2001                                                       &prof_id);
2002                         if (status)
2003                                 return status;
2004
2005                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2006                                                           &query_rng_buf, NULL);
2007                         if (status)
2008                                 return status;
2009                 }
2010         }
2011
2012         /* Remove all hardware profiles associated with this flow profile */
2013         status = ice_rem_prof(hw, blk, prof->id);
2014         if (!status) {
2015                 LIST_DEL(&prof->l_entry);
2016                 ice_destroy_lock(&prof->entries_lock);
2017                 if (prof->acts)
2018                         ice_free(hw, prof->acts);
2019                 ice_free(hw, prof);
2020         }
2021
2022         return status;
2023 }
2024
2025 /**
2026  * ice_flow_acl_set_xtrct_seq_fld - Populate the extraction sequence for a single field
2027  * @buf: destination buffer the function writes the partial extraction sequence to
2028  * @info: Info about field
2029  */
2030 static void
2031 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2032                                struct ice_flow_fld_info *info)
2033 {
2034         u16 dst, i;
2035         u8 src;
2036
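             /* src is the byte position of the field within the extracted field
              * vector; dst is the first byte-selection index assigned to the field
              * when the entry format was defined.
              */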
2037         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2038                 info->xtrct.disp / BITS_PER_BYTE;
2039         dst = info->entry.val;
2040         for (i = 0; i < info->entry.last; i++)
2041                 /* HW stores field vector words in LE, convert words back to BE
2042                  * so constructed entries will end up in network order
2043                  */
2044                 buf->byte_selection[dst++] = src++ ^ 1;
2045 }
2046
2047 /**
2048  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2049  * @hw: pointer to the hardware structure
2050  * @prof: pointer to flow profile
2051  */
2052 static enum ice_status
2053 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2054 {
2055         struct ice_aqc_acl_prof_generic_frmt buf;
2056         struct ice_flow_fld_info *info;
2057         enum ice_status status;
2058         u8 prof_id = 0;
2059         u16 i;
2060
2061         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2062
2063         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2064         if (status)
2065                 return status;
2066
2067         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2068         if (status && status != ICE_ERR_IN_USE)
2069                 return status;
2070
2071         if (!status) {
2072                 /* Program the profile dependent configuration. This is done
2073                  * only once regardless of the number of PFs using that profile
2074                  */
2075                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2076
2077                 for (i = 0; i < prof->segs_cnt; i++) {
2078                         struct ice_flow_seg_info *seg = &prof->segs[i];
2079                         u16 j;
2080
2081                         ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2082                                              ICE_FLOW_FIELD_IDX_MAX) {
2083                                 info = &seg->fields[j];
2084
2085                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2086                                         buf.word_selection[info->entry.val] =
2087                                                 info->xtrct.idx;
2088                                 else
2089                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
2090                                                                        info);
2091                         }
2092
2093                         for (j = 0; j < seg->raws_cnt; j++) {
2094                                 info = &seg->raws[j].info;
2095                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2096                         }
2097                 }
2098
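                     /* Mark every PF's scenario slot as invalid; the slot for the
                      * current PF is programmed below.
                      */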
2099                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2100                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2101                            ICE_NONDMA_MEM);
2102         }
2103
2104         /* Update the current PF */
2105         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2106         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2107
2108         return status;
2109 }
2110
2111 /**
2112  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2113  * @hw: pointer to the hardware structure
2114  * @blk: classification stage
2115  * @vsi_handle: software VSI handle
2116  * @vsig: target VSI group
2117  *
2118  * Assumption: the caller has already verified that the VSI to
2119  * be added has the same characteristics as the VSIG and will
2120  * thereby have access to all resources added to that VSIG.
2121  */
2122 enum ice_status
2123 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2124                         u16 vsig)
2125 {
2126         enum ice_status status;
2127
2128         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2129                 return ICE_ERR_PARAM;
2130
2131         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2132         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2133                                   vsig);
2134         ice_release_lock(&hw->fl_profs_locks[blk]);
2135
2136         return status;
2137 }
2138
2139 /**
2140  * ice_flow_assoc_prof - associate a VSI with a flow profile
2141  * @hw: pointer to the hardware structure
2142  * @blk: classification stage
2143  * @prof: pointer to flow profile
2144  * @vsi_handle: software VSI handle
2145  *
2146  * Assumption: the caller has acquired the lock to the profile list
2147  * and the software VSI handle has been validated
2148  */
2149 enum ice_status
2150 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2151                     struct ice_flow_prof *prof, u16 vsi_handle)
2152 {
2153         enum ice_status status = ICE_SUCCESS;
2154
2155         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
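                     /* For ACL, program the extraction sequence and this PF's scenario
                      * before associating the VSI with the profile.
                      */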
2156                 if (blk == ICE_BLK_ACL) {
2157                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2158                         if (status)
2159                                 return status;
2160                 }
2161                 status = ice_add_prof_id_flow(hw, blk,
2162                                               ice_get_hw_vsi_num(hw,
2163                                                                  vsi_handle),
2164                                               prof->id);
2165                 if (!status)
2166                         ice_set_bit(vsi_handle, prof->vsis);
2167                 else
2168                         ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2169                                   status);
2170         }
2171
2172         return status;
2173 }
2174
2175 /**
2176  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2177  * @hw: pointer to the hardware structure
2178  * @blk: classification stage
2179  * @prof: pointer to flow profile
2180  * @vsi_handle: software VSI handle
2181  *
2182  * Assumption: the caller has acquired the lock to the profile list
2183  * and the software VSI handle has been validated
2184  */
2185 static enum ice_status
2186 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2187                        struct ice_flow_prof *prof, u16 vsi_handle)
2188 {
2189         enum ice_status status = ICE_SUCCESS;
2190
2191         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2192                 status = ice_rem_prof_id_flow(hw, blk,
2193                                               ice_get_hw_vsi_num(hw,
2194                                                                  vsi_handle),
2195                                               prof->id);
2196                 if (!status)
2197                         ice_clear_bit(vsi_handle, prof->vsis);
2198                 else
2199                         ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2200                                   status);
2201         }
2202
2203         return status;
2204 }
2205
2206 /**
2207  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2208  * @hw: pointer to the HW struct
2209  * @blk: classification stage
2210  * @dir: flow direction
2211  * @prof_id: unique ID to identify this flow profile
2212  * @segs: array of one or more packet segments that describe the flow
2213  * @segs_cnt: number of packet segments provided
2214  * @acts: array of default actions
2215  * @acts_cnt: number of default actions
2216  * @prof: stores the returned flow profile added
2217  */
2218 enum ice_status
2219 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2220                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2221                   struct ice_flow_action *acts, u8 acts_cnt,
2222                   struct ice_flow_prof **prof)
2223 {
2224         enum ice_status status;
2225
2226         if (segs_cnt > ICE_FLOW_SEG_MAX)
2227                 return ICE_ERR_MAX_LIMIT;
2228
2229         if (!segs_cnt)
2230                 return ICE_ERR_PARAM;
2231
2232         if (!segs)
2233                 return ICE_ERR_BAD_PTR;
2234
2235         status = ice_flow_val_hdrs(segs, segs_cnt);
2236         if (status)
2237                 return status;
2238
2239         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2240
2241         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2242                                         acts, acts_cnt, prof);
2243         if (!status)
2244                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2245
2246         ice_release_lock(&hw->fl_profs_locks[blk]);
2247
2248         return status;
2249 }
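
     /* Illustrative usage sketch (not part of the driver): assuming a single
      * IPv4 segment and the ICE_FLOW_RX direction from ice_flow.h, a caller
      * could create an RSS profile, bind it to a VSI, and later remove it
      * roughly as follows:
      *
      *      struct ice_flow_seg_info seg = { 0 };
      *      struct ice_flow_prof *prof;
      *
      *      seg.hdrs = ICE_FLOW_SEG_HDR_IPV4;
      *      if (!ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, prof_id,
      *                             &seg, 1, NULL, 0, &prof))
      *              ice_flow_assoc_prof(hw, ICE_BLK_RSS, prof, vsi_handle);
      *
      *      ice_flow_rem_prof(hw, ICE_BLK_RSS, prof_id);
      */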
2250
2251 /**
2252  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2253  * @hw: pointer to the HW struct
2254  * @blk: the block for which the flow profile is to be removed
2255  * @prof_id: unique ID of the flow profile to be removed
2256  */
2257 enum ice_status
2258 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2259 {
2260         struct ice_flow_prof *prof;
2261         enum ice_status status;
2262
2263         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2264
2265         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2266         if (!prof) {
2267                 status = ICE_ERR_DOES_NOT_EXIST;
2268                 goto out;
2269         }
2270
2271         /* prof becomes invalid after the call */
2272         status = ice_flow_rem_prof_sync(hw, blk, prof);
2273
2274 out:
2275         ice_release_lock(&hw->fl_profs_locks[blk]);
2276
2277         return status;
2278 }
2279
2280 /**
2281  * ice_flow_find_entry - look for a flow entry using its unique ID
2282  * @hw: pointer to the HW struct
2283  * @blk: classification stage
2284  * @entry_id: unique ID to identify this flow entry
2285  *
2286  * This function looks for the flow entry with the specified unique ID in all
2287  * flow profiles of the specified classification stage. If the entry is found,
2288  * it returns the handle to the flow entry. Otherwise, it returns
2289  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2290  */
2291 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2292 {
2293         struct ice_flow_entry *found = NULL;
2294         struct ice_flow_prof *p;
2295
2296         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2297
2298         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2299                 struct ice_flow_entry *e;
2300
2301                 ice_acquire_lock(&p->entries_lock);
2302                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2303                         if (e->id == entry_id) {
2304                                 found = e;
2305                                 break;
2306                         }
2307                 ice_release_lock(&p->entries_lock);
2308
2309                 if (found)
2310                         break;
2311         }
2312
2313         ice_release_lock(&hw->fl_profs_locks[blk]);
2314
2315         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2316 }
2317
2318 /**
2319  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2320  * @hw: pointer to the hardware structure
2321  * @acts: array of actions to be performed on a match
2322  * @acts_cnt: number of actions
2323  * @cnt_alloc: indicates if an ACL counter has been allocated.
2324  */
2325 static enum ice_status
2326 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2327                            u8 acts_cnt, bool *cnt_alloc)
2328 {
2329         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2330         int i;
2331
2332         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2333         *cnt_alloc = false;
2334
2335         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2336                 return ICE_ERR_OUT_OF_RANGE;
2337
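             /* Validate the action types; only NOP, drop, packet-counter, and
              * forward-to-queue actions are accepted here.
              */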
2338         for (i = 0; i < acts_cnt; i++) {
2339                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2340                     acts[i].type != ICE_FLOW_ACT_DROP &&
2341                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2342                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2343                         return ICE_ERR_CFG;
2344
2345                 /* If the caller wants to add two actions of the same type, it is
2346                  * considered an invalid configuration.
2347                  */
2348                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2349                         return ICE_ERR_PARAM;
2350         }
2351
2352         /* Checks if ACL counters are needed. */
2353         for (i = 0; i < acts_cnt; i++) {
2354                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2355                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2356                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2357                         struct ice_acl_cntrs cntrs;
2358                         enum ice_status status;
2359
2360                         cntrs.amount = 1;
2361                         cntrs.bank = 0; /* Only bank0 for the moment */
2362
2363                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2364                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2365                         else
2366                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2367
2368                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2369                         if (status)
2370                                 return status;
2371                         /* Counter index within the bank */
2372                         acts[i].data.acl_act.value =
2373                                                 CPU_TO_LE16(cntrs.first_cntr);
2374                         *cnt_alloc = true;
2375                 }
2376         }
2377
2378         return ICE_SUCCESS;
2379 }
2380
2381 /**
2382  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2383  * @fld: number of the given field
2384  * @info: info about field
2385  * @range_buf: range checker configuration buffer
2386  * @data: pointer to a data buffer containing flow entry's match values/masks
2387  * @range: Input/output param indicating which range checkers are being used
2388  */
2389 static void
2390 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2391                               struct ice_aqc_acl_profile_ranges *range_buf,
2392                               u8 *data, u8 *range)
2393 {
2394         u16 new_mask;
2395
2396         /* If not specified, default mask is all bits in field */
2397         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2398                     BIT(ice_flds_info[fld].size) - 1 :
2399                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2400
2401         /* If the mask is 0, then we don't need to worry about this input
2402          * range checker value.
2403          */
2404         if (new_mask) {
2405                 u16 new_high =
2406                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2407                 u16 new_low =
2408                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2409                 u8 range_idx = info->entry.val;
2410
2411                 range_buf->checker_cfg[range_idx].low_boundary =
2412                         CPU_TO_BE16(new_low);
2413                 range_buf->checker_cfg[range_idx].high_boundary =
2414                         CPU_TO_BE16(new_high);
2415                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2416
2417                 /* Indicate which range checker is being used */
2418                 *range |= BIT(range_idx);
2419         }
2420 }
2421
2422 /**
2423  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2424  * @fld: number of the given field
2425  * @info: info about the field
2426  * @buf: buffer containing the entry
2427  * @dontcare: buffer containing don't care mask for entry
2428  * @data: pointer to a data buffer containing flow entry's match values/masks
2429  */
2430 static void
2431 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2432                             u8 *dontcare, u8 *data)
2433 {
2434         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2435         bool use_mask = false;
2436         u8 disp;
2437
2438         src = info->src.val;
2439         mask = info->src.mask;
2440         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2441         disp = info->xtrct.disp % BITS_PER_BYTE;
2442
2443         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2444                 use_mask = true;
2445
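             /* Copy the field one byte at a time, shifting by the bit displacement
              * and carrying overflow bits into the next destination byte.
              */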
2446         for (k = 0; k < info->entry.last; k++, dst++) {
2447                 /* Add overflow bits from previous byte */
2448                 buf[dst] = (tmp_s & 0xff00) >> 8;
2449
2450                 /* If the mask is not valid, tmp_m is always zero, so this just
2451                  * sets dontcare to 0 (no masked bits). If the mask is valid, it
2452                  * pulls in the overflow bits of the mask from the previous byte
2453                  */
2454                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2455
2456                 /* If there is displacement, the last byte will only contain
2457                  * displaced data; there is no more data to read from the user
2458                  * buffer, so skip it to avoid potentially reading beyond the
2459                  * end of the user buffer
2460                  */
2461                 if (!disp || k < info->entry.last - 1) {
2462                         /* Store shifted data to use in next byte */
2463                         tmp_s = data[src++] << disp;
2464
2465                         /* Add current (shifted) byte */
2466                         buf[dst] |= tmp_s & 0xff;
2467
2468                         /* Handle mask if valid */
2469                         if (use_mask) {
2470                                 tmp_m = (~data[mask++] & 0xff) << disp;
2471                                 dontcare[dst] |= tmp_m & 0xff;
2472                         }
2473                 }
2474         }
2475
2476         /* Fill in don't care bits at beginning of field */
2477         if (disp) {
2478                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2479                 for (k = 0; k < disp; k++)
2480                         dontcare[dst] |= BIT(k);
2481         }
2482
2483         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2484
2485         /* Fill in don't care bits at end of field */
2486         if (end_disp) {
2487                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2488                       info->entry.last - 1;
2489                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2490                         dontcare[dst] |= BIT(k);
2491         }
2492 }
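
/* Illustrative walk-through (editor's sketch, not part of the driver): assume
 * a one-byte field extracted with a 4-bit displacement (disp == 4), so that it
 * straddles two key bytes (info->entry.last == 2), a data byte of 0xAB and no
 * mask. The loop above writes 0xB0 into the first key byte and 0x0A into the
 * second; the trailing fix-ups then mark bits 0-3 of the first byte and, since
 * end_disp == 4, bits 4-7 of the second byte as don't care, leaving exactly
 * the eight field bits significant.
 */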
2493
2494 /**
2495  * ice_flow_acl_frmt_entry - Format ACL entry
2496  * @hw: pointer to the hardware structure
2497  * @prof: pointer to flow profile
2498  * @e: pointer to the flow entry
2499  * @data: pointer to a data buffer containing flow entry's match values/masks
2500  * @acts: array of actions to be performed on a match
2501  * @acts_cnt: number of actions
2502  *
2503  * Formats the key (and key_inverse) to be matched from the data passed in,
2504  * along with data from the flow profile. This key/key_inverse pair makes up
2505  * the 'entry' for an ACL flow entry.
2506  */
2507 static enum ice_status
2508 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2509                         struct ice_flow_entry *e, u8 *data,
2510                         struct ice_flow_action *acts, u8 acts_cnt)
2511 {
2512         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2513         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2514         enum ice_status status;
2515         bool cnt_alloc;
2516         u8 prof_id = 0;
2517         u16 i, buf_sz;
2518
2519         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2520         if (status)
2521                 return status;
2522
2523         /* Format the result action */
2524
2525         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2526         if (status)
2527                 return status;
2528
2529         status = ICE_ERR_NO_MEMORY;
2530
2531         e->acts = (struct ice_flow_action *)
2532                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2533                            ICE_NONDMA_TO_NONDMA);
2534         if (!e->acts)
2535                 goto out;
2536
2537         e->acts_cnt = acts_cnt;
2538
2539         /* Format the matching data */
2540         buf_sz = prof->cfg.scen->width;
2541         buf = (u8 *)ice_malloc(hw, buf_sz);
2542         if (!buf)
2543                 goto out;
2544
2545         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2546         if (!dontcare)
2547                 goto out;
2548
2549         /* 'key' buffer will store both key and key_inverse, so it must be
2550          * twice the size of buf
2551          */
2552         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2553         if (!key)
2554                 goto out;
2555
2556         range_buf = (struct ice_aqc_acl_profile_ranges *)
2557                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2558         if (!range_buf)
2559                 goto out;
2560
2561         /* Set don't care mask to all 1's to start, will zero out used bytes */
2562         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2563
2564         for (i = 0; i < prof->segs_cnt; i++) {
2565                 struct ice_flow_seg_info *seg = &prof->segs[i];
2566                 u8 j;
2567
2568                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2569                                      ICE_FLOW_FIELD_IDX_MAX) {
2570                         struct ice_flow_fld_info *info = &seg->fields[j];
2571
2572                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2573                                 ice_flow_acl_frmt_entry_range(j, info,
2574                                                               range_buf, data,
2575                                                               &range);
2576                         else
2577                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2578                                                             dontcare, data);
2579                 }
2580
2581                 for (j = 0; j < seg->raws_cnt; j++) {
2582                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2583                         u16 dst, src, mask, k;
2584                         bool use_mask = false;
2585
2586                         src = info->src.val;
2587                         dst = info->entry.val -
2588                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2589                         mask = info->src.mask;
2590
2591                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2592                                 use_mask = true;
2593
2594                         for (k = 0; k < info->entry.last; k++, dst++) {
2595                                 buf[dst] = data[src++];
2596                                 if (use_mask)
2597                                         dontcare[dst] = ~data[mask++];
2598                                 else
2599                                         dontcare[dst] = 0;
2600                         }
2601                 }
2602         }
2603
2604         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2605         dontcare[prof->cfg.scen->pid_idx] = 0;
2606
2607         /* Format the buffer for direction flags */
2608         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2609
2610         if (prof->dir == ICE_FLOW_RX)
2611                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2612
2613         if (range) {
2614                 buf[prof->cfg.scen->rng_chk_idx] = range;
2615                 /* Mark any unused range checkers as don't care */
2616                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2617                 e->range_buf = range_buf;
2618         } else {
2619                 ice_free(hw, range_buf);
2620         }
2621
2622         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2623                              buf_sz);
2624         if (status)
2625                 goto out;
2626
2627         e->entry = key;
2628         e->entry_sz = buf_sz * 2;
2629
2630 out:
2631         if (buf)
2632                 ice_free(hw, buf);
2633
2634         if (dontcare)
2635                 ice_free(hw, dontcare);
2636
2637         if (status && key)
2638                 ice_free(hw, key);
2639
2640         if (status && range_buf) {
2641                 ice_free(hw, range_buf);
2642                 e->range_buf = NULL;
2643         }
2644
2645         if (status && e->acts) {
2646                 ice_free(hw, e->acts);
2647                 e->acts = NULL;
2648                 e->acts_cnt = 0;
2649         }
2650
2651         if (status && cnt_alloc)
2652                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2653
2654         return status;
2655 }
2656
2657 /**
2658  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2659  *                                     the compared data.
2660  * @prof: pointer to flow profile
2661  * @e: pointer to the comparing flow entry
2662  * @do_chg_action: decide if we want to change the ACL action
2663  * @do_add_entry: decide if we want to add the new ACL entry
2664  * @do_rem_entry: decide if we want to remove the current ACL entry
2665  *
2666  * Find an ACL scenario entry that matches the compared data. At the same time,
2667  * this function also figures out:
2668  * a/ whether we want to change the ACL action
2669  * b/ whether we want to add the new ACL entry
2670  * c/ whether we want to remove the current ACL entry
2671  */
2672 static struct ice_flow_entry *
2673 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2674                                   struct ice_flow_entry *e, bool *do_chg_action,
2675                                   bool *do_add_entry, bool *do_rem_entry)
2676 {
2677         struct ice_flow_entry *p, *return_entry = NULL;
2678         u8 i, j;
2679
2680         /* Check if:
2681          * a/ There exists an entry with the same matching data but a different
2682          *    priority, then we remove this existing ACL entry. Then, we
2683          *    will add the new entry to the ACL scenario.
2684          * b/ There exists an entry with the same matching data, priority, and
2685          *    result action, then we do nothing.
2686          * c/ There exists an entry with the same matching data and priority,
2687          *    but a different action, then we only change the entry's action.
2688          * d/ Else, we add this new entry to the ACL scenario.
2689          */
2690         *do_chg_action = false;
2691         *do_add_entry = true;
2692         *do_rem_entry = false;
2693         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2694                 if (memcmp(p->entry, e->entry, p->entry_sz))
2695                         continue;
2696
2697                 /* From this point, we have the same matching_data. */
2698                 *do_add_entry = false;
2699                 return_entry = p;
2700
2701                 if (p->priority != e->priority) {
2702                         /* matching data && !priority */
2703                         *do_add_entry = true;
2704                         *do_rem_entry = true;
2705                         break;
2706                 }
2707
2708                 /* From this point, we will have matching_data && priority */
2709                 if (p->acts_cnt != e->acts_cnt)
2710                         *do_chg_action = true;
2711                 for (i = 0; i < p->acts_cnt; i++) {
2712                         bool found_not_match = false;
2713
2714                         for (j = 0; j < e->acts_cnt; j++)
2715                                 if (memcmp(&p->acts[i], &e->acts[j],
2716                                            sizeof(struct ice_flow_action))) {
2717                                         found_not_match = true;
2718                                         break;
2719                                 }
2720
2721                         if (found_not_match) {
2722                                 *do_chg_action = true;
2723                                 break;
2724                         }
2725                 }
2726
2727                 /* (do_chg_action = true) means:
2728                  *    matching_data && priority && !result_action
2729                  * (do_chg_action = false) means:
2730                  *    matching_data && priority && result_action
2731                  */
2732                 break;
2733         }
2734
2735         return return_entry;
2736 }
2737
2738 /**
2739  * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
2740  * @p: flow priority
2741  */
2742 static enum ice_acl_entry_prio
2743 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
2744 {
2745         enum ice_acl_entry_prio acl_prio;
2746
2747         switch (p) {
2748         case ICE_FLOW_PRIO_LOW:
2749                 acl_prio = ICE_ACL_PRIO_LOW;
2750                 break;
2751         case ICE_FLOW_PRIO_NORMAL:
2752                 acl_prio = ICE_ACL_PRIO_NORMAL;
2753                 break;
2754         case ICE_FLOW_PRIO_HIGH:
2755                 acl_prio = ICE_ACL_PRIO_HIGH;
2756                 break;
2757         default:
2758                 acl_prio = ICE_ACL_PRIO_NORMAL;
2759                 break;
2760         }
2761
2762         return acl_prio;
2763 }
2764
2765 /**
2766  * ice_flow_acl_union_rng_chk - Perform union operation between two
2767  *                              range checker buffers
2768  * @dst_buf: pointer to destination range checker buffer
2769  * @src_buf: pointer to source range checker buffer
2770  *
2771  * For this function, we do the union of the dst_buf and src_buf
2772  * range checker buffers, and we save the result back to dst_buf
2773  */
2774 static enum ice_status
2775 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2776                            struct ice_aqc_acl_profile_ranges *src_buf)
2777 {
2778         u8 i, j;
2779
2780         if (!dst_buf || !src_buf)
2781                 return ICE_ERR_BAD_PTR;
2782
2783         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2784                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2785                 bool will_populate = false;
2786
2787                 in_data = &src_buf->checker_cfg[i];
2788
2789                 if (!in_data->mask)
2790                         break;
2791
2792                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2793                         cfg_data = &dst_buf->checker_cfg[j];
2794
2795                         if (!cfg_data->mask ||
2796                             !memcmp(cfg_data, in_data,
2797                                     sizeof(struct ice_acl_rng_data))) {
2798                                 will_populate = true;
2799                                 break;
2800                         }
2801                 }
2802
2803                 if (will_populate) {
2804                         ice_memcpy(cfg_data, in_data,
2805                                    sizeof(struct ice_acl_rng_data),
2806                                    ICE_NONDMA_TO_NONDMA);
2807                 } else {
2808                         /* No available slot left to program range checker */
2809                         return ICE_ERR_MAX_LIMIT;
2810                 }
2811         }
2812
2813         return ICE_SUCCESS;
2814 }
2815
2816 /**
2817  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2818  * @hw: pointer to the hardware structure
2819  * @prof: pointer to flow profile
2820  * @entry: double pointer to the flow entry
2821  *
2822  * For this function, we will look at the currently added entries in the
2823  * corresponding ACL scenario. Then, we will perform matching logic to
2824  * decide whether to add, modify, or do nothing with this new entry.
2825  */
2826 static enum ice_status
2827 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2828                                  struct ice_flow_entry **entry)
2829 {
2830         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2831         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2832         struct ice_acl_act_entry *acts = NULL;
2833         struct ice_flow_entry *exist;
2834         enum ice_status status = ICE_SUCCESS;
2835         struct ice_flow_entry *e;
2836         u8 i;
2837
2838         if (!entry || !(*entry) || !prof)
2839                 return ICE_ERR_BAD_PTR;
2840
2841         e = *entry;
2842
2843         do_chg_rng_chk = false;
2844         if (e->range_buf) {
2845                 u8 prof_id = 0;
2846
2847                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2848                                               &prof_id);
2849                 if (status)
2850                         return status;
2851
2852                 /* Query the current range-checker value in FW */
2853                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2854                                                    NULL);
2855                 if (status)
2856                         return status;
2857                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2858                            sizeof(struct ice_aqc_acl_profile_ranges),
2859                            ICE_NONDMA_TO_NONDMA);
2860
2861                 /* Generate the new range-checker value */
2862                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2863                 if (status)
2864                         return status;
2865
2866                 /* Reconfigure the range check if the buffer is changed. */
2867                 do_chg_rng_chk = false;
2868                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2869                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2870                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2871                                                           &cfg_rng_buf, NULL);
2872                         if (status)
2873                                 return status;
2874
2875                         do_chg_rng_chk = true;
2876                 }
2877         }
2878
2879         /* Figure out if we want to (change the ACL action) and/or
2880          * (add the new ACL entry) and/or (remove the current ACL entry)
2881          */
2882         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2883                                                   &do_add_entry, &do_rem_entry);
2884         if (do_rem_entry) {
2885                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2886                 if (status)
2887                         return status;
2888         }
2889
2890         /* Prepare the result action buffer */
2891         acts = (struct ice_acl_act_entry *)
2892                 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2893         if (!acts)
2894                 return ICE_ERR_NO_MEMORY;
2895
2896         for (i = 0; i < e->acts_cnt; i++)
2897                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2898                            sizeof(struct ice_acl_act_entry),
2899                            ICE_NONDMA_TO_NONDMA);
2900
2901         if (do_add_entry) {
2902                 enum ice_acl_entry_prio prio;
2903                 u8 *keys, *inverts;
2904                 u16 entry_idx;
2905
2906                 keys = (u8 *)e->entry;
2907                 inverts = keys + (e->entry_sz / 2);
2908                 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
2909
2910                 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
2911                                            inverts, acts, e->acts_cnt,
2912                                            &entry_idx);
2913                 if (status)
2914                         goto out;
2915
2916                 e->scen_entry_idx = entry_idx;
2917                 LIST_ADD(&e->l_entry, &prof->entries);
2918         } else {
2919                 if (do_chg_action) {
2920                         /* For the action memory info, update the SW's copy of
2921                          * the existing entry with e's action memory info
2922                          */
2923                         ice_free(hw, exist->acts);
2924                         exist->acts_cnt = e->acts_cnt;
2925                         exist->acts = (struct ice_flow_action *)
2926                                 ice_calloc(hw, exist->acts_cnt,
2927                                            sizeof(struct ice_flow_action));
2928                         if (!exist->acts) {
2929                                 status = ICE_ERR_NO_MEMORY;
2930                                 goto out;
2931                         }
2932
2933                         ice_memcpy(exist->acts, e->acts,
2934                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2935                                    ICE_NONDMA_TO_NONDMA);
2936
2937                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2938                                                   e->acts_cnt,
2939                                                   exist->scen_entry_idx);
2940                         if (status)
2941                                 goto out;
2942                 }
2943
2944                 if (do_chg_rng_chk) {
2945                         /* In this case, we want to update the range checker
2946                          * information of the existing entry
2947                          */
2948                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2949                                                             e->range_buf);
2950                         if (status)
2951                                 goto out;
2952                 }
2953
2954                 /* As we don't add the new entry to our SW DB, deallocate its
2955                  * memory and return the existing entry to the caller
2956                  */
2957                 ice_dealloc_flow_entry(hw, e);
2958                 *(entry) = exist;
2959         }
2960 out:
2961         ice_free(hw, acts);
2962
2963         return status;
2964 }
2965
2966 /**
2967  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2968  * @hw: pointer to the hardware structure
2969  * @prof: pointer to flow profile
2970  * @e: double pointer to the flow entry
2971  */
2972 static enum ice_status
2973 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2974                             struct ice_flow_entry **e)
2975 {
2976         enum ice_status status;
2977
2978         ice_acquire_lock(&prof->entries_lock);
2979         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2980         ice_release_lock(&prof->entries_lock);
2981
2982         return status;
2983 }
2984
2985 /**
2986  * ice_flow_add_entry - Add a flow entry
2987  * @hw: pointer to the HW struct
2988  * @blk: classification stage
2989  * @prof_id: ID of the profile to add a new flow entry to
2990  * @entry_id: unique ID to identify this flow entry
2991  * @vsi_handle: software VSI handle for the flow entry
2992  * @prio: priority of the flow entry
2993  * @data: pointer to a data buffer containing flow entry's match values/masks
2994  * @acts: arrays of actions to be performed on a match
2995  * @acts_cnt: number of actions
2996  * @entry_h: pointer to buffer that receives the new flow entry's handle
2997  */
2998 enum ice_status
2999 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3000                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3001                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
3002                    u64 *entry_h)
3003 {
3004         struct ice_flow_entry *e = NULL;
3005         struct ice_flow_prof *prof;
3006         enum ice_status status = ICE_SUCCESS;
3007
3008         /* ACL entries must indicate an action */
3009         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3010                 return ICE_ERR_PARAM;
3011
3012         /* No flow entry data is expected for RSS */
3013         if (!entry_h || (!data && blk != ICE_BLK_RSS))
3014                 return ICE_ERR_BAD_PTR;
3015
3016         if (!ice_is_vsi_valid(hw, vsi_handle))
3017                 return ICE_ERR_PARAM;
3018
3019         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3020
3021         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3022         if (!prof) {
3023                 status = ICE_ERR_DOES_NOT_EXIST;
3024         } else {
3025                 /* Allocate memory for the entry being added and associate
3026                  * the VSI with the found flow profile
3027                  */
3028                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3029                 if (!e)
3030                         status = ICE_ERR_NO_MEMORY;
3031                 else
3032                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3033         }
3034
3035         ice_release_lock(&hw->fl_profs_locks[blk]);
3036         if (status)
3037                 goto out;
3038
3039         e->id = entry_id;
3040         e->vsi_handle = vsi_handle;
3041         e->prof = prof;
3042         e->priority = prio;
3043
3044         switch (blk) {
3045         case ICE_BLK_FD:
3046         case ICE_BLK_RSS:
3047                 break;
3048         case ICE_BLK_ACL:
3049                 /* ACL will handle the entry management */
3050                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3051                                                  acts_cnt);
3052                 if (status)
3053                         goto out;
3054
3055                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3056                 if (status)
3057                         goto out;
3058
3059                 break;
3060         default:
3061                 status = ICE_ERR_NOT_IMPL;
3062                 goto out;
3063         }
3064
3065         if (blk != ICE_BLK_ACL) {
3066                 /* ACL will handle the entry management */
3067                 ice_acquire_lock(&prof->entries_lock);
3068                 LIST_ADD(&e->l_entry, &prof->entries);
3069                 ice_release_lock(&prof->entries_lock);
3070         }
3071
3072         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3073
3074 out:
3075         if (status && e) {
3076                 if (e->entry)
3077                         ice_free(hw, e->entry);
3078                 ice_free(hw, e);
3079         }
3080
3081         return status;
3082 }
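
/* Example (editor's illustrative sketch, not part of the driver): a minimal
 * caller of ice_flow_add_entry() for a flow director entry, assuming a profile
 * with ID ex_prof_id was already added with ice_flow_add_prof() and that the
 * byte offsets used inside ex_data were registered on the profile's segments
 * with ice_flow_set_fld(). All ex_* identifiers are hypothetical.
 *
 *	u64 ex_entry_h = ICE_FLOW_ENTRY_HANDLE_INVAL;
 *	enum ice_status ex_status;
 *
 *	ex_status = ice_flow_add_entry(hw, ICE_BLK_FD, ex_prof_id, ex_entry_id,
 *				       ex_vsi_handle, ICE_FLOW_PRIO_NORMAL,
 *				       ex_data, NULL, 0, &ex_entry_h);
 *	if (!ex_status)
 *		ex_status = ice_flow_rem_entry(hw, ICE_BLK_FD, ex_entry_h);
 */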
3083
3084 /**
3085  * ice_flow_rem_entry - Remove a flow entry
3086  * @hw: pointer to the HW struct
3087  * @blk: classification stage
3088  * @entry_h: handle to the flow entry to be removed
3089  */
3090 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3091                                    u64 entry_h)
3092 {
3093         struct ice_flow_entry *entry;
3094         struct ice_flow_prof *prof;
3095         enum ice_status status = ICE_SUCCESS;
3096
3097         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3098                 return ICE_ERR_PARAM;
3099
3100         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
3101
3102         /* Retain the pointer to the flow profile as the entry will be freed */
3103         prof = entry->prof;
3104
3105         if (prof) {
3106                 ice_acquire_lock(&prof->entries_lock);
3107                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3108                 ice_release_lock(&prof->entries_lock);
3109         }
3110
3111         return status;
3112 }
3113
3114 /**
3115  * ice_flow_set_fld_ext - specifies locations of a field from entry's input buffer
3116  * @seg: packet segment the field being set belongs to
3117  * @fld: field to be set
3118  * @field_type: type of the field
3119  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3120  *           entry's input buffer
3121  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3122  *            input buffer
3123  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3124  *            entry's input buffer
3125  *
3126  * This helper function stores information about a field being matched,
3127  * including the type of the field and the locations, within the input buffer
3128  * for a flow entry, of the value to match, the mask, and the upper-bound value.
3129  * This function should only be used for fixed-size data structures.
3130  *
3131  * This function also opportunistically determines the protocol headers to be
3132  * present based on the fields being set. Some fields cannot be used alone to
3133  * determine the protocol headers present. Sometimes, fields for particular
3134  * protocol headers are not matched. In those cases, the protocol headers
3135  * must be explicitly set.
3136  */
3137 static void
3138 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3139                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3140                      u16 mask_loc, u16 last_loc)
3141 {
3142         u64 bit = BIT_ULL(fld);
3143
3144         seg->match |= bit;
3145         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3146                 seg->range |= bit;
3147
3148         seg->fields[fld].type = field_type;
3149         seg->fields[fld].src.val = val_loc;
3150         seg->fields[fld].src.mask = mask_loc;
3151         seg->fields[fld].src.last = last_loc;
3152
3153         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3154 }
3155
3156 /**
3157  * ice_flow_set_fld - specifies locations of a field from entry's input buffer
3158  * @seg: packet segment the field being set belongs to
3159  * @fld: field to be set
3160  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3161  *           entry's input buffer
3162  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3163  *            input buffer
3164  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3165  *            entry's input buffer
3166  * @range: indicate if field being matched is to be in a range
3167  *
3168  * This function specifies the locations, in the form of byte offsets from the
3169  * start of the input buffer for a flow entry, from where the value to match,
3170  * the mask value, and upper value can be extracted. These locations are then
3171  * stored in the flow profile. When adding a flow entry associated with the
3172  * flow profile, these locations will be used to quickly extract the values and
3173  * create the content of a match entry. This function should only be used for
3174  * fixed-size data structures.
3175  */
3176 void
3177 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3178                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3179 {
3180         enum ice_flow_fld_match_type t = range ?
3181                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3182
3183         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3184 }
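
/* Example (editor's illustrative sketch, not part of the driver): register a
 * regular (non-range) IPv4 destination address match whose value and mask live
 * in a caller-defined input buffer. The struct ex_fd_input layout is
 * hypothetical and offsetof() is assumed to be available; the resulting byte
 * offsets are later used to pull the values out of the buffer passed as 'data'
 * to ice_flow_add_entry().
 *
 *	struct ex_fd_input {
 *		u32 dst_ip;
 *		u32 dst_ip_mask;
 *	};
 *	struct ice_flow_seg_info seg = { 0 };
 *
 *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
 *			 offsetof(struct ex_fd_input, dst_ip),
 *			 offsetof(struct ex_fd_input, dst_ip_mask),
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 */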
3185
3186 /**
3187  * ice_flow_set_fld_prefix - sets locations of a prefix field from entry's buf
3188  * @seg: packet segment the field being set belongs to
3189  * @fld: field to be set
3190  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3191  *           entry's input buffer
3192  * @pref_loc: location of prefix value from entry's input buffer
3193  * @pref_sz: size of the location holding the prefix value
3194  *
3195  * This function specifies the locations, in the form of byte offsets from the
3196  * start of the input buffer for a flow entry, from where the value to match
3197  * and the IPv4 prefix value can be extracted. These locations are then stored
3198  * in the flow profile. When adding flow entries to the associated flow profile,
3199  * these locations can be used to quickly extract the values to create the
3200  * content of a match entry. This function should only be used for fixed-size
3201  * data structures.
3202  */
3203 void
3204 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3205                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3206 {
3207         /* For this type of field, the "mask" location is for the prefix value's
3208          * location and the "last" location is for the size of the location of
3209          * the prefix value.
3210          */
3211         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3212                              pref_loc, (u16)pref_sz);
3213 }
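
/* Example (editor's illustrative sketch, not part of the driver): register an
 * IPv4 source address whose prefix length is read from the entry's input
 * buffer. The struct ex_input with members src_ip (u32) and pfx_len (u8) is
 * hypothetical.
 *
 *	ice_flow_set_fld_prefix(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *				offsetof(struct ex_input, src_ip),
 *				offsetof(struct ex_input, pfx_len),
 *				sizeof(u8));
 */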
3214
3215 /**
3216  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3217  * @seg: packet segment the field being set belongs to
3218  * @off: offset of the raw field from the beginning of the segment in bytes
3219  * @len: length of the raw pattern to be matched
3220  * @val_loc: location of the value to match from entry's input buffer
3221  * @mask_loc: location of mask value from entry's input buffer
3222  *
3223  * This function specifies the offset of the raw field to be matched from the
3224  * beginning of the specified packet segment, and the locations, in the form of
3225  * byte offsets from the start of the input buffer for a flow entry, from where
3226  * the value to match and the mask value can be extracted.
3227  * then stored in the flow profile. When adding flow entries to the associated
3228  * flow profile, these locations can be used to quickly extract the values to
3229  * create the content of a match entry. This function should only be used for
3230  * fixed-size data structures.
3231  */
3232 void
3233 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3234                      u16 val_loc, u16 mask_loc)
3235 {
3236         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3237                 seg->raws[seg->raws_cnt].off = off;
3238                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3239                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3240                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3241                 /* The "last" field is used to store the length of the field */
3242                 seg->raws[seg->raws_cnt].info.src.last = len;
3243         }
3244
3245         /* Overflows of "raws" will be handled as an error condition later in
3246          * the flow when this information is processed.
3247          */
3248         seg->raws_cnt++;
3249 }
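
/* Example (editor's illustrative sketch, not part of the driver): match two
 * raw bytes located 4 bytes into the packet segment, with the pattern stored
 * at byte offset 0 and its mask at byte offset 2 of a hypothetical entry
 * input buffer.
 *
 *	ice_flow_add_fld_raw(&seg, 4, 2, 0, 2);
 */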
3250
3251 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3252         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3253
3254 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3255         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3256
3257 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3258         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3259
3260 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3261         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3262          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3263          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3264
3265 /**
3266  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3267  * @segs: pointer to the flow field segment(s)
3268  * @seg_cnt: segment count
3269  * @cfg: configure parameters
3270  *
3271  * Helper function to extract fields from the hash bitmap and use the flow
3272  * header value to set the flow field segment for further use in flow
3273  * profile addition or removal.
3274  */
3275 static enum ice_status
3276 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3277                           const struct ice_rss_hash_cfg *cfg)
3278 {
3279         struct ice_flow_seg_info *seg;
3280         u64 val;
3281         u8 i;
3282
3283         /* set innermost segment */
3284         seg = &segs[seg_cnt - 1];
3285
3286         ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3287                              ICE_FLOW_FIELD_IDX_MAX)
3288                 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3289                                  ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3290                                  ICE_FLOW_FLD_OFF_INVAL, false);
3291
3292         ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3293
3294         /* set outermost header */
3295         if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3296                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3297                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3298         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3299                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3300                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3301
3302         if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3303             ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
3304                 return ICE_ERR_PARAM;
3305
3306         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3307         if (val && !ice_is_pow2(val))
3308                 return ICE_ERR_CFG;
3309
3310         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3311         if (val && !ice_is_pow2(val))
3312                 return ICE_ERR_CFG;
3313
3314         return ICE_SUCCESS;
3315 }
3316
3317 /**
3318  * ice_rem_vsi_rss_list - remove VSI from RSS list
3319  * @hw: pointer to the hardware structure
3320  * @vsi_handle: software VSI handle
3321  *
3322  * Remove the VSI from all RSS configurations in the list.
3323  */
3324 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3325 {
3326         struct ice_rss_cfg *r, *tmp;
3327
3328         if (LIST_EMPTY(&hw->rss_list_head))
3329                 return;
3330
3331         ice_acquire_lock(&hw->rss_locks);
3332         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3333                                  ice_rss_cfg, l_entry)
3334                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3335                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3336                                 LIST_DEL(&r->l_entry);
3337                                 ice_free(hw, r);
3338                         }
3339         ice_release_lock(&hw->rss_locks);
3340 }
3341
3342 /**
3343  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3344  * @hw: pointer to the hardware structure
3345  * @vsi_handle: software VSI handle
3346  *
3347  * This function will iterate through all flow profiles and disassociate
3348  * the VSI from each profile. If a flow profile has no VSIs it will
3349  * be removed.
3350  */
3351 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3352 {
3353         const enum ice_block blk = ICE_BLK_RSS;
3354         struct ice_flow_prof *p, *t;
3355         enum ice_status status = ICE_SUCCESS;
3356
3357         if (!ice_is_vsi_valid(hw, vsi_handle))
3358                 return ICE_ERR_PARAM;
3359
3360         if (LIST_EMPTY(&hw->fl_profs[blk]))
3361                 return ICE_SUCCESS;
3362
3363         ice_acquire_lock(&hw->rss_locks);
3364         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3365                                  l_entry)
3366                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3367                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3368                         if (status)
3369                                 break;
3370
3371                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3372                                 status = ice_flow_rem_prof(hw, blk, p->id);
3373                                 if (status)
3374                                         break;
3375                         }
3376                 }
3377         ice_release_lock(&hw->rss_locks);
3378
3379         return status;
3380 }
3381
3382 /**
3383  * ice_get_rss_hdr_type - get an RSS profile's header type
3384  * @prof: RSS flow profile
3385  */
3386 static enum ice_rss_cfg_hdr_type
3387 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3388 {
3389         enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3390
3391         if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3392                 hdr_type = ICE_RSS_OUTER_HEADERS;
3393         } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3394                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3395                         hdr_type = ICE_RSS_INNER_HEADERS;
3396                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3397                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3398                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3399                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3400         }
3401
3402         return hdr_type;
3403 }
3404
3405 /**
3406  * ice_rem_rss_list - remove RSS configuration from list
3407  * @hw: pointer to the hardware structure
3408  * @vsi_handle: software VSI handle
3409  * @prof: pointer to flow profile
3410  *
3411  * Assumption: lock has already been acquired for RSS list
3412  */
3413 static void
3414 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3415 {
3416         enum ice_rss_cfg_hdr_type hdr_type;
3417         struct ice_rss_cfg *r, *tmp;
3418
3419         /* Search for RSS hash fields associated with the VSI that match the
3420          * hash configurations associated with the flow profile. If found,
3421          * remove from the RSS entry list of the VSI context and delete entry.
3422          */
3423         hdr_type = ice_get_rss_hdr_type(prof);
3424         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3425                                  ice_rss_cfg, l_entry)
3426                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3427                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3428                     r->hash.hdr_type == hdr_type) {
3429                         ice_clear_bit(vsi_handle, r->vsis);
3430                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3431                                 LIST_DEL(&r->l_entry);
3432                                 ice_free(hw, r);
3433                         }
3434                         return;
3435                 }
3436 }
3437
3438 /**
3439  * ice_add_rss_list - add RSS configuration to list
3440  * @hw: pointer to the hardware structure
3441  * @vsi_handle: software VSI handle
3442  * @prof: pointer to flow profile
3443  *
3444  * Assumption: lock has already been acquired for RSS list
3445  */
3446 static enum ice_status
3447 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3448 {
3449         enum ice_rss_cfg_hdr_type hdr_type;
3450         struct ice_rss_cfg *r, *rss_cfg;
3451
3452         hdr_type = ice_get_rss_hdr_type(prof);
3453         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3454                             ice_rss_cfg, l_entry)
3455                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3456                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3457                     r->hash.hdr_type == hdr_type) {
3458                         ice_set_bit(vsi_handle, r->vsis);
3459                         return ICE_SUCCESS;
3460                 }
3461
3462         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3463         if (!rss_cfg)
3464                 return ICE_ERR_NO_MEMORY;
3465
3466         rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3467         rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3468         rss_cfg->hash.hdr_type = hdr_type;
3469         rss_cfg->hash.symm = prof->cfg.symm;
3470         ice_set_bit(vsi_handle, rss_cfg->vsis);
3471
3472         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3473
3474         return ICE_SUCCESS;
3475 }
3476
3477 #define ICE_FLOW_PROF_HASH_S    0
3478 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3479 #define ICE_FLOW_PROF_HDR_S     32
3480 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3481 #define ICE_FLOW_PROF_ENCAP_S   62
3482 #define ICE_FLOW_PROF_ENCAP_M   (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3483
3484 /* Flow profile ID format:
3485  * [0:31] - Packet match fields
3486  * [32:61] - Protocol header
3487  * [62:63] - Encapsulation flag:
3488  *           0 if non-tunneled
3489  *           1 if tunneled
3490  *           2 if tunneled with outer IPv4
3491  *           3 if tunneled with outer IPv6
3492  */
3493 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3494         (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3495               (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3496               (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M))
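
/* Worked example (editor's sketch): with a hash-field bitmap 'hash', a header
 * bitmap 'hdr' and a zero 'encap' argument, ICE_FLOW_GEN_PROFID(hash, hdr, 0)
 * keeps the low 32 bits of 'hash' in bits [0:31], places the low 30 bits of
 * 'hdr' in bits [32:61] and leaves bits [62:63] clear, i.e. the non-tunneled
 * encoding described above.
 */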
3497
3498 static void
3499 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3500 {
3501         u32 s = ((src % 4) << 3); /* byte shift */
3502         u32 v = dst | 0x80; /* value to program */
3503         u8 i = src / 4; /* register index */
3504         u32 reg;
3505
3506         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3507         reg = (reg & ~(0xff << s)) | (v << s);
3508         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3509 }
3510
3511 static void
3512 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3513 {
3514         int fv_last_word =
3515                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3516         int i;
3517
3518         for (i = 0; i < len; i++) {
3519                 ice_rss_config_xor_word(hw, prof_id,
3520                                         /* Yes, field vector in GLQF_HSYMM and
3521                                          * GLQF_HINSET is inverted!
3522                                          */
3523                                         fv_last_word - (src + i),
3524                                         fv_last_word - (dst + i));
3525                 ice_rss_config_xor_word(hw, prof_id,
3526                                         fv_last_word - (dst + i),
3527                                         fv_last_word - (src + i));
3528         }
3529 }
3530
3531 static void
3532 ice_rss_update_symm(struct ice_hw *hw,
3533                     struct ice_flow_prof *prof)
3534 {
3535         struct ice_prof_map *map;
3536         u8 prof_id, m;
3537
3538         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3539         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3540         if (map)
3541                 prof_id = map->prof_id;
3542         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3543         if (!map)
3544                 return;
3545         /* clear to default */
3546         for (m = 0; m < 6; m++)
3547                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3548         if (prof->cfg.symm) {
3549                 struct ice_flow_seg_info *seg =
3550                         &prof->segs[prof->segs_cnt - 1];
3551
3552                 struct ice_flow_seg_xtrct *ipv4_src =
3553                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3554                 struct ice_flow_seg_xtrct *ipv4_dst =
3555                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3556                 struct ice_flow_seg_xtrct *ipv6_src =
3557                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3558                 struct ice_flow_seg_xtrct *ipv6_dst =
3559                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3560
3561                 struct ice_flow_seg_xtrct *tcp_src =
3562                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3563                 struct ice_flow_seg_xtrct *tcp_dst =
3564                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3565
3566                 struct ice_flow_seg_xtrct *udp_src =
3567                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3568                 struct ice_flow_seg_xtrct *udp_dst =
3569                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3570
3571                 struct ice_flow_seg_xtrct *sctp_src =
3572                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3573                 struct ice_flow_seg_xtrct *sctp_dst =
3574                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3575
3576                 /* xor IPv4 */
3577                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3578                         ice_rss_config_xor(hw, prof_id,
3579                                            ipv4_src->idx, ipv4_dst->idx, 2);
3580
3581                 /* xor IPv6 */
3582                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3583                         ice_rss_config_xor(hw, prof_id,
3584                                            ipv6_src->idx, ipv6_dst->idx, 8);
3585
3586                 /* xor TCP */
3587                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3588                         ice_rss_config_xor(hw, prof_id,
3589                                            tcp_src->idx, tcp_dst->idx, 1);
3590
3591                 /* xor UDP */
3592                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3593                         ice_rss_config_xor(hw, prof_id,
3594                                            udp_src->idx, udp_dst->idx, 1);
3595
3596                 /* xor SCTP */
3597                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3598                         ice_rss_config_xor(hw, prof_id,
3599                                            sctp_src->idx, sctp_dst->idx, 1);
3600         }
3601 }
3602
3603 /**
3604  * ice_add_rss_cfg_sync - add an RSS configuration
3605  * @hw: pointer to the hardware structure
3606  * @vsi_handle: software VSI handle
3607  * @cfg: configure parameters
3608  *
3609  * Assumption: lock has already been acquired for RSS list
3610  */
3611 static enum ice_status
3612 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3613                      const struct ice_rss_hash_cfg *cfg)
3614 {
3615         const enum ice_block blk = ICE_BLK_RSS;
3616         struct ice_flow_prof *prof = NULL;
3617         struct ice_flow_seg_info *segs;
3618         enum ice_status status;
3619         u8 segs_cnt;
3620
3621         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3622                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3623
3624         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3625                                                       sizeof(*segs));
3626         if (!segs)
3627                 return ICE_ERR_NO_MEMORY;
3628
3629         /* Construct the packet segment info from the hashed fields */
3630         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3631         if (status)
3632                 goto exit;
3633
3634         /* Don't do RSS for GTPU Outer */
3635         if (segs_cnt == ICE_FLOW_SEG_SINGLE &&
3636             segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3637                 status = ICE_SUCCESS;
3638                 goto exit;
3639         }
3640
3641         /* Search for a flow profile that has matching headers, hash fields
3642          * and has the input VSI associated with it. If found, no further
3643          * operations are required, so exit.
3644          */
3645         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3646                                         vsi_handle,
3647                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
3648                                         ICE_FLOW_FIND_PROF_CHK_VSI);
3649         if (prof) {
3650                 if (prof->cfg.symm == cfg->symm)
3651                         goto exit;
3652                 prof->cfg.symm = cfg->symm;
3653                 goto update_symm;
3654         }
3655
3656         /* Check if a flow profile exists with the same protocol headers and
3657          * associated with the input VSI. If so, disassociate the VSI from
3658          * this profile. The VSI will be added to a new profile created with
3659          * the protocol header and new hash field configuration.
3660          */
3661         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3662                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3663         if (prof) {
3664                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3665                 if (!status)
3666                         ice_rem_rss_list(hw, vsi_handle, prof);
3667                 else
3668                         goto exit;
3669
3670                 /* Remove profile if it has no VSIs associated */
3671                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3672                         status = ice_flow_rem_prof(hw, blk, prof->id);
3673                         if (status)
3674                                 goto exit;
3675                 }
3676         }
3677
3678         /* Search for a profile that has the same match fields only. If this
3679          * exists then associate the VSI with this profile.
3680          */
3681         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3682                                         vsi_handle,
3683                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3684         if (prof) {
3685                 if (prof->cfg.symm == cfg->symm) {
3686                         status = ice_flow_assoc_prof(hw, blk, prof,
3687                                                      vsi_handle);
3688                         if (!status)
3689                                 status = ice_add_rss_list(hw, vsi_handle,
3690                                                           prof);
3691                 } else {
3692                         /* if a profile exists but with a different symmetric
3693                          * requirement, just return an error.
3694                          */
3695                         status = ICE_ERR_NOT_SUPPORTED;
3696                 }
3697                 goto exit;
3698         }
3699
3700         /* Create a new flow profile with the generated profile ID and packet
3701          * segment information.
3702          */
3703         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3704                                    ICE_FLOW_GEN_PROFID(cfg->hash_flds,
3705                                                        segs[segs_cnt - 1].hdrs,
3706                                                        cfg->hdr_type),
3707                                    segs, segs_cnt, NULL, 0, &prof);
3708         if (status)
3709                 goto exit;
3710
3711         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3712         /* If association to the new flow profile failed then this profile can
3713          * be removed.
3714          */
3715         if (status) {
3716                 ice_flow_rem_prof(hw, blk, prof->id);
3717                 goto exit;
3718         }
3719
3720         status = ice_add_rss_list(hw, vsi_handle, prof);
3721
3722         prof->cfg.symm = cfg->symm;
3723 update_symm:
3724         ice_rss_update_symm(hw, prof);
3725
3726 exit:
3727         ice_free(hw, segs);
3728         return status;
3729 }
3730
3731 /**
3732  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3733  * @hw: pointer to the hardware structure
3734  * @vsi_handle: software VSI handle
3735  * @cfg: configure parameters
3736  *
3737  * This function will generate a flow profile based on the input fields to
3738  * hash on and the flow type, and will use the VSI number to add a flow
3739  * entry to the profile.
3740  */
3741 enum ice_status
3742 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3743                 const struct ice_rss_hash_cfg *cfg)
3744 {
3745         struct ice_rss_hash_cfg local_cfg;
3746         enum ice_status status;
3747
3748         if (!ice_is_vsi_valid(hw, vsi_handle) ||
3749             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3750             cfg->hash_flds == ICE_HASH_INVALID)
3751                 return ICE_ERR_PARAM;
3752
3753         local_cfg = *cfg;
3754         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3755                 ice_acquire_lock(&hw->rss_locks);
3756                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3757                 ice_release_lock(&hw->rss_locks);
3758         } else {
3759                 ice_acquire_lock(&hw->rss_locks);
3760                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3761                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3762                 if (!status) {
3763                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3764                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3765                                                       &local_cfg);
3766                 }
3767                 ice_release_lock(&hw->rss_locks);
3768         }
3769
3770         return status;
3771 }
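
/* Example (editor's illustrative sketch, not part of the driver): enable
 * non-symmetric RSS on outer IPv4 addresses plus TCP ports for a VSI. The
 * hash-field bitmap is built from ICE_FLOW_FIELD_IDX_* values used elsewhere
 * in this file; ex_vsi_handle and ex_status are hypothetical locals.
 *
 *	struct ice_rss_hash_cfg ex_cfg = { 0 };
 *	enum ice_status ex_status;
 *
 *	ex_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP;
 *	ex_cfg.hash_flds = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *			   BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
 *			   BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) |
 *			   BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT);
 *	ex_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
 *	ex_cfg.symm = false;
 *
 *	ex_status = ice_add_rss_cfg(hw, ex_vsi_handle, &ex_cfg);
 */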
3772
3773 /**
3774  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3775  * @hw: pointer to the hardware structure
3776  * @vsi_handle: software VSI handle
3777  * @cfg: configuration parameters
3778  *
3779  * Assumption: lock has already been acquired for RSS list
3780  */
3781 static enum ice_status
3782 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3783                      const struct ice_rss_hash_cfg *cfg)
3784 {
3785         const enum ice_block blk = ICE_BLK_RSS;
3786         struct ice_flow_seg_info *segs;
3787         struct ice_flow_prof *prof;
3788         enum ice_status status;
3789         u8 segs_cnt;
3790
3791         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3792                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3793         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3794                                                       sizeof(*segs));
3795         if (!segs)
3796                 return ICE_ERR_NO_MEMORY;
3797
3798         /* Construct the packet segment info from the hashed fields */
3799         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3800         if (status)
3801                 goto out;
3802
3803         /* Don't do RSS for the GTPU outer header */
3804         if (segs_cnt == ICE_FLOW_SEG_SINGLE &&
3805             segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3806                 status = ICE_SUCCESS;
3807                 goto out;
3808         }
3809
3810         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3811                                         vsi_handle,
3812                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3813         if (!prof) {
3814                 status = ICE_ERR_DOES_NOT_EXIST;
3815                 goto out;
3816         }
3817
3818         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3819         if (status)
3820                 goto out;
3821
3822         /* Remove RSS configuration from VSI context before deleting
3823          * the flow profile.
3824          */
3825         ice_rem_rss_list(hw, vsi_handle, prof);
3826
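        /* Only remove the flow profile itself once no VSI references it */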
3827         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3828                 status = ice_flow_rem_prof(hw, blk, prof->id);
3829
3830 out:
3831         ice_free(hw, segs);
3832         return status;
3833 }
3834
3835 /**
3836  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3837  * @hw: pointer to the hardware structure
3838  * @vsi_handle: software VSI handle
3839  * @cfg: configuration parameters
3840  *
3841  * This function will look up the flow profile based on the input
3842  * hash field bitmap, iterate through the entry list of that
3843  * profile, and find the entry associated with the input VSI to be
3844  * removed. Calls are made to the underlying flow APIs, which in
3845  * turn build or update the buffers for the RSS XLT1 section.
3846  */
3847 enum ice_status
3848 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3849                 const struct ice_rss_hash_cfg *cfg)
3850 {
3851         struct ice_rss_hash_cfg local_cfg;
3852         enum ice_status status;
3853
3854         if (!ice_is_vsi_valid(hw, vsi_handle) ||
3855             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3856             cfg->hash_flds == ICE_HASH_INVALID)
3857                 return ICE_ERR_PARAM;
3858
3859         ice_acquire_lock(&hw->rss_locks);
3860         local_cfg = *cfg;
3861         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3862                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3863         } else {
3864                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3865                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3866
3867                 if (!status) {
3868                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3869                         status = ice_rem_rss_cfg_sync(hw, vsi_handle,
3870                                                       &local_cfg);
3871                 }
3872         }
3873         ice_release_lock(&hw->rss_locks);
3874
3875         return status;
3876 }
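
/* Illustrative usage (not part of the driver): removal mirrors addition, so
 * a caller would pass the same ice_rss_hash_cfg that was handed to
 * ice_add_rss_cfg(); when hdr_type is ICE_RSS_ANY_HEADERS the same
 * outer-then-inner expansion is applied under the RSS lock.
 *
 *      status = ice_rem_rss_cfg(hw, vsi_handle, &cfg);
 *
 * ICE_ERR_DOES_NOT_EXIST from this call indicates that no flow profile with
 * the requested hash fields currently contains the VSI.
 */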
3877
3878 /**
3879  * ice_replay_rss_cfg - replay RSS configurations associated with a VSI
3880  * @hw: pointer to the hardware structure
3881  * @vsi_handle: software VSI handle
3882  */
3883 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3884 {
3885         enum ice_status status = ICE_SUCCESS;
3886         struct ice_rss_cfg *r;
3887
3888         if (!ice_is_vsi_valid(hw, vsi_handle))
3889                 return ICE_ERR_PARAM;
3890
3891         ice_acquire_lock(&hw->rss_locks);
3892         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3893                             ice_rss_cfg, l_entry) {
3894                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3895                         status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
3896                         if (status)
3897                                 break;
3898                 }
3899         }
3900         ice_release_lock(&hw->rss_locks);
3901
3902         return status;
3903 }
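
/* Illustrative usage (not part of the driver): replay is meant for recovery
 * paths such as rebuilding a VSI after a reset, where the hardware state is
 * lost but hw->rss_list_head still records what was configured. A caller
 * would typically re-apply everything recorded for the VSI in one call:
 *
 *      status = ice_replay_rss_cfg(hw, vsi_handle);
 *
 * The loop above stops at the first configuration that fails to re-program
 * and returns that failing status to the caller.
 */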
3904
3905 /**
3906  * ice_get_rss_cfg - returns hashed fields for the given header types
3907  * @hw: pointer to the hardware structure
3908  * @vsi_handle: software VSI handle
3909  * @hdrs: protocol header type
3910  *
3911  * This function will return the match fields of the first instance of a
3912  * flow profile having the given header types and containing the input VSI.
3913  */
3914 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3915 {
3916         u64 rss_hash = ICE_HASH_INVALID;
3917         struct ice_rss_cfg *r;
3918
3919         /* verify that the protocol header is non-zero and the VSI is valid */
3920         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3921                 return ICE_HASH_INVALID;
3922
3923         ice_acquire_lock(&hw->rss_locks);
3924         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3925                             ice_rss_cfg, l_entry)
3926                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3927                     r->hash.addl_hdrs == hdrs) {
3928                         rss_hash = r->hash.hash_flds;
3929                         break;
3930                 }
3931         ice_release_lock(&hw->rss_locks);
3932
3933         return rss_hash;
3934 }
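
/* Illustrative usage (not part of the driver): a caller can query which
 * fields are currently hashed for an exact protocol-header combination on a
 * VSI. Note that the stored addl_hdrs must equal the requested hdrs exactly;
 * a superset or subset of headers will not match.
 *
 *      u64 hash_flds = ice_get_rss_cfg(hw, vsi_handle, ICE_FLOW_SEG_HDR_IPV4);
 *
 * A return value of ICE_HASH_INVALID means no matching configuration
 * contains this VSI for that header set.
 */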