/*
 * net/ice/base: fix uninitialized struct
 * [dpdk.git] / drivers / net / ice / base / ice_flow.c
 */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2021 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
17 #define ICE_FLOW_FLD_SZ_IP_TTL          1
18 #define ICE_FLOW_FLD_SZ_IP_PROT         1
19 #define ICE_FLOW_FLD_SZ_PORT            2
20 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
21 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
22 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
23 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
24 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
25 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
26 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
27 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
28 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
29 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
30 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
31 #define ICE_FLOW_FLD_SZ_AH_SPI  4
32 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
33 #define ICE_FLOW_FLD_SZ_VXLAN_VNI       4
34 #define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
35
36 /* Describe properties of a protocol header field */
37 struct ice_flow_field_info {
38         enum ice_flow_seg_hdr hdr;
39         s16 off;        /* Offset from start of a protocol header, in bits */
40         u16 size;       /* Size of fields in bits */
41         u16 mask;       /* 16-bit mask for field */
42 };
43
44 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
45         .hdr = _hdr, \
46         .off = (_offset_bytes) * BITS_PER_BYTE, \
47         .size = (_size_bytes) * BITS_PER_BYTE, \
48         .mask = 0, \
49 }
50
51 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
52         .hdr = _hdr, \
53         .off = (_offset_bytes) * BITS_PER_BYTE, \
54         .size = (_size_bytes) * BITS_PER_BYTE, \
55         .mask = _mask, \
56 }
57
58 /* Table containing properties of supported protocol header fields */
59 static const
60 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
61         /* Ether */
62         /* ICE_FLOW_FIELD_IDX_ETH_DA */
63         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
64         /* ICE_FLOW_FIELD_IDX_ETH_SA */
65         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
66         /* ICE_FLOW_FIELD_IDX_S_VLAN */
67         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
68         /* ICE_FLOW_FIELD_IDX_C_VLAN */
69         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
70         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
71         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
72         /* IPv4 / IPv6 */
73         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
74         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
75                               0x00fc),
76         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
77         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
78                               0x0ff0),
79         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
80         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
81                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
82         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
83         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
84                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
85         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
86         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
87                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
88         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
89         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
90                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
91         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
92         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
94         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
95         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
96         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
99         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
100         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
101                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
102         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
103         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
104                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
105         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
106         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
107                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
108         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
109         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
110                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
111         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
112         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
113                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
114         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
115         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
116                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
117         /* Transport */
118         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
119         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
120         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
121         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
122         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
123         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
124         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
125         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
126         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
127         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
128         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
129         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
130         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
131         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
132         /* ARP */
133         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
134         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
136         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
137         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
138         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
139         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
140         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
141         /* ICE_FLOW_FIELD_IDX_ARP_OP */
142         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
143         /* ICMP */
144         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
145         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
146         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
147         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
148         /* GRE */
149         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
150         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
151         /* GTP */
152         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
153         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
154                           ICE_FLOW_FLD_SZ_GTP_TEID),
155         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
156         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
157                           ICE_FLOW_FLD_SZ_GTP_TEID),
158         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
159         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
160                           ICE_FLOW_FLD_SZ_GTP_TEID),
161         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
162         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
163                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
164         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
165         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
166                           ICE_FLOW_FLD_SZ_GTP_TEID),
167         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
168         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
169                           ICE_FLOW_FLD_SZ_GTP_TEID),
170         /* PPPOE */
171         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
172         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
173                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
174         /* PFCP */
175         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
176         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
177                           ICE_FLOW_FLD_SZ_PFCP_SEID),
178         /* L2TPV3 */
179         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
180         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
181                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
182         /* ESP */
183         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
184         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
185                           ICE_FLOW_FLD_SZ_ESP_SPI),
186         /* AH */
187         /* ICE_FLOW_FIELD_IDX_AH_SPI */
188         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
189                           ICE_FLOW_FLD_SZ_AH_SPI),
190         /* NAT_T_ESP */
191         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
192         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
193                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
194         /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
195         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
196                           ICE_FLOW_FLD_SZ_VXLAN_VNI),
197         /* ECPRI_TP0 */
198         /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
199         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
200                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
201         /* UDP_ECPRI_TP0 */
202         /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
203         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
204                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
205 };
206
207 /* Bitmaps indicating relevant packet types for a particular protocol header
208  *
209  * Packet types for packets with an Outer/First/Single MAC header
210  */
211 static const u32 ice_ptypes_mac_ofos[] = {
212         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
213         0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
214         0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
215         0x00000000, 0x00000000, 0x00000000, 0x00000000,
216         0x00000000, 0x00000000, 0x00000000, 0x00000000,
217         0x00000000, 0x00000000, 0x00000000, 0x00000000,
218         0x00000000, 0x00000000, 0x00000000, 0x00000000,
219         0x00000000, 0x00000000, 0x00000000, 0x00000000,
220 };
221
222 /* Packet types for packets with an Innermost/Last MAC VLAN header */
223 static const u32 ice_ptypes_macvlan_il[] = {
224         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
225         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
226         0x00000000, 0x00000000, 0x00000000, 0x00000000,
227         0x00000000, 0x00000000, 0x00000000, 0x00000000,
228         0x00000000, 0x00000000, 0x00000000, 0x00000000,
229         0x00000000, 0x00000000, 0x00000000, 0x00000000,
230         0x00000000, 0x00000000, 0x00000000, 0x00000000,
231         0x00000000, 0x00000000, 0x00000000, 0x00000000,
232 };
233
234 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
235  * include IPV4 other PTYPEs
236  */
237 static const u32 ice_ptypes_ipv4_ofos[] = {
238         0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
239         0x00000000, 0x00000155, 0x00000000, 0x00000000,
240         0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
241         0x00000000, 0x00000000, 0x00000000, 0x00000000,
242         0x00000000, 0x00000000, 0x00000000, 0x00000000,
243         0x00000000, 0x00000000, 0x00000000, 0x00000000,
244         0x00000000, 0x00000000, 0x00000000, 0x00000000,
245         0x00000000, 0x00000000, 0x00000000, 0x00000000,
246 };
247
248 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
249  * IPV4 other PTYPEs
250  */
251 static const u32 ice_ptypes_ipv4_ofos_all[] = {
252         0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
253         0x00000000, 0x00000155, 0x00000000, 0x00000000,
254         0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
255         0x00000000, 0x00000000, 0x00000000, 0x00000000,
256         0x00000000, 0x00000000, 0x00000000, 0x00000000,
257         0x00000000, 0x00000000, 0x00000000, 0x00000000,
258         0x00000000, 0x00000000, 0x00000000, 0x00000000,
259         0x00000000, 0x00000000, 0x00000000, 0x00000000,
260 };
261
262 /* Packet types for packets with an Innermost/Last IPv4 header */
263 static const u32 ice_ptypes_ipv4_il[] = {
264         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
265         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
266         0x00000000, 0x00000000, 0x001FF800, 0x00100000,
267         0x00000000, 0x00000000, 0x00000000, 0x00000000,
268         0x00000000, 0x00000000, 0x00000000, 0x00000000,
269         0x00000000, 0x00000000, 0x00000000, 0x00000000,
270         0x00000000, 0x00000000, 0x00000000, 0x00000000,
271         0x00000000, 0x00000000, 0x00000000, 0x00000000,
272 };
273
274 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
275  * include IVP6 other PTYPEs
276  */
277 static const u32 ice_ptypes_ipv6_ofos[] = {
278         0x00000000, 0x00000000, 0x77000000, 0x10002000,
279         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
280         0x00000000, 0x03F00000, 0x00000540, 0x00000000,
281         0x00000000, 0x00000000, 0x00000000, 0x00000000,
282         0x00000000, 0x00000000, 0x00000000, 0x00000000,
283         0x00000000, 0x00000000, 0x00000000, 0x00000000,
284         0x00000000, 0x00000000, 0x00000000, 0x00000000,
285         0x00000000, 0x00000000, 0x00000000, 0x00000000,
286 };
287
288 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
289  * IPV6 other PTYPEs
290  */
291 static const u32 ice_ptypes_ipv6_ofos_all[] = {
292         0x00000000, 0x00000000, 0x77000000, 0x10002000,
293         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
294         0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
295         0x00000000, 0x00000000, 0x00000000, 0x00000000,
296         0x00000000, 0x00000000, 0x00000000, 0x00000000,
297         0x00000000, 0x00000000, 0x00000000, 0x00000000,
298         0x00000000, 0x00000000, 0x00000000, 0x00000000,
299         0x00000000, 0x00000000, 0x00000000, 0x00000000,
300 };
301
302 /* Packet types for packets with an Innermost/Last IPv6 header */
303 static const u32 ice_ptypes_ipv6_il[] = {
304         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
305         0x00000770, 0x00000000, 0x00000000, 0x00000000,
306         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
307         0x00000000, 0x00000000, 0x00000000, 0x00000000,
308         0x00000000, 0x00000000, 0x00000000, 0x00000000,
309         0x00000000, 0x00000000, 0x00000000, 0x00000000,
310         0x00000000, 0x00000000, 0x00000000, 0x00000000,
311         0x00000000, 0x00000000, 0x00000000, 0x00000000,
312 };
313
314 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
315 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
316         0x10C00000, 0x04000800, 0x00000000, 0x00000000,
317         0x00000000, 0x00000000, 0x00000000, 0x00000000,
318         0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
319         0x00000000, 0x00000000, 0x00000000, 0x00000000,
320         0x00000000, 0x00000000, 0x00000000, 0x00000000,
321         0x00000000, 0x00000000, 0x00000000, 0x00000000,
322         0x00000000, 0x00000000, 0x00000000, 0x00000000,
323         0x00000000, 0x00000000, 0x00000000, 0x00000000,
324 };
325
326 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
327 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
328         0x60000000, 0x18043008, 0x80000002, 0x6010c021,
329         0x00000008, 0x00000000, 0x00000000, 0x00000000,
330         0x00000000, 0x00000000, 0x00139800, 0x00000000,
331         0x00000000, 0x00000000, 0x00000000, 0x00000000,
332         0x00000000, 0x00000000, 0x00000000, 0x00000000,
333         0x00000000, 0x00000000, 0x00000000, 0x00000000,
334         0x00000000, 0x00000000, 0x00000000, 0x00000000,
335         0x00000000, 0x00000000, 0x00000000, 0x00000000,
336 };
337
338 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
339 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
340         0x00000000, 0x00000000, 0x43000000, 0x10002000,
341         0x00000000, 0x00000000, 0x00000000, 0x00000000,
342         0x00000000, 0x02300000, 0x00000540, 0x00000000,
343         0x00000000, 0x00000000, 0x00000000, 0x00000000,
344         0x00000000, 0x00000000, 0x00000000, 0x00000000,
345         0x00000000, 0x00000000, 0x00000000, 0x00000000,
346         0x00000000, 0x00000000, 0x00000000, 0x00000000,
347         0x00000000, 0x00000000, 0x00000000, 0x00000000,
348 };
349
350 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
351 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
352         0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
353         0x00000430, 0x00000000, 0x00000000, 0x00000000,
354         0x00000000, 0x00000000, 0x4e600000, 0x00000000,
355         0x00000000, 0x00000000, 0x00000000, 0x00000000,
356         0x00000000, 0x00000000, 0x00000000, 0x00000000,
357         0x00000000, 0x00000000, 0x00000000, 0x00000000,
358         0x00000000, 0x00000000, 0x00000000, 0x00000000,
359         0x00000000, 0x00000000, 0x00000000, 0x00000000,
360 };
361
362 /* Packet types for packets with an Outermost/First ARP header */
363 static const u32 ice_ptypes_arp_of[] = {
364         0x00000800, 0x00000000, 0x00000000, 0x00000000,
365         0x00000000, 0x00000000, 0x00000000, 0x00000000,
366         0x00000000, 0x00000000, 0x00000000, 0x00000000,
367         0x00000000, 0x00000000, 0x00000000, 0x00000000,
368         0x00000000, 0x00000000, 0x00000000, 0x00000000,
369         0x00000000, 0x00000000, 0x00000000, 0x00000000,
370         0x00000000, 0x00000000, 0x00000000, 0x00000000,
371         0x00000000, 0x00000000, 0x00000000, 0x00000000,
372 };
373
374 /* UDP Packet types for non-tunneled packets or tunneled
375  * packets with inner UDP.
376  */
377 static const u32 ice_ptypes_udp_il[] = {
378         0x81000000, 0x20204040, 0x04000010, 0x80810102,
379         0x00000040, 0x00000000, 0x00000000, 0x00000000,
380         0x00000000, 0x00410000, 0x908427E0, 0x00100007,
381         0x00000000, 0x00000000, 0x00000000, 0x00000000,
382         0x00000000, 0x00000000, 0x00000000, 0x00000000,
383         0x00000000, 0x00000000, 0x00000000, 0x00000000,
384         0x00000000, 0x00000000, 0x00000000, 0x00000000,
385         0x00000000, 0x00000000, 0x00000000, 0x00000000,
386 };
387
388 /* Packet types for packets with an Innermost/Last TCP header */
389 static const u32 ice_ptypes_tcp_il[] = {
390         0x04000000, 0x80810102, 0x10000040, 0x02040408,
391         0x00000102, 0x00000000, 0x00000000, 0x00000000,
392         0x00000000, 0x00820000, 0x21084000, 0x00000000,
393         0x00000000, 0x00000000, 0x00000000, 0x00000000,
394         0x00000000, 0x00000000, 0x00000000, 0x00000000,
395         0x00000000, 0x00000000, 0x00000000, 0x00000000,
396         0x00000000, 0x00000000, 0x00000000, 0x00000000,
397         0x00000000, 0x00000000, 0x00000000, 0x00000000,
398 };
399
400 /* Packet types for packets with an Innermost/Last SCTP header */
401 static const u32 ice_ptypes_sctp_il[] = {
402         0x08000000, 0x01020204, 0x20000081, 0x04080810,
403         0x00000204, 0x00000000, 0x00000000, 0x00000000,
404         0x00000000, 0x01040000, 0x00000000, 0x00000000,
405         0x00000000, 0x00000000, 0x00000000, 0x00000000,
406         0x00000000, 0x00000000, 0x00000000, 0x00000000,
407         0x00000000, 0x00000000, 0x00000000, 0x00000000,
408         0x00000000, 0x00000000, 0x00000000, 0x00000000,
409         0x00000000, 0x00000000, 0x00000000, 0x00000000,
410 };
411
412 /* Packet types for packets with an Outermost/First ICMP header */
413 static const u32 ice_ptypes_icmp_of[] = {
414         0x10000000, 0x00000000, 0x00000000, 0x00000000,
415         0x00000000, 0x00000000, 0x00000000, 0x00000000,
416         0x00000000, 0x00000000, 0x00000000, 0x00000000,
417         0x00000000, 0x00000000, 0x00000000, 0x00000000,
418         0x00000000, 0x00000000, 0x00000000, 0x00000000,
419         0x00000000, 0x00000000, 0x00000000, 0x00000000,
420         0x00000000, 0x00000000, 0x00000000, 0x00000000,
421         0x00000000, 0x00000000, 0x00000000, 0x00000000,
422 };
423
424 /* Packet types for packets with an Innermost/Last ICMP header */
425 static const u32 ice_ptypes_icmp_il[] = {
426         0x00000000, 0x02040408, 0x40000102, 0x08101020,
427         0x00000408, 0x00000000, 0x00000000, 0x00000000,
428         0x00000000, 0x00000000, 0x42108000, 0x00000000,
429         0x00000000, 0x00000000, 0x00000000, 0x00000000,
430         0x00000000, 0x00000000, 0x00000000, 0x00000000,
431         0x00000000, 0x00000000, 0x00000000, 0x00000000,
432         0x00000000, 0x00000000, 0x00000000, 0x00000000,
433         0x00000000, 0x00000000, 0x00000000, 0x00000000,
434 };
435
436 /* Packet types for packets with an Outermost/First GRE header */
437 static const u32 ice_ptypes_gre_of[] = {
438         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
439         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
440         0x00000000, 0x00000000, 0x00000000, 0x00000000,
441         0x00000000, 0x00000000, 0x00000000, 0x00000000,
442         0x00000000, 0x00000000, 0x00000000, 0x00000000,
443         0x00000000, 0x00000000, 0x00000000, 0x00000000,
444         0x00000000, 0x00000000, 0x00000000, 0x00000000,
445         0x00000000, 0x00000000, 0x00000000, 0x00000000,
446 };
447
448 /* Packet types for packets with an Innermost/Last MAC header */
449 static const u32 ice_ptypes_mac_il[] = {
450         0x00000000, 0x20000000, 0x00000000, 0x00000000,
451         0x00000000, 0x00000000, 0x00000000, 0x00000000,
452         0x00000000, 0x00000000, 0x00000000, 0x00000000,
453         0x00000000, 0x00000000, 0x00000000, 0x00000000,
454         0x00000000, 0x00000000, 0x00000000, 0x00000000,
455         0x00000000, 0x00000000, 0x00000000, 0x00000000,
456         0x00000000, 0x00000000, 0x00000000, 0x00000000,
457         0x00000000, 0x00000000, 0x00000000, 0x00000000,
458 };
459
460 /* Packet types for GTPC */
461 static const u32 ice_ptypes_gtpc[] = {
462         0x00000000, 0x00000000, 0x00000000, 0x00000000,
463         0x00000000, 0x00000000, 0x00000000, 0x00000000,
464         0x00000000, 0x00000000, 0x000001E0, 0x00000000,
465         0x00000000, 0x00000000, 0x00000000, 0x00000000,
466         0x00000000, 0x00000000, 0x00000000, 0x00000000,
467         0x00000000, 0x00000000, 0x00000000, 0x00000000,
468         0x00000000, 0x00000000, 0x00000000, 0x00000000,
469         0x00000000, 0x00000000, 0x00000000, 0x00000000,
470 };
471
472 /* Packet types for VXLAN with VNI */
473 static const u32 ice_ptypes_vxlan_vni[] = {
474         0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
475         0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
476         0x00000000, 0x00000000, 0x00000000, 0x00000000,
477         0x00000000, 0x00000000, 0x00000000, 0x00000000,
478         0x00000000, 0x00000000, 0x00000000, 0x00000000,
479         0x00000000, 0x00000000, 0x00000000, 0x00000000,
480         0x00000000, 0x00000000, 0x00000000, 0x00000000,
481         0x00000000, 0x00000000, 0x00000000, 0x00000000,
482 };
483
484 /* Packet types for GTPC with TEID */
485 static const u32 ice_ptypes_gtpc_tid[] = {
486         0x00000000, 0x00000000, 0x00000000, 0x00000000,
487         0x00000000, 0x00000000, 0x00000000, 0x00000000,
488         0x00000000, 0x00000000, 0x00000060, 0x00000000,
489         0x00000000, 0x00000000, 0x00000000, 0x00000000,
490         0x00000000, 0x00000000, 0x00000000, 0x00000000,
491         0x00000000, 0x00000000, 0x00000000, 0x00000000,
492         0x00000000, 0x00000000, 0x00000000, 0x00000000,
493         0x00000000, 0x00000000, 0x00000000, 0x00000000,
494 };
495
496 /* Packet types for GTPU */
497 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
498         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
499         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
500         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
501         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
502         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
503         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
504         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
505         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
506         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
507         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
508         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
509         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
510         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
511         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
512         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
513         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
514         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
515         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
516         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
517         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
518 };
519
520 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
521         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
522         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
523         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
524         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
525         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
526         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
527         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
528         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
529         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
530         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
531         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
532         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
533         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
534         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
535         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
536         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
537         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
538         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
539         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
540         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
541 };
542
543 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
544         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
545         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
546         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
547         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
548         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
549         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
550         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
551         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
552         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
553         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
554         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
555         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
556         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
557         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
558         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
559         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
560         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
561         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
562         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
563         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
564 };
565
566 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
567         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
568         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
569         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
570         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
571         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
572         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
573         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
574         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
575         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
576         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
577         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
578         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
579         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
580         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
581         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
582         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
583         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
584         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
585         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
586         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
587 };
588
589 static const u32 ice_ptypes_gtpu[] = {
590         0x00000000, 0x00000000, 0x00000000, 0x00000000,
591         0x00000000, 0x00000000, 0x00000000, 0x00000000,
592         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
593         0x00000000, 0x00000000, 0x00000000, 0x00000000,
594         0x00000000, 0x00000000, 0x00000000, 0x00000000,
595         0x00000000, 0x00000000, 0x00000000, 0x00000000,
596         0x00000000, 0x00000000, 0x00000000, 0x00000000,
597         0x00000000, 0x00000000, 0x00000000, 0x00000000,
598 };
599
/* Packet types for PPPoE */
static const u32 ice_ptypes_pppoe[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
611
/* Packet types for packets with PFCP NODE header */
static const u32 ice_ptypes_pfcp_node[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x80000000, 0x00000002,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
623
/* Packet types for packets with PFCP SESSION header */
static const u32 ice_ptypes_pfcp_session[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000005,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
635
/* Packet types for L2TPv3 */
static const u32 ice_ptypes_l2tpv3[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000300,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
647
/* Packet types for ESP */
static const u32 ice_ptypes_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000003, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
659
/* Packet types for AH */
static const u32 ice_ptypes_ah[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x0000000C, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
671
/* Packet types for packets with NAT_T ESP header */
static const u32 ice_ptypes_nat_t_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000030, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
683
/* Packet types for outer MAC with non-IP payload */
static const u32 ice_ptypes_mac_non_ip_ofos[] = {
	0x00000846, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
694
/* Packet types for GTPU without a trailing IP payload */
static const u32 ice_ptypes_gtpu_no_ip[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000600, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
705
/* Packet types for eCPRI over Ethernet (transport path 0) */
static const u32 ice_ptypes_ecpri_tp0[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000400,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
716
/* Packet types for eCPRI over UDP (transport path 0) */
static const u32 ice_ptypes_udp_ecpri_tp0[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00100000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
727
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk;		/* HW block this profile is built for */
	u16 entry_length; /* # of bytes formatted entry will require */
	u8 es_cnt;			/* # of extraction sequence entries used */
	struct ice_flow_prof *prof;	/* profile under construction */

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];
	/* attributes can be used to add attributes to a particular PTYPE */
	const struct ice_ptype_attributes *attr;
	u16 attr_cnt;			/* number of entries in attr */

	u16 mask[ICE_MAX_FV_WORDS];	/* per-entry extraction masks */
	ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
};
746
/* Header flags grouped as inner-most headers for RSS purposes */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
	ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0)

/* All L2 header flags */
#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* All L3 header flags; a segment may carry at most one of these */
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
/* All L4 header flags; a segment may carry at most one of these */
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER	\
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
766
767 /**
768  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
769  * @segs: array of one or more packet segments that describe the flow
770  * @segs_cnt: number of packet segments provided
771  */
772 static enum ice_status
773 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
774 {
775         u8 i;
776
777         for (i = 0; i < segs_cnt; i++) {
778                 /* Multiple L3 headers */
779                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
780                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
781                         return ICE_ERR_PARAM;
782
783                 /* Multiple L4 headers */
784                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
785                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
786                         return ICE_ERR_PARAM;
787         }
788
789         return ICE_SUCCESS;
790 }
791
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
802
803 /**
804  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
805  * @params: information about the flow to be processed
806  * @seg: index of packet segment whose header size is to be determined
807  */
808 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
809 {
810         u16 sz;
811
812         /* L2 headers */
813         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
814                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
815
816         /* L3 headers */
817         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
818                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
819         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
820                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
821         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
822                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
823         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
824                 /* A L3 header is required if L4 is specified */
825                 return 0;
826
827         /* L4 headers */
828         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
829                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
830         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
831                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
832         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
833                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
834         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
835                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
836
837         return sz;
838 }
839
/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers being present in packet segments of the specified flow profile.
 *
 * The ptypes bitmap starts fully set and is ANDed down with the table for
 * every header requested, leaving only the PTYPEs compatible with all of
 * them. Segment index 0 selects outer-header tables; inner segments select
 * the *_il tables.
 */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof;
	u8 i;

	/* Begin with every PTYPE allowed */
	ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
		   ICE_NONDMA_MEM);

	prof = params->prof;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		const ice_bitmap_t *src;
		u32 hdrs;

		hdrs = prof->segs[i].hdrs;

		/* Ethernet: outer table for segment 0, inner otherwise */
		if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
				(const ice_bitmap_t *)ice_ptypes_mac_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		}

		/* VLAN is only handled for inner segments here */
		if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
			src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		}

		/* ARP is only matched as an outer header */
		if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
			ice_and_bitmap(params->ptypes, params->ptypes,
				       (const ice_bitmap_t *)ice_ptypes_arp_of,
				       ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
			src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		}
		/* L3: table choice depends on IPv4/IPv6, on whether the
		 * "IPV_OTHER" group is also requested, and on whether a
		 * specific L4 follows (otherwise the *_no_l4 tables apply).
		 */
		if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
		    (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
			src = i ?
				(const ice_bitmap_t *)ice_ptypes_ipv4_il :
				(const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
			src = i ?
				(const ice_bitmap_t *)ice_ptypes_ipv6_il :
				(const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
				(const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
				(const ice_bitmap_t *)ice_ptypes_ipv4_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
				(const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
				(const ice_bitmap_t *)ice_ptypes_ipv6_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
			src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
			src = (const ice_bitmap_t *)ice_ptypes_pppoe;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else {
			/* PPPoE not requested: remove its PTYPEs */
			src = (const ice_bitmap_t *)ice_ptypes_pppoe;
			ice_andnot_bitmap(params->ptypes, params->ptypes, src,
					  ICE_FLOW_PTYPE_MAX);
		}

		/* L4 headers */
		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
			src = (const ice_bitmap_t *)ice_ptypes_udp_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
			ice_and_bitmap(params->ptypes, params->ptypes,
				       (const ice_bitmap_t *)ice_ptypes_tcp_il,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
			src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		}

		/* Upper-layer / tunnel protocols — mutually exclusive chain */
		if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
				(const ice_bitmap_t *)ice_ptypes_icmp_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
			/* GRE only narrows the set for the outer segment */
			if (!i) {
				src = (const ice_bitmap_t *)ice_ptypes_gre_of;
				ice_and_bitmap(params->ptypes, params->ptypes,
					       src, ICE_FLOW_PTYPE_MAX);
			}
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpc;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with downlink */
			params->attr = ice_attr_gtpu_down;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with uplink */
			params->attr = ice_attr_gtpu_up;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with Extension Header */
			params->attr = ice_attr_gtpu_eh;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet without Extension Header */
			params->attr = ice_attr_gtpu_session;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
		} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
			src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
			src = (const ice_bitmap_t *)ice_ptypes_esp;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
			src = (const ice_bitmap_t *)ice_ptypes_ah;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
			src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
			src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
			src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		}

		/* PFCP: intersect with the NODE or SESSION table when
		 * requested; otherwise exclude both PFCP PTYPE groups.
		 */
		if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
			if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
				src =
				(const ice_bitmap_t *)ice_ptypes_pfcp_node;
			else
				src =
				(const ice_bitmap_t *)ice_ptypes_pfcp_session;

			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else {
			src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
			ice_andnot_bitmap(params->ptypes, params->ptypes,
					  src, ICE_FLOW_PTYPE_MAX);

			src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
			ice_andnot_bitmap(params->ptypes, params->ptypes,
					  src, ICE_FLOW_PTYPE_MAX);
		}
	}

	return ICE_SUCCESS;
}
1058
1059 /**
1060  * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
1061  * @hw: pointer to the HW struct
1062  * @params: information about the flow to be processed
1063  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1064  *
1065  * This function will allocate an extraction sequence entries for a DWORD size
1066  * chunk of the packet flags.
1067  */
1068 static enum ice_status
1069 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1070                           struct ice_flow_prof_params *params,
1071                           enum ice_flex_mdid_pkt_flags flags)
1072 {
1073         u8 fv_words = hw->blk[params->blk].es.fvw;
1074         u8 idx;
1075
1076         /* Make sure the number of extraction sequence entries required does not
1077          * exceed the block's capacity.
1078          */
1079         if (params->es_cnt >= fv_words)
1080                 return ICE_ERR_MAX_LIMIT;
1081
1082         /* some blocks require a reversed field vector layout */
1083         if (hw->blk[params->blk].es.reverse)
1084                 idx = fv_words - params->es_cnt - 1;
1085         else
1086                 idx = params->es_cnt;
1087
1088         params->es[idx].prot_id = ICE_PROT_META_ID;
1089         params->es[idx].off = flags;
1090         params->es_cnt++;
1091
1092         return ICE_SUCCESS;
1093 }
1094
1095 /**
1096  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1097  * @hw: pointer to the HW struct
1098  * @params: information about the flow to be processed
1099  * @seg: packet segment index of the field to be extracted
1100  * @fld: ID of field to be extracted
1101  * @match: bitfield of all fields
1102  *
1103  * This function determines the protocol ID, offset, and size of the given
1104  * field. It then allocates one or more extraction sequence entries for the
1105  * given field, and fill the entries with protocol ID and offset information.
1106  */
1107 static enum ice_status
1108 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1109                     u8 seg, enum ice_flow_field fld, u64 match)
1110 {
1111         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1112         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1113         u8 fv_words = hw->blk[params->blk].es.fvw;
1114         struct ice_flow_fld_info *flds;
1115         u16 cnt, ese_bits, i;
1116         u16 sib_mask = 0;
1117         u16 mask;
1118         u16 off;
1119
1120         flds = params->prof->segs[seg].fields;
1121
1122         switch (fld) {
1123         case ICE_FLOW_FIELD_IDX_ETH_DA:
1124         case ICE_FLOW_FIELD_IDX_ETH_SA:
1125         case ICE_FLOW_FIELD_IDX_S_VLAN:
1126         case ICE_FLOW_FIELD_IDX_C_VLAN:
1127                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1128                 break;
1129         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1130                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1131                 break;
1132         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1133                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1134                 break;
1135         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1136                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1137                 break;
1138         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1139         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1140                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1141
1142                 /* TTL and PROT share the same extraction seq. entry.
1143                  * Each is considered a sibling to the other in terms of sharing
1144                  * the same extraction sequence entry.
1145                  */
1146                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1147                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1148                 else
1149                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1150
1151                 /* If the sibling field is also included, that field's
1152                  * mask needs to be included.
1153                  */
1154                 if (match & BIT(sib))
1155                         sib_mask = ice_flds_info[sib].mask;
1156                 break;
1157         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1158         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1159                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1160
1161                 /* TTL and PROT share the same extraction seq. entry.
1162                  * Each is considered a sibling to the other in terms of sharing
1163                  * the same extraction sequence entry.
1164                  */
1165                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1166                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1167                 else
1168                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1169
1170                 /* If the sibling field is also included, that field's
1171                  * mask needs to be included.
1172                  */
1173                 if (match & BIT(sib))
1174                         sib_mask = ice_flds_info[sib].mask;
1175                 break;
1176         case ICE_FLOW_FIELD_IDX_IPV4_SA:
1177         case ICE_FLOW_FIELD_IDX_IPV4_DA:
1178                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1179                 break;
1180         case ICE_FLOW_FIELD_IDX_IPV6_SA:
1181         case ICE_FLOW_FIELD_IDX_IPV6_DA:
1182         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1183         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1184         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1185         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1186         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1187         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1188                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1189                 break;
1190         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1191         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1192         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1193                 prot_id = ICE_PROT_TCP_IL;
1194                 break;
1195         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1196         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1197                 prot_id = ICE_PROT_UDP_IL_OR_S;
1198                 break;
1199         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1200         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1201                 prot_id = ICE_PROT_SCTP_IL;
1202                 break;
1203         case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1204         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1205         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1206         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1207         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1208         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1209         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1210                 /* GTP is accessed through UDP OF protocol */
1211                 prot_id = ICE_PROT_UDP_OF;
1212                 break;
1213         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1214                 prot_id = ICE_PROT_PPPOE;
1215                 break;
1216         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1217                 prot_id = ICE_PROT_UDP_IL_OR_S;
1218                 break;
1219         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1220                 prot_id = ICE_PROT_L2TPV3;
1221                 break;
1222         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1223                 prot_id = ICE_PROT_ESP_F;
1224                 break;
1225         case ICE_FLOW_FIELD_IDX_AH_SPI:
1226                 prot_id = ICE_PROT_ESP_2;
1227                 break;
1228         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1229                 prot_id = ICE_PROT_UDP_IL_OR_S;
1230                 break;
1231         case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1232                 prot_id = ICE_PROT_ECPRI;
1233                 break;
1234         case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1235                 prot_id = ICE_PROT_UDP_IL_OR_S;
1236                 break;
1237         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1238         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1239         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1240         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1241         case ICE_FLOW_FIELD_IDX_ARP_OP:
1242                 prot_id = ICE_PROT_ARP_OF;
1243                 break;
1244         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1245         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1246                 /* ICMP type and code share the same extraction seq. entry */
1247                 prot_id = (params->prof->segs[seg].hdrs &
1248                            ICE_FLOW_SEG_HDR_IPV4) ?
1249                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1250                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1251                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1252                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1253                 break;
1254         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1255                 prot_id = ICE_PROT_GRE_OF;
1256                 break;
1257         default:
1258                 return ICE_ERR_NOT_IMPL;
1259         }
1260
1261         /* Each extraction sequence entry is a word in size, and extracts a
1262          * word-aligned offset from a protocol header.
1263          */
1264         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1265
1266         flds[fld].xtrct.prot_id = prot_id;
1267         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1268                 ICE_FLOW_FV_EXTRACT_SZ;
1269         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1270         flds[fld].xtrct.idx = params->es_cnt;
1271         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1272
1273         /* Adjust the next field-entry index after accommodating the number of
1274          * entries this field consumes
1275          */
1276         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1277                                   ice_flds_info[fld].size, ese_bits);
1278
1279         /* Fill in the extraction sequence entries needed for this field */
1280         off = flds[fld].xtrct.off;
1281         mask = flds[fld].xtrct.mask;
1282         for (i = 0; i < cnt; i++) {
1283                 /* Only consume an extraction sequence entry if there is no
1284                  * sibling field associated with this field or the sibling entry
1285                  * already extracts the word shared with this field.
1286                  */
1287                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1288                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1289                     flds[sib].xtrct.off != off) {
1290                         u8 idx;
1291
1292                         /* Make sure the number of extraction sequence required
1293                          * does not exceed the block's capability
1294                          */
1295                         if (params->es_cnt >= fv_words)
1296                                 return ICE_ERR_MAX_LIMIT;
1297
1298                         /* some blocks require a reversed field vector layout */
1299                         if (hw->blk[params->blk].es.reverse)
1300                                 idx = fv_words - params->es_cnt - 1;
1301                         else
1302                                 idx = params->es_cnt;
1303
1304                         params->es[idx].prot_id = prot_id;
1305                         params->es[idx].off = off;
1306                         params->mask[idx] = mask | sib_mask;
1307                         params->es_cnt++;
1308                 }
1309
1310                 off += ICE_FLOW_FV_EXTRACT_SZ;
1311         }
1312
1313         return ICE_SUCCESS;
1314 }
1315
/**
 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose raw fields are to be extracted
 *
 * Raw fields match arbitrary bytes at a caller-given offset from the start of
 * the segment (extracted via the outer/single MAC protocol). A raw field may
 * straddle several field-vector words; one extraction sequence entry is
 * consumed per word covered.
 */
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
		     u8 seg)
{
	u16 fv_words;
	u16 hdrs_sz;
	u8 i;

	/* Nothing to do when the segment has no raw match fields */
	if (!params->prof->segs[seg].raws_cnt)
		return ICE_SUCCESS;

	if (params->prof->segs[seg].raws_cnt >
	    ARRAY_SIZE(params->prof->segs[seg].raws))
		return ICE_ERR_MAX_LIMIT;

	/* Offsets within the segment headers are not supported */
	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
	if (!hdrs_sz)
		return ICE_ERR_PARAM;

	fv_words = hw->blk[params->blk].es.fvw;

	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
		struct ice_flow_seg_fld_raw *raw;
		u16 off, cnt, j;

		raw = &params->prof->segs[seg].raws[i];

		/* Storing extraction information: word-aligned byte offset
		 * plus the bit displacement of the field within that word
		 */
		raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
		raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
			ICE_FLOW_FV_EXTRACT_SZ;
		raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
			BITS_PER_BYTE;
		raw->info.xtrct.idx = params->es_cnt;

		/* Determine the number of field vector entries this raw field
		 * consumes.
		 */
		cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
					  (raw->info.src.last * BITS_PER_BYTE),
					  (ICE_FLOW_FV_EXTRACT_SZ *
					   BITS_PER_BYTE));
		off = raw->info.xtrct.off;
		for (j = 0; j < cnt; j++) {
			u16 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= hw->blk[params->blk].es.count ||
			    params->es_cnt >= ICE_MAX_FV_WORDS)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			params->es[idx].prot_id = raw->info.xtrct.prot_id;
			params->es[idx].off = off;
			params->es_cnt++;
			off += ICE_FLOW_FV_EXTRACT_SZ;
		}
	}

	return ICE_SUCCESS;
}
1391
1392 /**
1393  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1394  * @hw: pointer to the HW struct
1395  * @params: information about the flow to be processed
1396  *
1397  * This function iterates through all matched fields in the given segments, and
1398  * creates an extraction sequence for the fields.
1399  */
1400 static enum ice_status
1401 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1402                           struct ice_flow_prof_params *params)
1403 {
1404         enum ice_status status = ICE_SUCCESS;
1405         u8 i;
1406
1407         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1408          * packet flags
1409          */
1410         if (params->blk == ICE_BLK_ACL) {
1411                 status = ice_flow_xtract_pkt_flags(hw, params,
1412                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1413                 if (status)
1414                         return status;
1415         }
1416
1417         for (i = 0; i < params->prof->segs_cnt; i++) {
1418                 u64 match = params->prof->segs[i].match;
1419                 enum ice_flow_field j;
1420
1421                 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1422                                      ICE_FLOW_FIELD_IDX_MAX) {
1423                         status = ice_flow_xtract_fld(hw, params, i, j, match);
1424                         if (status)
1425                                 return status;
1426                         ice_clear_bit(j, (ice_bitmap_t *)&match);
1427                 }
1428
1429                 /* Process raw matching bytes */
1430                 status = ice_flow_xtract_raws(hw, params, i);
1431                 if (status)
1432                         return status;
1433         }
1434
1435         return status;
1436 }
1437
1438 /**
1439  * ice_flow_sel_acl_scen - returns the specific scenario
1440  * @hw: pointer to the hardware structure
1441  * @params: information about the flow to be processed
1442  *
1443  * This function will return the specific scenario based on the
1444  * params passed to it
1445  */
1446 static enum ice_status
1447 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1448 {
1449         /* Find the best-fit scenario for the provided match width */
1450         struct ice_acl_scen *cand_scen = NULL, *scen;
1451
1452         if (!hw->acl_tbl)
1453                 return ICE_ERR_DOES_NOT_EXIST;
1454
1455         /* Loop through each scenario and match against the scenario width
1456          * to select the specific scenario
1457          */
1458         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1459                 if (scen->eff_width >= params->entry_length &&
1460                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1461                         cand_scen = scen;
1462         if (!cand_scen)
1463                 return ICE_ERR_DOES_NOT_EXIST;
1464
1465         params->prof->cfg.scen = cand_scen;
1466
1467         return ICE_SUCCESS;
1468 }
1469
/**
 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
 * @params: information about the flow to be processed
 *
 * Assigns each matched field and each raw field of every packet segment its
 * position (entry.val) and byte length (entry.last) within the ACL entry's
 * byte-selection area. Range-checked fields consume one of the profile's
 * range-checker slots instead of entry bytes. The resulting entry length in
 * bytes is stored in params->entry_length for later scenario selection.
 */
static enum ice_status
ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
{
	u16 index, i, range_idx = 0;

	/* Byte selection begins at a fixed base index within the profile */
	index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		struct ice_flow_seg_info *seg = &params->prof->segs[i];
		u8 j;

		ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
				     ICE_FLOW_FIELD_IDX_MAX) {
			struct ice_flow_fld_info *fld = &seg->fields[j];

			fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;

			if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
				fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;

				/* Range checking only supported for single
				 * words
				 */
				if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
							fld->xtrct.disp,
							BITS_PER_BYTE * 2) > 1)
					return ICE_ERR_PARAM;

				/* Ranges must define low and high values */
				if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
				    fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
					return ICE_ERR_PARAM;

				/* A range field occupies a range-checker slot
				 * rather than bytes in the entry
				 */
				fld->entry.val = range_idx++;
			} else {
				/* Store adjusted byte-length of field for later
				 * use, taking into account potential
				 * non-byte-aligned displacement
				 */
				fld->entry.last = DIVIDE_AND_ROUND_UP
					(ice_flds_info[j].size +
					 (fld->xtrct.disp % BITS_PER_BYTE),
					 BITS_PER_BYTE);
				fld->entry.val = index;
				index += fld->entry.last;
			}
		}

		/* Raw fields are laid out after the segment's named fields */
		for (j = 0; j < seg->raws_cnt; j++) {
			struct ice_flow_seg_fld_raw *raw = &seg->raws[j];

			raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
			raw->info.entry.val = index;
			raw->info.entry.last = raw->info.src.last;
			index += raw->info.entry.last;
		}
	}

	/* Currently only support using the byte selection base, which only
	 * allows for an effective entry size of 30 bytes. Reject anything
	 * larger.
	 */
	if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
		return ICE_ERR_PARAM;

	/* Only 8 range checkers per profile, reject anything trying to use
	 * more
	 */
	if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
		return ICE_ERR_PARAM;

	/* Store # bytes required for entry for later use */
	params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	return ICE_SUCCESS;
}
1550
1551 /**
1552  * ice_flow_proc_segs - process all packet segments associated with a profile
1553  * @hw: pointer to the HW struct
1554  * @params: information about the flow to be processed
1555  */
1556 static enum ice_status
1557 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1558 {
1559         enum ice_status status;
1560
1561         status = ice_flow_proc_seg_hdrs(params);
1562         if (status)
1563                 return status;
1564
1565         status = ice_flow_create_xtrct_seq(hw, params);
1566         if (status)
1567                 return status;
1568
1569         switch (params->blk) {
1570         case ICE_BLK_FD:
1571         case ICE_BLK_RSS:
1572                 status = ICE_SUCCESS;
1573                 break;
1574         case ICE_BLK_ACL:
1575                 status = ice_flow_acl_def_entry_frmt(params);
1576                 if (status)
1577                         return status;
1578                 status = ice_flow_sel_acl_scen(hw, params);
1579                 if (status)
1580                         return status;
1581                 break;
1582         default:
1583                 return ICE_ERR_NOT_IMPL;
1584         }
1585
1586         return status;
1587 }
1588
1589 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1590 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1591 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1592
1593 /**
1594  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1595  * @hw: pointer to the HW struct
1596  * @blk: classification stage
1597  * @dir: flow direction
1598  * @segs: array of one or more packet segments that describe the flow
1599  * @segs_cnt: number of packet segments provided
1600  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1601  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1602  */
1603 static struct ice_flow_prof *
1604 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1605                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1606                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1607 {
1608         struct ice_flow_prof *p, *prof = NULL;
1609
1610         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1611         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1612                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1613                     segs_cnt && segs_cnt == p->segs_cnt) {
1614                         u8 i;
1615
1616                         /* Check for profile-VSI association if specified */
1617                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1618                             ice_is_vsi_valid(hw, vsi_handle) &&
1619                             !ice_is_bit_set(p->vsis, vsi_handle))
1620                                 continue;
1621
1622                         /* Protocol headers must be checked. Matched fields are
1623                          * checked if specified.
1624                          */
1625                         for (i = 0; i < segs_cnt; i++)
1626                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1627                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1628                                      segs[i].match != p->segs[i].match))
1629                                         break;
1630
1631                         /* A match is found if all segments are matched */
1632                         if (i == segs_cnt) {
1633                                 prof = p;
1634                                 break;
1635                         }
1636                 }
1637         ice_release_lock(&hw->fl_profs_locks[blk]);
1638
1639         return prof;
1640 }
1641
1642 /**
1643  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1644  * @hw: pointer to the HW struct
1645  * @blk: classification stage
1646  * @dir: flow direction
1647  * @segs: array of one or more packet segments that describe the flow
1648  * @segs_cnt: number of packet segments provided
1649  */
1650 u64
1651 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1652                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1653 {
1654         struct ice_flow_prof *p;
1655
1656         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1657                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1658
1659         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1660 }
1661
1662 /**
1663  * ice_flow_find_prof_id - Look up a profile with given profile ID
1664  * @hw: pointer to the HW struct
1665  * @blk: classification stage
1666  * @prof_id: unique ID to identify this flow profile
1667  */
1668 static struct ice_flow_prof *
1669 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1670 {
1671         struct ice_flow_prof *p;
1672
1673         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1674                 if (p->id == prof_id)
1675                         return p;
1676
1677         return NULL;
1678 }
1679
1680 /**
1681  * ice_dealloc_flow_entry - Deallocate flow entry memory
1682  * @hw: pointer to the HW struct
1683  * @entry: flow entry to be removed
1684  */
1685 static void
1686 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1687 {
1688         if (!entry)
1689                 return;
1690
1691         if (entry->entry)
1692                 ice_free(hw, entry->entry);
1693
1694         if (entry->range_buf) {
1695                 ice_free(hw, entry->range_buf);
1696                 entry->range_buf = NULL;
1697         }
1698
1699         if (entry->acts) {
1700                 ice_free(hw, entry->acts);
1701                 entry->acts = NULL;
1702                 entry->acts_cnt = 0;
1703         }
1704
1705         ice_free(hw, entry);
1706 }
1707
1708 /**
1709  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1710  * @hw: pointer to the HW struct
1711  * @blk: classification stage
1712  * @prof_id: the profile ID handle
1713  * @hw_prof_id: pointer to variable to receive the HW profile ID
1714  */
1715 enum ice_status
1716 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1717                      u8 *hw_prof_id)
1718 {
1719         enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1720         struct ice_prof_map *map;
1721
1722         ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1723         map = ice_search_prof_id(hw, blk, prof_id);
1724         if (map) {
1725                 *hw_prof_id = map->prof_id;
1726                 status = ICE_SUCCESS;
1727         }
1728         ice_release_lock(&hw->blk[blk].es.prof_map_lock);
1729         return status;
1730 }
1731
1732 #define ICE_ACL_INVALID_SCEN    0x3f
1733
1734 /**
1735  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1736  * @hw: pointer to the hardware structure
1737  * @prof: pointer to flow profile
1738  * @buf: destination buffer function writes partial extraction sequence to
1739  *
1740  * returns ICE_SUCCESS if no PF is associated to the given profile
1741  * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1742  * returns other error code for real error
1743  */
1744 static enum ice_status
1745 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1746                             struct ice_aqc_acl_prof_generic_frmt *buf)
1747 {
1748         enum ice_status status;
1749         u8 prof_id = 0;
1750
1751         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1752         if (status)
1753                 return status;
1754
1755         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1756         if (status)
1757                 return status;
1758
1759         /* If all PF's associated scenarios are all 0 or all
1760          * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1761          * not been configured yet.
1762          */
1763         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1764             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1765             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1766             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1767                 return ICE_SUCCESS;
1768
1769         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1770             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1771             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1772             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1773             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1774             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1775             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1776             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1777                 return ICE_SUCCESS;
1778
1779         return ICE_ERR_IN_USE;
1780 }
1781
1782 /**
1783  * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1784  * @hw: pointer to the hardware structure
1785  * @acts: array of actions to be performed on a match
1786  * @acts_cnt: number of actions
1787  */
1788 static enum ice_status
1789 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1790                            u8 acts_cnt)
1791 {
1792         int i;
1793
1794         for (i = 0; i < acts_cnt; i++) {
1795                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1796                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1797                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1798                         struct ice_acl_cntrs cntrs = { 0 };
1799                         enum ice_status status;
1800
1801                         /* amount is unused in the dealloc path but the common
1802                          * parameter check routine wants a value set, as zero
1803                          * is invalid for the check. Just set it.
1804                          */
1805                         cntrs.amount = 1;
1806                         cntrs.bank = 0; /* Only bank0 for the moment */
1807                         cntrs.first_cntr =
1808                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1809                         cntrs.last_cntr =
1810                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1811
1812                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1813                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1814                         else
1815                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1816
1817                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1818                         if (status)
1819                                 return status;
1820                 }
1821         }
1822         return ICE_SUCCESS;
1823 }
1824
1825 /**
1826  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1827  * @hw: pointer to the hardware structure
1828  * @prof: pointer to flow profile
1829  *
1830  * Disassociate the scenario from the profile for the PF of the VSI.
1831  */
1832 static enum ice_status
1833 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1834 {
1835         struct ice_aqc_acl_prof_generic_frmt buf;
1836         enum ice_status status = ICE_SUCCESS;
1837         u8 prof_id = 0;
1838
1839         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1840
1841         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1842         if (status)
1843                 return status;
1844
1845         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1846         if (status)
1847                 return status;
1848
1849         /* Clear scenario for this PF */
1850         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1851         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
1852
1853         return status;
1854 }
1855
1856 /**
1857  * ice_flow_rem_entry_sync - Remove a flow entry
1858  * @hw: pointer to the HW struct
1859  * @blk: classification stage
1860  * @entry: flow entry to be removed
1861  */
1862 static enum ice_status
1863 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1864                         struct ice_flow_entry *entry)
1865 {
1866         if (!entry)
1867                 return ICE_ERR_BAD_PTR;
1868
1869         if (blk == ICE_BLK_ACL) {
1870                 enum ice_status status;
1871
1872                 if (!entry->prof)
1873                         return ICE_ERR_BAD_PTR;
1874
1875                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1876                                            entry->scen_entry_idx);
1877                 if (status)
1878                         return status;
1879
1880                 /* Checks if we need to release an ACL counter. */
1881                 if (entry->acts_cnt && entry->acts)
1882                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1883                                                    entry->acts_cnt);
1884         }
1885
1886         LIST_DEL(&entry->l_entry);
1887
1888         ice_dealloc_flow_entry(hw, entry);
1889
1890         return ICE_SUCCESS;
1891 }
1892
/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @acts: array of default actions
 * @acts_cnt: number of default actions
 * @prof: stores the returned flow profile added
 *
 * Builds a software flow profile (with its own copies of the segments and
 * actions), processes the segments into an extraction sequence, and programs
 * a matching HW profile. On success the profile is handed to the caller via
 * *prof; on any failure all partially allocated memory is released.
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_action *acts, u8 acts_cnt,
		       struct ice_flow_prof **prof)
{
	struct ice_flow_prof_params *params;
	enum ice_status status;
	u8 i;

	if (!prof || (acts_cnt && !acts))
		return ICE_ERR_BAD_PTR;

	/* params is large, so it is heap-allocated rather than kept on the
	 * stack; NOTE(review): fields such as attr_cnt and ptypes are read
	 * below without being set here, so this presumes ice_malloc returns
	 * zeroed memory - confirm against the OS-dep layer
	 */
	params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
	if (!params)
		return ICE_ERR_NO_MEMORY;

	params->prof = (struct ice_flow_prof *)
		ice_malloc(hw, sizeof(*params->prof));
	if (!params->prof) {
		status = ICE_ERR_NO_MEMORY;
		goto free_params;
	}

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params->es[i].prot_id = ICE_PROT_INVALID;
		params->es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params->blk = blk;
	params->prof->id = prof_id;
	params->prof->dir = dir;
	params->prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
			   ICE_NONDMA_TO_NONDMA);

	/* Make a copy of the actions that need to be persistent in the flow
	 * profile instance.
	 */
	if (acts_cnt) {
		params->prof->acts = (struct ice_flow_action *)
			ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
				   ICE_NONDMA_TO_NONDMA);

		if (!params->prof->acts) {
			status = ICE_ERR_NO_MEMORY;
			goto out;
		}
	}

	/* Resolve headers and build the extraction sequence */
	status = ice_flow_proc_segs(hw, params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
			      params->attr, params->attr_cnt, params->es,
			      params->mask);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params->prof->entries);
	ice_init_lock(&params->prof->entries_lock);
	*prof = params->prof;

	/* On failure the profile (and its action copy) is freed here; on
	 * success ownership has transferred to the caller via *prof. params
	 * itself is always freed.
	 */
out:
	if (status) {
		if (params->prof->acts)
			ice_free(hw, params->prof->acts);
		ice_free(hw, params->prof);
	}
free_params:
	ice_free(hw, params);

	return status;
}
1994
1995 /**
1996  * ice_flow_rem_prof_sync - remove a flow profile
1997  * @hw: pointer to the hardware structure
1998  * @blk: classification stage
1999  * @prof: pointer to flow profile to remove
2000  *
2001  * Assumption: the caller has acquired the lock to the profile list
2002  */
2003 static enum ice_status
2004 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2005                        struct ice_flow_prof *prof)
2006 {
2007         enum ice_status status;
2008
2009         /* Remove all remaining flow entries before removing the flow profile */
2010         if (!LIST_EMPTY(&prof->entries)) {
2011                 struct ice_flow_entry *e, *t;
2012
2013                 ice_acquire_lock(&prof->entries_lock);
2014
2015                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2016                                          l_entry) {
2017                         status = ice_flow_rem_entry_sync(hw, blk, e);
2018                         if (status)
2019                                 break;
2020                 }
2021
2022                 ice_release_lock(&prof->entries_lock);
2023         }
2024
2025         if (blk == ICE_BLK_ACL) {
2026                 struct ice_aqc_acl_profile_ranges query_rng_buf;
2027                 struct ice_aqc_acl_prof_generic_frmt buf;
2028                 u8 prof_id = 0;
2029
2030                 /* Disassociate the scenario from the profile for the PF */
2031                 status = ice_flow_acl_disassoc_scen(hw, prof);
2032                 if (status)
2033                         return status;
2034
2035                 /* Clear the range-checker if the profile ID is no longer
2036                  * used by any PF
2037                  */
2038                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2039                 if (status && status != ICE_ERR_IN_USE) {
2040                         return status;
2041                 } else if (!status) {
2042                         /* Clear the range-checker value for profile ID */
2043                         ice_memset(&query_rng_buf, 0,
2044                                    sizeof(struct ice_aqc_acl_profile_ranges),
2045                                    ICE_NONDMA_MEM);
2046
2047                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
2048                                                       &prof_id);
2049                         if (status)
2050                                 return status;
2051
2052                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2053                                                           &query_rng_buf, NULL);
2054                         if (status)
2055                                 return status;
2056                 }
2057         }
2058
2059         /* Remove all hardware profiles associated with this flow profile */
2060         status = ice_rem_prof(hw, blk, prof->id);
2061         if (!status) {
2062                 LIST_DEL(&prof->l_entry);
2063                 ice_destroy_lock(&prof->entries_lock);
2064                 if (prof->acts)
2065                         ice_free(hw, prof->acts);
2066                 ice_free(hw, prof);
2067         }
2068
2069         return status;
2070 }
2071
2072 /**
2073  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2074  * @buf: Destination buffer function writes partial xtrct sequence to
2075  * @info: Info about field
2076  */
2077 static void
2078 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2079                                struct ice_flow_fld_info *info)
2080 {
2081         u16 dst, i;
2082         u8 src;
2083
2084         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2085                 info->xtrct.disp / BITS_PER_BYTE;
2086         dst = info->entry.val;
2087         for (i = 0; i < info->entry.last; i++)
2088                 /* HW stores field vector words in LE, convert words back to BE
2089                  * so constructed entries will end up in network order
2090                  */
2091                 buf->byte_selection[dst++] = src++ ^ 1;
2092 }
2093
/**
 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
 * @hw: pointer to the hardware structure
 * @prof: pointer to flow profile
 *
 * Programs the profile's byte/word selection (extraction sequence) and the
 * scenario number for the current PF. The profile-wide portion is written
 * only when no other PF already uses the profile; the current PF's scenario
 * slot is always updated.
 */
static enum ice_status
ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
{
	struct ice_aqc_acl_prof_generic_frmt buf;
	struct ice_flow_fld_info *info;
	enum ice_status status;
	u8 prof_id = 0;
	u16 i;

	ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

	/* Translate the software profile ID into the HW profile ID */
	status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
	if (status)
		return status;

	/* ICE_ERR_IN_USE means another PF already programmed the
	 * profile-dependent configuration; only bail on real errors.
	 */
	status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
	if (status && status != ICE_ERR_IN_USE)
		return status;

	if (!status) {
		/* Program the profile dependent configuration. This is done
		 * only once regardless of the number of PFs using that profile
		 */
		ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

		for (i = 0; i < prof->segs_cnt; i++) {
			struct ice_flow_seg_info *seg = &prof->segs[i];
			u16 j;

			ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
					     ICE_FLOW_FIELD_IDX_MAX) {
				info = &seg->fields[j];

				/* Range-checked fields select a whole FV
				 * word; other fields select individual bytes
				 */
				if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
					buf.word_selection[info->entry.val] =
						info->xtrct.idx;
				else
					ice_flow_acl_set_xtrct_seq_fld(&buf,
								       info);
			}

			for (j = 0; j < seg->raws_cnt; j++) {
				info = &seg->raws[j].info;
				ice_flow_acl_set_xtrct_seq_fld(&buf, info);
			}
		}

		/* Mark every PF scenario slot invalid until a PF claims it */
		ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
			   ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
			   ICE_NONDMA_MEM);
	}

	/* Update the current PF */
	buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
	status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);

	return status;
}
2157
2158 /**
2159  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2160  * @hw: pointer to the hardware structure
2161  * @blk: classification stage
2162  * @vsi_handle: software VSI handle
2163  * @vsig: target VSI group
2164  *
2165  * Assumption: the caller has already verified that the VSI to
2166  * be added has the same characteristics as the VSIG and will
2167  * thereby have access to all resources added to that VSIG.
2168  */
2169 enum ice_status
2170 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2171                         u16 vsig)
2172 {
2173         enum ice_status status;
2174
2175         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2176                 return ICE_ERR_PARAM;
2177
2178         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2179         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2180                                   vsig);
2181         ice_release_lock(&hw->fl_profs_locks[blk]);
2182
2183         return status;
2184 }
2185
2186 /**
2187  * ice_flow_assoc_prof - associate a VSI with a flow profile
2188  * @hw: pointer to the hardware structure
2189  * @blk: classification stage
2190  * @prof: pointer to flow profile
2191  * @vsi_handle: software VSI handle
2192  *
2193  * Assumption: the caller has acquired the lock to the profile list
2194  * and the software VSI handle has been validated
2195  */
2196 enum ice_status
2197 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2198                     struct ice_flow_prof *prof, u16 vsi_handle)
2199 {
2200         enum ice_status status = ICE_SUCCESS;
2201
2202         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2203                 if (blk == ICE_BLK_ACL) {
2204                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2205                         if (status)
2206                                 return status;
2207                 }
2208                 status = ice_add_prof_id_flow(hw, blk,
2209                                               ice_get_hw_vsi_num(hw,
2210                                                                  vsi_handle),
2211                                               prof->id);
2212                 if (!status)
2213                         ice_set_bit(vsi_handle, prof->vsis);
2214                 else
2215                         ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2216                                   status);
2217         }
2218
2219         return status;
2220 }
2221
2222 /**
2223  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2224  * @hw: pointer to the hardware structure
2225  * @blk: classification stage
2226  * @prof: pointer to flow profile
2227  * @vsi_handle: software VSI handle
2228  *
2229  * Assumption: the caller has acquired the lock to the profile list
2230  * and the software VSI handle has been validated
2231  */
2232 static enum ice_status
2233 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2234                        struct ice_flow_prof *prof, u16 vsi_handle)
2235 {
2236         enum ice_status status = ICE_SUCCESS;
2237
2238         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2239                 status = ice_rem_prof_id_flow(hw, blk,
2240                                               ice_get_hw_vsi_num(hw,
2241                                                                  vsi_handle),
2242                                               prof->id);
2243                 if (!status)
2244                         ice_clear_bit(vsi_handle, prof->vsis);
2245                 else
2246                         ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2247                                   status);
2248         }
2249
2250         return status;
2251 }
2252
2253 /**
2254  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2255  * @hw: pointer to the HW struct
2256  * @blk: classification stage
2257  * @dir: flow direction
2258  * @prof_id: unique ID to identify this flow profile
2259  * @segs: array of one or more packet segments that describe the flow
2260  * @segs_cnt: number of packet segments provided
2261  * @acts: array of default actions
2262  * @acts_cnt: number of default actions
2263  * @prof: stores the returned flow profile added
2264  */
2265 enum ice_status
2266 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2267                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2268                   struct ice_flow_action *acts, u8 acts_cnt,
2269                   struct ice_flow_prof **prof)
2270 {
2271         enum ice_status status;
2272
2273         if (segs_cnt > ICE_FLOW_SEG_MAX)
2274                 return ICE_ERR_MAX_LIMIT;
2275
2276         if (!segs_cnt)
2277                 return ICE_ERR_PARAM;
2278
2279         if (!segs)
2280                 return ICE_ERR_BAD_PTR;
2281
2282         status = ice_flow_val_hdrs(segs, segs_cnt);
2283         if (status)
2284                 return status;
2285
2286         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2287
2288         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2289                                         acts, acts_cnt, prof);
2290         if (!status)
2291                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2292
2293         ice_release_lock(&hw->fl_profs_locks[blk]);
2294
2295         return status;
2296 }
2297
2298 /**
2299  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2300  * @hw: pointer to the HW struct
2301  * @blk: the block for which the flow profile is to be removed
2302  * @prof_id: unique ID of the flow profile to be removed
2303  */
2304 enum ice_status
2305 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2306 {
2307         struct ice_flow_prof *prof;
2308         enum ice_status status;
2309
2310         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2311
2312         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2313         if (!prof) {
2314                 status = ICE_ERR_DOES_NOT_EXIST;
2315                 goto out;
2316         }
2317
2318         /* prof becomes invalid after the call */
2319         status = ice_flow_rem_prof_sync(hw, blk, prof);
2320
2321 out:
2322         ice_release_lock(&hw->fl_profs_locks[blk]);
2323
2324         return status;
2325 }
2326
2327 /**
2328  * ice_flow_find_entry - look for a flow entry using its unique ID
2329  * @hw: pointer to the HW struct
2330  * @blk: classification stage
2331  * @entry_id: unique ID to identify this flow entry
2332  *
2333  * This function looks for the flow entry with the specified unique ID in all
2334  * flow profiles of the specified classification stage. If the entry is found,
2335  * and it returns the handle to the flow entry. Otherwise, it returns
2336  * ICE_FLOW_ENTRY_ID_INVAL.
2337  */
2338 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2339 {
2340         struct ice_flow_entry *found = NULL;
2341         struct ice_flow_prof *p;
2342
2343         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2344
2345         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2346                 struct ice_flow_entry *e;
2347
2348                 ice_acquire_lock(&p->entries_lock);
2349                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2350                         if (e->id == entry_id) {
2351                                 found = e;
2352                                 break;
2353                         }
2354                 ice_release_lock(&p->entries_lock);
2355
2356                 if (found)
2357                         break;
2358         }
2359
2360         ice_release_lock(&hw->fl_profs_locks[blk]);
2361
2362         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2363 }
2364
/**
 * ice_flow_acl_check_actions - Checks the ACL rule's actions
 * @hw: pointer to the hardware structure
 * @acts: array of actions to be performed on a match
 * @acts_cnt: number of actions
 * @cnt_alloc: indicates if an ACL counter has been allocated.
 *
 * Validates that the action list contains only ACL-supported action types
 * with no duplicates, then allocates a HW counter for each counter action
 * and stores the counter index in the action's data field.
 */
static enum ice_status
ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
			   u8 acts_cnt, bool *cnt_alloc)
{
	ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	int i;

	ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	*cnt_alloc = false;

	if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
		return ICE_ERR_OUT_OF_RANGE;

	for (i = 0; i < acts_cnt; i++) {
		/* Only these action types are accepted for ACL rules */
		if (acts[i].type != ICE_FLOW_ACT_NOP &&
		    acts[i].type != ICE_FLOW_ACT_DROP &&
		    acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
		    acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
			return ICE_ERR_CFG;

		/* If the caller want to add two actions of the same type, then
		 * it is considered invalid configuration.
		 */
		if (ice_test_and_set_bit(acts[i].type, dup_check))
			return ICE_ERR_PARAM;
	}

	/* Checks if ACL counters are needed. */
	/* NOTE(review): the validation loop above rejects CNTR_BYTES and
	 * CNTR_PKT_BYTES, so only CNTR_PKT can actually reach this loop -
	 * confirm whether byte counters were meant to be allowed.
	 */
	for (i = 0; i < acts_cnt; i++) {
		if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
		    acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
		    acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
			struct ice_acl_cntrs cntrs = { 0 };
			enum ice_status status;

			cntrs.amount = 1;
			cntrs.bank = 0; /* Only bank0 for the moment */

			/* Counting packets and bytes needs a dual counter */
			if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
				cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
			else
				cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;

			status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
			if (status)
				return status;
			/* Counter index within the bank */
			acts[i].data.acl_act.value =
						CPU_TO_LE16(cntrs.first_cntr);
			*cnt_alloc = true;
		}
	}

	return ICE_SUCCESS;
}
2427
/**
 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
 * @fld: number of the given field
 * @info: info about field
 * @range_buf: range checker configuration buffer
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @range: Input/output param indicating which range checkers are being used
 *
 * Programs one range checker with the field's low/high boundary and mask,
 * and records the checker's usage in @range.
 */
static void
ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
			      struct ice_aqc_acl_profile_ranges *range_buf,
			      u8 *data, u8 *range)
{
	u16 new_mask;

	/* If not specified, default mask is all bits in field */
	new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
		    BIT(ice_flds_info[fld].size) - 1 :
		    (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;

	/* If the mask is 0, then we don't need to worry about this input
	 * range checker value.
	 */
	if (new_mask) {
		/* src.last holds the upper bound and src.val the lower bound
		 * of the range; both are shifted by the extraction
		 * displacement to line up with the extracted FV word.
		 */
		u16 new_high =
			(*(u16 *)(data + info->src.last)) << info->xtrct.disp;
		u16 new_low =
			(*(u16 *)(data + info->src.val)) << info->xtrct.disp;
		u8 range_idx = info->entry.val;

		/* Boundaries and mask are stored big-endian for the HW */
		range_buf->checker_cfg[range_idx].low_boundary =
			CPU_TO_BE16(new_low);
		range_buf->checker_cfg[range_idx].high_boundary =
			CPU_TO_BE16(new_high);
		range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);

		/* Indicate which range checker is being used */
		*range |= BIT(range_idx);
	}
}
2468
/**
 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
 * @fld: number of the given field
 * @info: info about the field
 * @buf: buffer containing the entry
 * @dontcare: buffer containing don't care mask for entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 *
 * Copies the field's match value (and optional mask) from the user data
 * buffer into the ACL entry buffers, shifting each byte by the extraction
 * displacement so the bits line up with the extracted field vector. Bits
 * before and after the field within its boundary bytes are marked as
 * don't care.
 */
static void
ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
			    u8 *dontcare, u8 *data)
{
	/* tmp_s/tmp_m are 16 bits wide so shifted-out bits of one byte can
	 * carry over into the next iteration's byte
	 */
	u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
	bool use_mask = false;
	u8 disp;

	src = info->src.val;
	mask = info->src.mask;
	dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
	/* Bit displacement of the field within its first extracted byte */
	disp = info->xtrct.disp % BITS_PER_BYTE;

	if (mask != ICE_FLOW_FLD_OFF_INVAL)
		use_mask = true;

	for (k = 0; k < info->entry.last; k++, dst++) {
		/* Add overflow bits from previous byte */
		buf[dst] = (tmp_s & 0xff00) >> 8;

		/* If mask is not valid, tmp_m is always zero, so just setting
		 * dontcare to 0 (no masked bits). If mask is valid, pulls in
		 * overflow bits of mask from prev byte
		 */
		dontcare[dst] = (tmp_m & 0xff00) >> 8;

		/* If there is displacement, last byte will only contain
		 * displaced data, but there is no more data to read from user
		 * buffer, so skip so as not to potentially read beyond end of
		 * user buffer
		 */
		if (!disp || k < info->entry.last - 1) {
			/* Store shifted data to use in next byte */
			tmp_s = data[src++] << disp;

			/* Add current (shifted) byte */
			buf[dst] |= tmp_s & 0xff;

			/* Handle mask if valid */
			if (use_mask) {
				tmp_m = (~data[mask++] & 0xff) << disp;
				dontcare[dst] |= tmp_m & 0xff;
			}
		}
	}

	/* Fill in don't care bits at beginning of field */
	if (disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
		for (k = 0; k < disp; k++)
			dontcare[dst] |= BIT(k);
	}

	/* ice_flds_info sizes are in bits (see ICE_FLOW_FLD_INFO) */
	end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;

	/* Fill in don't care bits at end of field */
	if (end_disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
		      info->entry.last - 1;
		for (k = end_disp; k < BITS_PER_BYTE; k++)
			dontcare[dst] |= BIT(k);
	}
}
2540
2541 /**
2542  * ice_flow_acl_frmt_entry - Format ACL entry
2543  * @hw: pointer to the hardware structure
2544  * @prof: pointer to flow profile
2545  * @e: pointer to the flow entry
2546  * @data: pointer to a data buffer containing flow entry's match values/masks
2547  * @acts: array of actions to be performed on a match
2548  * @acts_cnt: number of actions
2549  *
2550  * Formats the key (and key_inverse) to be matched from the data passed in,
2551  * along with data from the flow profile. This key/key_inverse pair makes up
2552  * the 'entry' for an ACL flow entry.
2553  */
2554 static enum ice_status
2555 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2556                         struct ice_flow_entry *e, u8 *data,
2557                         struct ice_flow_action *acts, u8 acts_cnt)
2558 {
2559         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2560         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2561         enum ice_status status;
2562         bool cnt_alloc;
2563         u8 prof_id = 0;
2564         u16 i, buf_sz;
2565
2566         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2567         if (status)
2568                 return status;
2569
2570         /* Format the result action */
2571
2572         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2573         if (status)
2574                 return status;
2575
2576         status = ICE_ERR_NO_MEMORY;
2577
2578         e->acts = (struct ice_flow_action *)
2579                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2580                            ICE_NONDMA_TO_NONDMA);
2581         if (!e->acts)
2582                 goto out;
2583
2584         e->acts_cnt = acts_cnt;
2585
2586         /* Format the matching data */
2587         buf_sz = prof->cfg.scen->width;
2588         buf = (u8 *)ice_malloc(hw, buf_sz);
2589         if (!buf)
2590                 goto out;
2591
2592         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2593         if (!dontcare)
2594                 goto out;
2595
2596         /* 'key' buffer will store both key and key_inverse, so must be twice
2597          * size of buf
2598          */
2599         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2600         if (!key)
2601                 goto out;
2602
2603         range_buf = (struct ice_aqc_acl_profile_ranges *)
2604                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2605         if (!range_buf)
2606                 goto out;
2607
2608         /* Set don't care mask to all 1's to start, will zero out used bytes */
2609         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2610
2611         for (i = 0; i < prof->segs_cnt; i++) {
2612                 struct ice_flow_seg_info *seg = &prof->segs[i];
2613                 u8 j;
2614
2615                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2616                                      ICE_FLOW_FIELD_IDX_MAX) {
2617                         struct ice_flow_fld_info *info = &seg->fields[j];
2618
2619                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2620                                 ice_flow_acl_frmt_entry_range(j, info,
2621                                                               range_buf, data,
2622                                                               &range);
2623                         else
2624                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2625                                                             dontcare, data);
2626                 }
2627
2628                 for (j = 0; j < seg->raws_cnt; j++) {
2629                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2630                         u16 dst, src, mask, k;
2631                         bool use_mask = false;
2632
2633                         src = info->src.val;
2634                         dst = info->entry.val -
2635                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2636                         mask = info->src.mask;
2637
2638                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2639                                 use_mask = true;
2640
2641                         for (k = 0; k < info->entry.last; k++, dst++) {
2642                                 buf[dst] = data[src++];
2643                                 if (use_mask)
2644                                         dontcare[dst] = ~data[mask++];
2645                                 else
2646                                         dontcare[dst] = 0;
2647                         }
2648                 }
2649         }
2650
2651         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2652         dontcare[prof->cfg.scen->pid_idx] = 0;
2653
2654         /* Format the buffer for direction flags */
2655         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2656
2657         if (prof->dir == ICE_FLOW_RX)
2658                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2659
2660         if (range) {
2661                 buf[prof->cfg.scen->rng_chk_idx] = range;
2662                 /* Mark any unused range checkers as don't care */
2663                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2664                 e->range_buf = range_buf;
2665         } else {
2666                 ice_free(hw, range_buf);
2667         }
2668
2669         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2670                              buf_sz);
2671         if (status)
2672                 goto out;
2673
2674         e->entry = key;
2675         e->entry_sz = buf_sz * 2;
2676
2677 out:
2678         if (buf)
2679                 ice_free(hw, buf);
2680
2681         if (dontcare)
2682                 ice_free(hw, dontcare);
2683
2684         if (status && key)
2685                 ice_free(hw, key);
2686
2687         if (status && range_buf) {
2688                 ice_free(hw, range_buf);
2689                 e->range_buf = NULL;
2690         }
2691
2692         if (status && e->acts) {
2693                 ice_free(hw, e->acts);
2694                 e->acts = NULL;
2695                 e->acts_cnt = 0;
2696         }
2697
2698         if (status && cnt_alloc)
2699                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2700
2701         return status;
2702 }
2703
/**
 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
 *                                     the compared data.
 * @prof: pointer to flow profile
 * @e: pointer to the comparing flow entry
 * @do_chg_action: decide if we want to change the ACL action
 * @do_add_entry: decide if we want to add the new ACL entry
 * @do_rem_entry: decide if we want to remove the current ACL entry
 *
 * Find an ACL scenario entry that matches the compared data. In the same time,
 * this function also figure out:
 * a/ If we want to change the ACL action
 * b/ If we want to add the new ACL entry
 * c/ If we want to remove the current ACL entry
 */
static struct ice_flow_entry *
ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
				  struct ice_flow_entry *e, bool *do_chg_action,
				  bool *do_add_entry, bool *do_rem_entry)
{
	struct ice_flow_entry *p, *return_entry = NULL;
	u8 i, j;

	/* Check if:
	 * a/ There exists an entry with same matching data, but different
	 *    priority, then we remove this existing ACL entry. Then, we
	 *    will add the new entry to the ACL scenario.
	 * b/ There exists an entry with same matching data, priority, and
	 *    result action, then we do nothing
	 * c/ There exists an entry with same matching data, priority, but
	 *    different, action, then do only change the action's entry.
	 * d/ Else, we add this new entry to the ACL scenario.
	 */
	*do_chg_action = false;
	*do_add_entry = true;
	*do_rem_entry = false;
	LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
		/* Entries with different matching data are unrelated */
		if (memcmp(p->entry, e->entry, p->entry_sz))
			continue;

		/* From this point, we have the same matching_data. */
		*do_add_entry = false;
		return_entry = p;

		if (p->priority != e->priority) {
			/* matching data && !priority */
			*do_add_entry = true;
			*do_rem_entry = true;
			break;
		}

		/* From this point, we will have matching_data && priority */
		if (p->acts_cnt != e->acts_cnt)
			*do_chg_action = true;
		for (i = 0; i < p->acts_cnt; i++) {
			bool found_not_match = false;

			/* NOTE(review): found_not_match is set as soon as the
			 * FIRST new action j differs from existing action i,
			 * rather than when NO j matches i - so equal action
			 * sets in a different order are flagged as changed.
			 * Confirm whether this is intended.
			 */
			for (j = 0; j < e->acts_cnt; j++)
				if (memcmp(&p->acts[i], &e->acts[j],
					   sizeof(struct ice_flow_action))) {
					found_not_match = true;
					break;
				}

			if (found_not_match) {
				*do_chg_action = true;
				break;
			}
		}

		/* (do_chg_action = true) means :
		 *    matching_data && priority && !result_action
		 * (do_chg_action = false) means :
		 *    matching_data && priority && result_action
		 */
		break;
	}

	return return_entry;
}
2784
2785 /**
2786  * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
2787  * @p: flow priority
2788  */
2789 static enum ice_acl_entry_prio
2790 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
2791 {
2792         enum ice_acl_entry_prio acl_prio;
2793
2794         switch (p) {
2795         case ICE_FLOW_PRIO_LOW:
2796                 acl_prio = ICE_ACL_PRIO_LOW;
2797                 break;
2798         case ICE_FLOW_PRIO_NORMAL:
2799                 acl_prio = ICE_ACL_PRIO_NORMAL;
2800                 break;
2801         case ICE_FLOW_PRIO_HIGH:
2802                 acl_prio = ICE_ACL_PRIO_HIGH;
2803                 break;
2804         default:
2805                 acl_prio = ICE_ACL_PRIO_NORMAL;
2806                 break;
2807         }
2808
2809         return acl_prio;
2810 }
2811
2812 /**
2813  * ice_flow_acl_union_rng_chk - Perform union operation between two
2814  *                              range-range checker buffers
2815  * @dst_buf: pointer to destination range checker buffer
2816  * @src_buf: pointer to source range checker buffer
2817  *
2818  * For this function, we do the union between dst_buf and src_buf
2819  * range checker buffer, and we will save the result back to dst_buf
2820  */
2821 static enum ice_status
2822 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2823                            struct ice_aqc_acl_profile_ranges *src_buf)
2824 {
2825         u8 i, j;
2826
2827         if (!dst_buf || !src_buf)
2828                 return ICE_ERR_BAD_PTR;
2829
2830         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2831                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2832                 bool will_populate = false;
2833
2834                 in_data = &src_buf->checker_cfg[i];
2835
2836                 if (!in_data->mask)
2837                         break;
2838
2839                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2840                         cfg_data = &dst_buf->checker_cfg[j];
2841
2842                         if (!cfg_data->mask ||
2843                             !memcmp(cfg_data, in_data,
2844                                     sizeof(struct ice_acl_rng_data))) {
2845                                 will_populate = true;
2846                                 break;
2847                         }
2848                 }
2849
2850                 if (will_populate) {
2851                         ice_memcpy(cfg_data, in_data,
2852                                    sizeof(struct ice_acl_rng_data),
2853                                    ICE_NONDMA_TO_NONDMA);
2854                 } else {
2855                         /* No available slot left to program range checker */
2856                         return ICE_ERR_MAX_LIMIT;
2857                 }
2858         }
2859
2860         return ICE_SUCCESS;
2861 }
2862
2863 /**
2864  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2865  * @hw: pointer to the hardware structure
2866  * @prof: pointer to flow profile
2867  * @entry: double pointer to the flow entry
2868  *
2869  * For this function, we will look at the current added entries in the
2870  * corresponding ACL scenario. Then, we will perform matching logic to
2871  * see if we want to add/modify/do nothing with this new entry.
2872  */
2873 static enum ice_status
2874 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2875                                  struct ice_flow_entry **entry)
2876 {
2877         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2878         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2879         struct ice_acl_act_entry *acts = NULL;
2880         struct ice_flow_entry *exist;
2881         enum ice_status status = ICE_SUCCESS;
2882         struct ice_flow_entry *e;
2883         u8 i;
2884
2885         if (!entry || !(*entry) || !prof)
2886                 return ICE_ERR_BAD_PTR;
2887
2888         e = *entry;
2889
2890         do_chg_rng_chk = false;
2891         if (e->range_buf) {
2892                 u8 prof_id = 0;
2893
2894                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2895                                               &prof_id);
2896                 if (status)
2897                         return status;
2898
2899                 /* Query the current range-checker value in FW */
2900                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2901                                                    NULL);
2902                 if (status)
2903                         return status;
2904                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2905                            sizeof(struct ice_aqc_acl_profile_ranges),
2906                            ICE_NONDMA_TO_NONDMA);
2907
2908                 /* Generate the new range-checker value */
2909                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2910                 if (status)
2911                         return status;
2912
2913                 /* Reconfigure the range check if the buffer is changed. */
2914                 do_chg_rng_chk = false;
2915                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2916                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2917                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2918                                                           &cfg_rng_buf, NULL);
2919                         if (status)
2920                                 return status;
2921
2922                         do_chg_rng_chk = true;
2923                 }
2924         }
2925
2926         /* Figure out if we want to (change the ACL action) and/or
2927          * (Add the new ACL entry) and/or (Remove the current ACL entry)
2928          */
2929         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2930                                                   &do_add_entry, &do_rem_entry);
2931         if (do_rem_entry) {
2932                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2933                 if (status)
2934                         return status;
2935         }
2936
2937         /* Prepare the result action buffer */
2938         acts = (struct ice_acl_act_entry *)
2939                 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2940         if (!acts)
2941                 return ICE_ERR_NO_MEMORY;
2942
2943         for (i = 0; i < e->acts_cnt; i++)
2944                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2945                            sizeof(struct ice_acl_act_entry),
2946                            ICE_NONDMA_TO_NONDMA);
2947
2948         if (do_add_entry) {
2949                 enum ice_acl_entry_prio prio;
2950                 u8 *keys, *inverts;
2951                 u16 entry_idx;
2952
2953                 keys = (u8 *)e->entry;
2954                 inverts = keys + (e->entry_sz / 2);
2955                 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
2956
2957                 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
2958                                            inverts, acts, e->acts_cnt,
2959                                            &entry_idx);
2960                 if (status)
2961                         goto out;
2962
2963                 e->scen_entry_idx = entry_idx;
2964                 LIST_ADD(&e->l_entry, &prof->entries);
2965         } else {
2966                 if (do_chg_action) {
2967                         /* For the action memory info, update the SW's copy of
2968                          * exist entry with e's action memory info
2969                          */
2970                         ice_free(hw, exist->acts);
2971                         exist->acts_cnt = e->acts_cnt;
2972                         exist->acts = (struct ice_flow_action *)
2973                                 ice_calloc(hw, exist->acts_cnt,
2974                                            sizeof(struct ice_flow_action));
2975                         if (!exist->acts) {
2976                                 status = ICE_ERR_NO_MEMORY;
2977                                 goto out;
2978                         }
2979
2980                         ice_memcpy(exist->acts, e->acts,
2981                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2982                                    ICE_NONDMA_TO_NONDMA);
2983
2984                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2985                                                   e->acts_cnt,
2986                                                   exist->scen_entry_idx);
2987                         if (status)
2988                                 goto out;
2989                 }
2990
2991                 if (do_chg_rng_chk) {
2992                         /* In this case, we want to update the range checker
2993                          * information of the exist entry
2994                          */
2995                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2996                                                             e->range_buf);
2997                         if (status)
2998                                 goto out;
2999                 }
3000
3001                 /* As we don't add the new entry to our SW DB, deallocate its
3002                  * memories, and return the exist entry to the caller
3003                  */
3004                 ice_dealloc_flow_entry(hw, e);
3005                 *(entry) = exist;
3006         }
3007 out:
3008         ice_free(hw, acts);
3009
3010         return status;
3011 }
3012
3013 /**
3014  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3015  * @hw: pointer to the hardware structure
3016  * @prof: pointer to flow profile
3017  * @e: double pointer to the flow entry
3018  */
3019 static enum ice_status
3020 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3021                             struct ice_flow_entry **e)
3022 {
3023         enum ice_status status;
3024
3025         ice_acquire_lock(&prof->entries_lock);
3026         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3027         ice_release_lock(&prof->entries_lock);
3028
3029         return status;
3030 }
3031
/**
 * ice_flow_add_entry - Add a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: ID of the profile to add a new flow entry to
 * @entry_id: unique ID to identify this flow entry
 * @vsi_handle: software VSI handle for the flow entry
 * @prio: priority of the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @acts: arrays of actions to be performed on a match
 * @acts_cnt: number of actions
 * @entry_h: pointer to buffer that receives the new flow entry's handle
 *
 * Allocates a flow entry, associates the VSI with the profile, performs any
 * block-specific entry setup (only ACL has one here), then publishes the
 * entry and returns its handle through @entry_h. On any failure the entry
 * and its match buffer are freed before returning.
 */
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, struct ice_flow_action *acts, u8 acts_cnt,
		   u64 *entry_h)
{
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status = ICE_SUCCESS;

	/* ACL entries must indicate an action */
	if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
		return ICE_ERR_PARAM;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Profile lookup, entry allocation and VSI association happen under
	 * the per-block profile-list lock.
	 */
	ice_acquire_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
	} else {
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile
		 */
		e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
		if (!e)
			status = ICE_ERR_NO_MEMORY;
		else
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	}

	ice_release_lock(&hw->fl_profs_locks[blk]);
	if (status)
		goto out;

	e->id = entry_id;
	e->vsi_handle = vsi_handle;
	e->prof = prof;
	e->priority = prio;

	switch (blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		break;
	case ICE_BLK_ACL:
		/* ACL will handle the entry management */
		status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
						 acts_cnt);
		if (status)
			goto out;

		/* NOTE: this call may replace e with a pre-existing matching
		 * entry, deallocating the one created above.
		 */
		status = ice_flow_acl_add_scen_entry(hw, prof, &e);
		if (status)
			goto out;

		break;
	default:
		status = ICE_ERR_NOT_IMPL;
		goto out;
	}

	if (blk != ICE_BLK_ACL) {
		/* ACL will handle the entry management */
		ice_acquire_lock(&prof->entries_lock);
		LIST_ADD(&e->l_entry, &prof->entries);
		ice_release_lock(&prof->entries_lock);
	}

	*entry_h = ICE_FLOW_ENTRY_HNDL(e);

out:
	/* Failure cleanup: free the match buffer (if formatted) and the
	 * entry itself. e is NULL when allocation never happened.
	 */
	if (status && e) {
		if (e->entry)
			ice_free(hw, e->entry);
		ice_free(hw, e);
	}

	return status;
}
3130
3131 /**
3132  * ice_flow_rem_entry - Remove a flow entry
3133  * @hw: pointer to the HW struct
3134  * @blk: classification stage
3135  * @entry_h: handle to the flow entry to be removed
3136  */
3137 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3138                                    u64 entry_h)
3139 {
3140         struct ice_flow_entry *entry;
3141         struct ice_flow_prof *prof;
3142         enum ice_status status = ICE_SUCCESS;
3143
3144         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3145                 return ICE_ERR_PARAM;
3146
3147         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
3148
3149         /* Retain the pointer to the flow profile as the entry will be freed */
3150         prof = entry->prof;
3151
3152         if (prof) {
3153                 ice_acquire_lock(&prof->entries_lock);
3154                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3155                 ice_release_lock(&prof->entries_lock);
3156         }
3157
3158         return status;
3159 }
3160
3161 /**
3162  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3163  * @seg: packet segment the field being set belongs to
3164  * @fld: field to be set
3165  * @field_type: type of the field
3166  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3167  *           entry's input buffer
3168  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3169  *            input buffer
3170  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3171  *            entry's input buffer
3172  *
3173  * This helper function stores information of a field being matched, including
3174  * the type of the field and the locations of the value to match, the mask, and
3175  * the upper-bound value in the start of the input buffer for a flow entry.
3176  * This function should only be used for fixed-size data structures.
3177  *
3178  * This function also opportunistically determines the protocol headers to be
3179  * present based on the fields being set. Some fields cannot be used alone to
3180  * determine the protocol headers present. Sometimes, fields for particular
3181  * protocol headers are not matched. In those cases, the protocol headers
3182  * must be explicitly set.
3183  */
3184 static void
3185 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3186                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3187                      u16 mask_loc, u16 last_loc)
3188 {
3189         u64 bit = BIT_ULL(fld);
3190
3191         seg->match |= bit;
3192         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3193                 seg->range |= bit;
3194
3195         seg->fields[fld].type = field_type;
3196         seg->fields[fld].src.val = val_loc;
3197         seg->fields[fld].src.mask = mask_loc;
3198         seg->fields[fld].src.last = last_loc;
3199
3200         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3201 }
3202
3203 /**
3204  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3205  * @seg: packet segment the field being set belongs to
3206  * @fld: field to be set
3207  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3208  *           entry's input buffer
3209  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3210  *            input buffer
3211  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3212  *            entry's input buffer
3213  * @range: indicate if field being matched is to be in a range
3214  *
3215  * This function specifies the locations, in the form of byte offsets from the
3216  * start of the input buffer for a flow entry, from where the value to match,
3217  * the mask value, and upper value can be extracted. These locations are then
3218  * stored in the flow profile. When adding a flow entry associated with the
3219  * flow profile, these locations will be used to quickly extract the values and
3220  * create the content of a match entry. This function should only be used for
3221  * fixed-size data structures.
3222  */
3223 void
3224 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3225                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3226 {
3227         enum ice_flow_fld_match_type t = range ?
3228                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3229
3230         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3231 }
3232
3233 /**
3234  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3235  * @seg: packet segment the field being set belongs to
3236  * @fld: field to be set
3237  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3238  *           entry's input buffer
3239  * @pref_loc: location of prefix value from entry's input buffer
3240  * @pref_sz: size of the location holding the prefix value
3241  *
3242  * This function specifies the locations, in the form of byte offsets from the
3243  * start of the input buffer for a flow entry, from where the value to match
3244  * and the IPv4 prefix value can be extracted. These locations are then stored
3245  * in the flow profile. When adding flow entries to the associated flow profile,
3246  * these locations can be used to quickly extract the values to create the
3247  * content of a match entry. This function should only be used for fixed-size
3248  * data structures.
3249  */
3250 void
3251 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3252                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3253 {
3254         /* For this type of field, the "mask" location is for the prefix value's
3255          * location and the "last" location is for the size of the location of
3256          * the prefix value.
3257          */
3258         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3259                              pref_loc, (u16)pref_sz);
3260 }
3261
3262 /**
3263  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3264  * @seg: packet segment the field being set belongs to
3265  * @off: offset of the raw field from the beginning of the segment in bytes
3266  * @len: length of the raw pattern to be matched
3267  * @val_loc: location of the value to match from entry's input buffer
3268  * @mask_loc: location of mask value from entry's input buffer
3269  *
3270  * This function specifies the offset of the raw field to be match from the
3271  * beginning of the specified packet segment, and the locations, in the form of
3272  * byte offsets from the start of the input buffer for a flow entry, from where
3273  * the value to match and the mask value to be extracted. These locations are
3274  * then stored in the flow profile. When adding flow entries to the associated
3275  * flow profile, these locations can be used to quickly extract the values to
3276  * create the content of a match entry. This function should only be used for
3277  * fixed-size data structures.
3278  */
3279 void
3280 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3281                      u16 val_loc, u16 mask_loc)
3282 {
3283         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3284                 seg->raws[seg->raws_cnt].off = off;
3285                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3286                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3287                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3288                 /* The "last" field is used to store the length of the field */
3289                 seg->raws[seg->raws_cnt].info.src.last = len;
3290         }
3291
3292         /* Overflows of "raws" will be handled as an error condition later in
3293          * the flow when this information is processed.
3294          */
3295         seg->raws_cnt++;
3296 }
3297
/* Protocol-header bits usable in an RSS hash configuration, grouped by
 * network layer (L2/L3/L4).
 */
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)

#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)

/* Union of all RSS-valid header bits across the three layers */
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3311
/**
 * ice_flow_set_rss_seg_info - setup packet segments for RSS
 * @segs: pointer to the flow field segment(s)
 * @seg_cnt: segment count
 * @cfg: configure parameters
 *
 * Helper function to extract fields from hash bitmap and use flow
 * header value to set flow field segment for further use in flow
 * profile entry or removal.
 *
 * Returns ICE_ERR_PARAM if the inner segment contains header bits outside
 * the RSS-valid set, or ICE_ERR_CFG if more than one L3 or more than one
 * L4 header is selected.
 */
static enum ice_status
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
			  const struct ice_rss_hash_cfg *cfg)
{
	struct ice_flow_seg_info *seg;
	u64 val;
	u8 i;

	/* set inner most segment */
	seg = &segs[seg_cnt - 1];

	/* Register each hashed field with no input-buffer locations: RSS
	 * entries carry no per-entry match data (all offsets invalid).
	 */
	ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
			     ICE_FLOW_FIELD_IDX_MAX)
		ice_flow_set_fld(seg, (enum ice_flow_field)i,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);

	/* set outer most header */
	if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
		segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
						   ICE_FLOW_SEG_HDR_IPV_OTHER;
	else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
		segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
						   ICE_FLOW_SEG_HDR_IPV_OTHER;

	/* Reject header bits not in the RSS-valid, inner, or IPV_OTHER sets */
	if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
	    ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
		return ICE_ERR_PARAM;

	/* At most one L3 header bit may be set (power-of-two check) */
	val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
	if (val && !ice_is_pow2(val))
		return ICE_ERR_CFG;

	/* Likewise at most one L4 header bit */
	val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
	if (val && !ice_is_pow2(val))
		return ICE_ERR_CFG;

	return ICE_SUCCESS;
}
3363
3364 /**
3365  * ice_rem_vsi_rss_list - remove VSI from RSS list
3366  * @hw: pointer to the hardware structure
3367  * @vsi_handle: software VSI handle
3368  *
3369  * Remove the VSI from all RSS configurations in the list.
3370  */
3371 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3372 {
3373         struct ice_rss_cfg *r, *tmp;
3374
3375         if (LIST_EMPTY(&hw->rss_list_head))
3376                 return;
3377
3378         ice_acquire_lock(&hw->rss_locks);
3379         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3380                                  ice_rss_cfg, l_entry)
3381                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3382                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3383                                 LIST_DEL(&r->l_entry);
3384                                 ice_free(hw, r);
3385                         }
3386         ice_release_lock(&hw->rss_locks);
3387 }
3388
3389 /**
3390  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3391  * @hw: pointer to the hardware structure
3392  * @vsi_handle: software VSI handle
3393  *
3394  * This function will iterate through all flow profiles and disassociate
3395  * the VSI from that profile. If the flow profile has no VSIs it will
3396  * be removed.
3397  */
3398 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3399 {
3400         const enum ice_block blk = ICE_BLK_RSS;
3401         struct ice_flow_prof *p, *t;
3402         enum ice_status status = ICE_SUCCESS;
3403
3404         if (!ice_is_vsi_valid(hw, vsi_handle))
3405                 return ICE_ERR_PARAM;
3406
3407         if (LIST_EMPTY(&hw->fl_profs[blk]))
3408                 return ICE_SUCCESS;
3409
3410         ice_acquire_lock(&hw->rss_locks);
3411         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3412                                  l_entry)
3413                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3414                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3415                         if (status)
3416                                 break;
3417
3418                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3419                                 status = ice_flow_rem_prof(hw, blk, p->id);
3420                                 if (status)
3421                                         break;
3422                         }
3423                 }
3424         ice_release_lock(&hw->rss_locks);
3425
3426         return status;
3427 }
3428
3429 /**
3430  * ice_get_rss_hdr_type - get a RSS profile's header type
3431  * @prof: RSS flow profile
3432  */
3433 static enum ice_rss_cfg_hdr_type
3434 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3435 {
3436         enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3437
3438         if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3439                 hdr_type = ICE_RSS_OUTER_HEADERS;
3440         } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3441                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3442                         hdr_type = ICE_RSS_INNER_HEADERS;
3443                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3444                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3445                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3446                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3447         }
3448
3449         return hdr_type;
3450 }
3451
3452 /**
3453  * ice_rem_rss_list - remove RSS configuration from list
3454  * @hw: pointer to the hardware structure
3455  * @vsi_handle: software VSI handle
3456  * @prof: pointer to flow profile
3457  *
3458  * Assumption: lock has already been acquired for RSS list
3459  */
3460 static void
3461 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3462 {
3463         enum ice_rss_cfg_hdr_type hdr_type;
3464         struct ice_rss_cfg *r, *tmp;
3465
3466         /* Search for RSS hash fields associated to the VSI that match the
3467          * hash configurations associated to the flow profile. If found
3468          * remove from the RSS entry list of the VSI context and delete entry.
3469          */
3470         hdr_type = ice_get_rss_hdr_type(prof);
3471         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3472                                  ice_rss_cfg, l_entry)
3473                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3474                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3475                     r->hash.hdr_type == hdr_type) {
3476                         ice_clear_bit(vsi_handle, r->vsis);
3477                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3478                                 LIST_DEL(&r->l_entry);
3479                                 ice_free(hw, r);
3480                         }
3481                         return;
3482                 }
3483 }
3484
3485 /**
3486  * ice_add_rss_list - add RSS configuration to list
3487  * @hw: pointer to the hardware structure
3488  * @vsi_handle: software VSI handle
3489  * @prof: pointer to flow profile
3490  *
3491  * Assumption: lock has already been acquired for RSS list
3492  */
3493 static enum ice_status
3494 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3495 {
3496         enum ice_rss_cfg_hdr_type hdr_type;
3497         struct ice_rss_cfg *r, *rss_cfg;
3498
3499         hdr_type = ice_get_rss_hdr_type(prof);
3500         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3501                             ice_rss_cfg, l_entry)
3502                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3503                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3504                     r->hash.hdr_type == hdr_type) {
3505                         ice_set_bit(vsi_handle, r->vsis);
3506                         return ICE_SUCCESS;
3507                 }
3508
3509         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3510         if (!rss_cfg)
3511                 return ICE_ERR_NO_MEMORY;
3512
3513         rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3514         rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3515         rss_cfg->hash.hdr_type = hdr_type;
3516         rss_cfg->hash.symm = prof->cfg.symm;
3517         ice_set_bit(vsi_handle, rss_cfg->vsis);
3518
3519         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3520
3521         return ICE_SUCCESS;
3522 }
3523
/* Shift/mask pairs for the three sub-fields packed into a flow profile ID */
#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	62
#define ICE_FLOW_PROF_ENCAP_M	(0x3ULL << ICE_FLOW_PROF_ENCAP_S)

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:61] - Protocol header
 * [62:63] - Encapsulation flag:
 *           0 if non-tunneled
 *           1 if tunneled
 *           2 for tunneled with outer ipv4
 *           3 for tunneled with outer ipv6
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
	((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	       (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	       (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
3544
3545 static void
3546 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3547 {
3548         u32 s = ((src % 4) << 3); /* byte shift */
3549         u32 v = dst | 0x80; /* value to program */
3550         u8 i = src / 4; /* register index */
3551         u32 reg;
3552
3553         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3554         reg = (reg & ~(0xff << s)) | (v << s);
3555         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3556 }
3557
3558 static void
3559 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3560 {
3561         int fv_last_word =
3562                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3563         int i;
3564
3565         for (i = 0; i < len; i++) {
3566                 ice_rss_config_xor_word(hw, prof_id,
3567                                         /* Yes, field vector in GLQF_HSYMM and
3568                                          * GLQF_HINSET is inversed!
3569                                          */
3570                                         fv_last_word - (src + i),
3571                                         fv_last_word - (dst + i));
3572                 ice_rss_config_xor_word(hw, prof_id,
3573                                         fv_last_word - (dst + i),
3574                                         fv_last_word - (src + i));
3575         }
3576 }
3577
/**
 * ice_rss_update_symm - program symmetric (XOR) hashing for a flow profile
 * @hw: pointer to the hardware structure
 * @prof: flow profile whose cfg.symm flag selects symmetric hashing
 *
 * Clears the profile's GLQF_HSYMM registers to their default (non-symmetric)
 * state and, when the profile requests symmetric hashing, programs XOR pairs
 * for each of IPv4/IPv6 source+destination addresses and TCP/UDP/SCTP
 * source+destination ports that were extracted for the last packet segment.
 */
static void
ice_rss_update_symm(struct ice_hw *hw,
		    struct ice_flow_prof *prof)
{
	struct ice_prof_map *map;
	u8 prof_id, m;

	/* Resolve the software profile ID to a HW profile ID; prof_id is
	 * only assigned (and used) when a mapping exists.
	 */
	ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
	map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
	if (map)
		prof_id = map->prof_id;
	ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
	if (!map)
		return;
	/* clear to default */
	for (m = 0; m < 6; m++)
		wr32(hw, GLQF_HSYMM(prof_id, m), 0);
	if (prof->cfg.symm) {
		struct ice_flow_seg_info *seg =
			&prof->segs[prof->segs_cnt - 1];

		struct ice_flow_seg_xtrct *ipv4_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
		struct ice_flow_seg_xtrct *ipv4_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
		struct ice_flow_seg_xtrct *ipv6_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
		struct ice_flow_seg_xtrct *ipv6_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;

		struct ice_flow_seg_xtrct *tcp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *tcp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;

		struct ice_flow_seg_xtrct *udp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *udp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;

		struct ice_flow_seg_xtrct *sctp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *sctp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;

		/* Each pair is programmed only when both source and
		 * destination fields have a nonzero prot_id (presumably a
		 * zero prot_id means the field was not extracted - confirm
		 * against ice_flow_xtract_fld).
		 */
		/* xor IPv4 */
		if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   ipv4_src->idx, ipv4_dst->idx, 2);

		/* xor IPv6 */
		if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   ipv6_src->idx, ipv6_dst->idx, 8);

		/* xor TCP */
		if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   tcp_src->idx, tcp_dst->idx, 1);

		/* xor UDP */
		if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   udp_src->idx, udp_dst->idx, 1);

		/* xor SCTP */
		if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   sctp_src->idx, sctp_dst->idx, 1);
	}
}
3649
/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @cfg: configure parameters
 *
 * Builds the packet segment info described by @cfg, then either reuses an
 * existing RSS flow profile or creates a new one and associates @vsi_handle
 * with it, finally programming the symmetric-hash registers.
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
		     const struct ice_rss_hash_cfg *cfg)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;
	u8 segs_cnt;

	/* Outer-headers-only config needs a single segment; otherwise both
	 * outer and inner segments are described.
	 */
	segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
			ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;

	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
						      sizeof(*segs));
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
	if (status)
		goto exit;

	/* Don't do RSS for GTPU Outer */
	if (segs_cnt == ICE_FLOW_SEG_SINGLE &&
	    segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
		status = ICE_SUCCESS;
		goto exit;
	}

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		if (prof->cfg.symm == cfg->symm)
			goto exit;
		/* same profile, only the symmetric setting differs - just
		 * reprogram the HSYMM registers
		 */
		prof->cfg.symm = cfg->symm;
		goto update_symm;
	}

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		if (prof->cfg.symm == cfg->symm) {
			status = ice_flow_assoc_prof(hw, blk, prof,
						     vsi_handle);
			if (!status)
				status = ice_add_rss_list(hw, vsi_handle,
							  prof);
		} else {
			/* if a profile exist but with different symmetric
			 * requirement, just return error.
			 */
			status = ICE_ERR_NOT_SUPPORTED;
		}
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(cfg->hash_flds,
						       segs[segs_cnt - 1].hdrs,
						       cfg->hdr_type),
				   segs, segs_cnt, NULL, 0, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

	/* NOTE(review): the symmetric registers are programmed even if
	 * ice_add_rss_list failed above; status still reports that failure.
	 */
	prof->cfg.symm = cfg->symm;
update_symm:
	ice_rss_update_symm(hw, prof);

exit:
	ice_free(hw, segs);
	return status;
}
3777
3778 /**
3779  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3780  * @hw: pointer to the hardware structure
3781  * @vsi_handle: software VSI handle
3782  * @cfg: configure parameters
3783  *
3784  * This function will generate a flow profile based on fields associated with
3785  * the input fields to hash on, the flow type and use the VSI number to add
3786  * a flow entry to the profile.
3787  */
3788 enum ice_status
3789 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3790                 const struct ice_rss_hash_cfg *cfg)
3791 {
3792         struct ice_rss_hash_cfg local_cfg;
3793         enum ice_status status;
3794
3795         if (!ice_is_vsi_valid(hw, vsi_handle) ||
3796             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3797             cfg->hash_flds == ICE_HASH_INVALID)
3798                 return ICE_ERR_PARAM;
3799
3800         local_cfg = *cfg;
3801         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3802                 ice_acquire_lock(&hw->rss_locks);
3803                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3804                 ice_release_lock(&hw->rss_locks);
3805         } else {
3806                 ice_acquire_lock(&hw->rss_locks);
3807                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3808                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3809                 if (!status) {
3810                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3811                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3812                                                       &local_cfg);
3813                 }
3814                 ice_release_lock(&hw->rss_locks);
3815         }
3816
3817         return status;
3818 }
3819
3820 /**
3821  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3822  * @hw: pointer to the hardware structure
3823  * @vsi_handle: software VSI handle
3824  * @cfg: configure parameters
3825  *
3826  * Assumption: lock has already been acquired for RSS list
3827  */
3828 static enum ice_status
3829 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3830                      const struct ice_rss_hash_cfg *cfg)
3831 {
3832         const enum ice_block blk = ICE_BLK_RSS;
3833         struct ice_flow_seg_info *segs;
3834         struct ice_flow_prof *prof;
3835         enum ice_status status;
3836         u8 segs_cnt;
3837
3838         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3839                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3840         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3841                                                       sizeof(*segs));
3842         if (!segs)
3843                 return ICE_ERR_NO_MEMORY;
3844
3845         /* Construct the packet segment info from the hashed fields */
3846         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3847         if (status)
3848                 goto out;
3849
3850         /* Don't do RSS for GTPU Outer */
3851         if (segs_cnt == ICE_FLOW_SEG_SINGLE &&
3852             segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3853                 status = ICE_SUCCESS;
3854                 goto out;
3855         }
3856
3857         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3858                                         vsi_handle,
3859                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3860         if (!prof) {
3861                 status = ICE_ERR_DOES_NOT_EXIST;
3862                 goto out;
3863         }
3864
3865         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3866         if (status)
3867                 goto out;
3868
3869         /* Remove RSS configuration from VSI context before deleting
3870          * the flow profile.
3871          */
3872         ice_rem_rss_list(hw, vsi_handle, prof);
3873
3874         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3875                 status = ice_flow_rem_prof(hw, blk, prof->id);
3876
3877 out:
3878         ice_free(hw, segs);
3879         return status;
3880 }
3881
3882 /**
3883  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3884  * @hw: pointer to the hardware structure
3885  * @vsi_handle: software VSI handle
3886  * @cfg: configure parameters
3887  *
3888  * This function will lookup the flow profile based on the input
3889  * hash field bitmap, iterate through the profile entry list of
3890  * that profile and find entry associated with input VSI to be
3891  * removed. Calls are made to underlying flow apis which will in
3892  * turn build or update buffers for RSS XLT1 section.
3893  */
3894 enum ice_status
3895 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3896                 const struct ice_rss_hash_cfg *cfg)
3897 {
3898         struct ice_rss_hash_cfg local_cfg;
3899         enum ice_status status;
3900
3901         if (!ice_is_vsi_valid(hw, vsi_handle) ||
3902             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3903             cfg->hash_flds == ICE_HASH_INVALID)
3904                 return ICE_ERR_PARAM;
3905
3906         ice_acquire_lock(&hw->rss_locks);
3907         local_cfg = *cfg;
3908         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3909                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3910         } else {
3911                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3912                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3913
3914                 if (!status) {
3915                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3916                         status = ice_rem_rss_cfg_sync(hw, vsi_handle,
3917                                                       &local_cfg);
3918                 }
3919         }
3920         ice_release_lock(&hw->rss_locks);
3921
3922         return status;
3923 }
3924
3925 /**
3926  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3927  * @hw: pointer to the hardware structure
3928  * @vsi_handle: software VSI handle
3929  */
3930 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3931 {
3932         enum ice_status status = ICE_SUCCESS;
3933         struct ice_rss_cfg *r;
3934
3935         if (!ice_is_vsi_valid(hw, vsi_handle))
3936                 return ICE_ERR_PARAM;
3937
3938         ice_acquire_lock(&hw->rss_locks);
3939         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3940                             ice_rss_cfg, l_entry) {
3941                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3942                         status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
3943                         if (status)
3944                                 break;
3945                 }
3946         }
3947         ice_release_lock(&hw->rss_locks);
3948
3949         return status;
3950 }
3951
3952 /**
3953  * ice_get_rss_cfg - returns hashed fields for the given header types
3954  * @hw: pointer to the hardware structure
3955  * @vsi_handle: software VSI handle
3956  * @hdrs: protocol header type
3957  *
3958  * This function will return the match fields of the first instance of flow
3959  * profile having the given header types and containing input VSI
3960  */
3961 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3962 {
3963         u64 rss_hash = ICE_HASH_INVALID;
3964         struct ice_rss_cfg *r;
3965
3966         /* verify if the protocol header is non zero and VSI is valid */
3967         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3968                 return ICE_HASH_INVALID;
3969
3970         ice_acquire_lock(&hw->rss_locks);
3971         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3972                             ice_rss_cfg, l_entry)
3973                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3974                     r->hash.addl_hdrs == hdrs) {
3975                         rss_hash = r->hash.hash_flds;
3976                         break;
3977                 }
3978         ice_release_lock(&hw->rss_locks);
3979
3980         return rss_hash;
3981 }