net/ice/base: cleanup some code style
[dpdk.git] drivers/net/ice/base/ice_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
17 #define ICE_FLOW_FLD_SZ_IP_TTL          1
18 #define ICE_FLOW_FLD_SZ_IP_PROT         1
19 #define ICE_FLOW_FLD_SZ_PORT            2
20 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
21 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
22 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
23 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
24 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
25 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
26 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
27 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
28 #define ICE_FLOW_FLD_SZ_PFCP_SEID       8
29 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
30 #define ICE_FLOW_FLD_SZ_ESP_SPI         4
31 #define ICE_FLOW_FLD_SZ_AH_SPI          4
32 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
33
34 /* Describe properties of a protocol header field */
35 struct ice_flow_field_info {
36         enum ice_flow_seg_hdr hdr;
37         s16 off;        /* Offset from start of a protocol header, in bits */
38         u16 size;       /* Size of the field in bits */
39         u16 mask;       /* 16-bit mask for field */
40 };
41
42 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
43         .hdr = _hdr, \
44         .off = (_offset_bytes) * BITS_PER_BYTE, \
45         .size = (_size_bytes) * BITS_PER_BYTE, \
46         .mask = 0, \
47 }
48
49 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
50         .hdr = _hdr, \
51         .off = (_offset_bytes) * BITS_PER_BYTE, \
52         .size = (_size_bytes) * BITS_PER_BYTE, \
53         .mask = _mask, \
54 }
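/* For example, the entry ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0,
 * ICE_FLOW_FLD_SZ_PORT) in the table below describes the 16-bit TCP source
 * port: the byte-based offset 0 and size 2 are converted to bit units, and
 * no extraction mask is applied.
 */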
55
56 /* Table containing properties of supported protocol header fields */
57 static const
58 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
59         /* Ether */
60         /* ICE_FLOW_FIELD_IDX_ETH_DA */
61         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
62         /* ICE_FLOW_FIELD_IDX_ETH_SA */
63         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
64         /* ICE_FLOW_FIELD_IDX_S_VLAN */
65         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
66         /* ICE_FLOW_FIELD_IDX_C_VLAN */
67         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
68         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
69         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
70         /* IPv4 / IPv6 */
71         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
72         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
73                               0x00fc),
74         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
75         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
76                               0x0ff0),
77         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
78         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
80         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
81         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
82                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
83         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
84         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
86         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
87         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
88                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
89         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
90         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
91         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
92         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
94         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
95         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
96         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
99                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
100         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
101         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
102                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
103         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
104         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
105                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
106         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
107         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
108                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
109         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
110         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
111                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
112         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
113         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
114                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
115         /* Transport */
116         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
117         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
118         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
119         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
120         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
121         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
122         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
123         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
124         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
125         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
126         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
127         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
128         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
129         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
130         /* ARP */
131         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
132         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
133         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
134         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
136         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
137         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
138         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
139         /* ICE_FLOW_FIELD_IDX_ARP_OP */
140         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
141         /* ICMP */
142         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
143         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
144         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
145         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
146         /* GRE */
147         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
148         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
149         /* GTP */
150         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
151         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
152                           ICE_FLOW_FLD_SZ_GTP_TEID),
153         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
154         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
155                           ICE_FLOW_FLD_SZ_GTP_TEID),
156         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
157         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
158                           ICE_FLOW_FLD_SZ_GTP_TEID),
159         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
160         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
161                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
162         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
163         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
164                           ICE_FLOW_FLD_SZ_GTP_TEID),
165         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
166         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
167                           ICE_FLOW_FLD_SZ_GTP_TEID),
168         /* PPPOE */
169         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
170         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
171                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
172         /* PFCP */
173         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
174         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
175                           ICE_FLOW_FLD_SZ_PFCP_SEID),
176         /* L2TPV3 */
177         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
178         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
179                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
180         /* ESP */
181         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
182         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
183                           ICE_FLOW_FLD_SZ_ESP_SPI),
184         /* AH */
185         /* ICE_FLOW_FIELD_IDX_AH_SPI */
186         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
187                           ICE_FLOW_FLD_SZ_AH_SPI),
188         /* NAT_T_ESP */
189         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
190         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
191                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
192 };
193
194 /* Bitmaps indicating relevant packet types for a particular protocol header
195  *
196  * Packet types for packets with an Outer/First/Single MAC header
197  */
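/* Each ptype table below is an array of 32 32-bit words, i.e. a bitmap
 * covering up to 1024 packet types (PTYPEs). Bit N being set means PTYPE N
 * carries the header the table is named for; the tables are ANDed together
 * in ice_flow_proc_seg_hdrs() to narrow the set of matching PTYPEs.
 */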
198 static const u32 ice_ptypes_mac_ofos[] = {
199         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
200         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
201         0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
202         0x00000000, 0x00000000, 0x00000000, 0x00000000,
203         0x00000000, 0x00000000, 0x00000000, 0x00000000,
204         0x00000000, 0x00000000, 0x00000000, 0x00000000,
205         0x00000000, 0x00000000, 0x00000000, 0x00000000,
206         0x00000000, 0x00000000, 0x00000000, 0x00000000,
207 };
208
209 /* Packet types for packets with an Innermost/Last MAC VLAN header */
210 static const u32 ice_ptypes_macvlan_il[] = {
211         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
212         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
213         0x00000000, 0x00000000, 0x00000000, 0x00000000,
214         0x00000000, 0x00000000, 0x00000000, 0x00000000,
215         0x00000000, 0x00000000, 0x00000000, 0x00000000,
216         0x00000000, 0x00000000, 0x00000000, 0x00000000,
217         0x00000000, 0x00000000, 0x00000000, 0x00000000,
218         0x00000000, 0x00000000, 0x00000000, 0x00000000,
219 };
220
221 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
222  * include IPV4 other PTYPEs
223  */
224 static const u32 ice_ptypes_ipv4_ofos[] = {
225         0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
226         0x00000000, 0x00000155, 0x00000000, 0x00000000,
227         0x00000000, 0x000FC000, 0x00000000, 0x00000000,
228         0x00000000, 0x00000000, 0x00000000, 0x00000000,
229         0x00000000, 0x00000000, 0x00000000, 0x00000000,
230         0x00000000, 0x00000000, 0x00000000, 0x00000000,
231         0x00000000, 0x00000000, 0x00000000, 0x00000000,
232         0x00000000, 0x00000000, 0x00000000, 0x00000000,
233 };
234
235 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
236  * IPV4 other PTYPEs
237  */
238 static const u32 ice_ptypes_ipv4_ofos_all[] = {
239         0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
240         0x00000000, 0x00000155, 0x00000000, 0x00000000,
241         0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
242         0x00000000, 0x00000000, 0x00000000, 0x00000000,
243         0x00000000, 0x00000000, 0x00000000, 0x00000000,
244         0x00000000, 0x00000000, 0x00000000, 0x00000000,
245         0x00000000, 0x00000000, 0x00000000, 0x00000000,
246         0x00000000, 0x00000000, 0x00000000, 0x00000000,
247 };
248
249 /* Packet types for packets with an Innermost/Last IPv4 header */
250 static const u32 ice_ptypes_ipv4_il[] = {
251         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
252         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
253         0x00000000, 0x00000000, 0x001FF800, 0x00000000,
254         0x00000000, 0x00000000, 0x00000000, 0x00000000,
255         0x00000000, 0x00000000, 0x00000000, 0x00000000,
256         0x00000000, 0x00000000, 0x00000000, 0x00000000,
257         0x00000000, 0x00000000, 0x00000000, 0x00000000,
258         0x00000000, 0x00000000, 0x00000000, 0x00000000,
259 };
260
261 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
262  * include IPV6 other PTYPEs
263  */
264 static const u32 ice_ptypes_ipv6_ofos[] = {
265         0x00000000, 0x00000000, 0x77000000, 0x10002000,
266         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
267         0x00000000, 0x03F00000, 0x00000000, 0x00000000,
268         0x00000000, 0x00000000, 0x00000000, 0x00000000,
269         0x00000000, 0x00000000, 0x00000000, 0x00000000,
270         0x00000000, 0x00000000, 0x00000000, 0x00000000,
271         0x00000000, 0x00000000, 0x00000000, 0x00000000,
272         0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 };
274
275 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
276  * IPV6 other PTYPEs
277  */
278 static const u32 ice_ptypes_ipv6_ofos_all[] = {
279         0x00000000, 0x00000000, 0x77000000, 0x10002000,
280         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
281         0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
282         0x00000000, 0x00000000, 0x00000000, 0x00000000,
283         0x00000000, 0x00000000, 0x00000000, 0x00000000,
284         0x00000000, 0x00000000, 0x00000000, 0x00000000,
285         0x00000000, 0x00000000, 0x00000000, 0x00000000,
286         0x00000000, 0x00000000, 0x00000000, 0x00000000,
287 };
288
289 /* Packet types for packets with an Innermost/Last IPv6 header */
290 static const u32 ice_ptypes_ipv6_il[] = {
291         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
292         0x00000770, 0x00000000, 0x00000000, 0x00000000,
293         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
294         0x00000000, 0x00000000, 0x00000000, 0x00000000,
295         0x00000000, 0x00000000, 0x00000000, 0x00000000,
296         0x00000000, 0x00000000, 0x00000000, 0x00000000,
297         0x00000000, 0x00000000, 0x00000000, 0x00000000,
298         0x00000000, 0x00000000, 0x00000000, 0x00000000,
299 };
300
301 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
302 static const u32 ice_ipv4_ofos_no_l4[] = {
303         0x10C00000, 0x04000800, 0x00000000, 0x00000000,
304         0x00000000, 0x00000000, 0x00000000, 0x00000000,
305         0x00000000, 0x000cc000, 0x00000000, 0x00000000,
306         0x00000000, 0x00000000, 0x00000000, 0x00000000,
307         0x00000000, 0x00000000, 0x00000000, 0x00000000,
308         0x00000000, 0x00000000, 0x00000000, 0x00000000,
309         0x00000000, 0x00000000, 0x00000000, 0x00000000,
310         0x00000000, 0x00000000, 0x00000000, 0x00000000,
311 };
312
313 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
314 static const u32 ice_ipv4_il_no_l4[] = {
315         0x60000000, 0x18043008, 0x80000002, 0x6010c021,
316         0x00000008, 0x00000000, 0x00000000, 0x00000000,
317         0x00000000, 0x00000000, 0x00139800, 0x00000000,
318         0x00000000, 0x00000000, 0x00000000, 0x00000000,
319         0x00000000, 0x00000000, 0x00000000, 0x00000000,
320         0x00000000, 0x00000000, 0x00000000, 0x00000000,
321         0x00000000, 0x00000000, 0x00000000, 0x00000000,
322         0x00000000, 0x00000000, 0x00000000, 0x00000000,
323 };
324
325 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
326 static const u32 ice_ipv6_ofos_no_l4[] = {
327         0x00000000, 0x00000000, 0x43000000, 0x10002000,
328         0x00000000, 0x00000000, 0x00000000, 0x00000000,
329         0x00000000, 0x02300000, 0x00000000, 0x00000000,
330         0x00000000, 0x00000000, 0x00000000, 0x00000000,
331         0x00000000, 0x00000000, 0x00000000, 0x00000000,
332         0x00000000, 0x00000000, 0x00000000, 0x00000000,
333         0x00000000, 0x00000000, 0x00000000, 0x00000000,
334         0x00000000, 0x00000000, 0x00000000, 0x00000000,
335 };
336
337 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
338 static const u32 ice_ipv6_il_no_l4[] = {
339         0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
340         0x00000430, 0x00000000, 0x00000000, 0x00000000,
341         0x00000000, 0x00000000, 0x4e600000, 0x00000000,
342         0x00000000, 0x00000000, 0x00000000, 0x00000000,
343         0x00000000, 0x00000000, 0x00000000, 0x00000000,
344         0x00000000, 0x00000000, 0x00000000, 0x00000000,
345         0x00000000, 0x00000000, 0x00000000, 0x00000000,
346         0x00000000, 0x00000000, 0x00000000, 0x00000000,
347 };
348
349 /* Packet types for packets with an Outermost/First ARP header */
350 static const u32 ice_ptypes_arp_of[] = {
351         0x00000800, 0x00000000, 0x00000000, 0x00000000,
352         0x00000000, 0x00000000, 0x00000000, 0x00000000,
353         0x00000000, 0x00000000, 0x00000000, 0x00000000,
354         0x00000000, 0x00000000, 0x00000000, 0x00000000,
355         0x00000000, 0x00000000, 0x00000000, 0x00000000,
356         0x00000000, 0x00000000, 0x00000000, 0x00000000,
357         0x00000000, 0x00000000, 0x00000000, 0x00000000,
358         0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 };
360
361 /* UDP Packet types for non-tunneled packets or tunneled
362  * packets with inner UDP.
363  */
364 static const u32 ice_ptypes_udp_il[] = {
365         0x81000000, 0x20204040, 0x04000010, 0x80810102,
366         0x00000040, 0x00000000, 0x00000000, 0x00000000,
367         0x00000000, 0x00410000, 0x90842000, 0x00000007,
368         0x00000000, 0x00000000, 0x00000000, 0x00000000,
369         0x00000000, 0x00000000, 0x00000000, 0x00000000,
370         0x00000000, 0x00000000, 0x00000000, 0x00000000,
371         0x00000000, 0x00000000, 0x00000000, 0x00000000,
372         0x00000000, 0x00000000, 0x00000000, 0x00000000,
373 };
374
375 /* Packet types for packets with an Innermost/Last TCP header */
376 static const u32 ice_ptypes_tcp_il[] = {
377         0x04000000, 0x80810102, 0x10000040, 0x02040408,
378         0x00000102, 0x00000000, 0x00000000, 0x00000000,
379         0x00000000, 0x00820000, 0x21084000, 0x00000000,
380         0x00000000, 0x00000000, 0x00000000, 0x00000000,
381         0x00000000, 0x00000000, 0x00000000, 0x00000000,
382         0x00000000, 0x00000000, 0x00000000, 0x00000000,
383         0x00000000, 0x00000000, 0x00000000, 0x00000000,
384         0x00000000, 0x00000000, 0x00000000, 0x00000000,
385 };
386
387 /* Packet types for packets with an Innermost/Last SCTP header */
388 static const u32 ice_ptypes_sctp_il[] = {
389         0x08000000, 0x01020204, 0x20000081, 0x04080810,
390         0x00000204, 0x00000000, 0x00000000, 0x00000000,
391         0x00000000, 0x01040000, 0x00000000, 0x00000000,
392         0x00000000, 0x00000000, 0x00000000, 0x00000000,
393         0x00000000, 0x00000000, 0x00000000, 0x00000000,
394         0x00000000, 0x00000000, 0x00000000, 0x00000000,
395         0x00000000, 0x00000000, 0x00000000, 0x00000000,
396         0x00000000, 0x00000000, 0x00000000, 0x00000000,
397 };
398
399 /* Packet types for packets with an Outermost/First ICMP header */
400 static const u32 ice_ptypes_icmp_of[] = {
401         0x10000000, 0x00000000, 0x00000000, 0x00000000,
402         0x00000000, 0x00000000, 0x00000000, 0x00000000,
403         0x00000000, 0x00000000, 0x00000000, 0x00000000,
404         0x00000000, 0x00000000, 0x00000000, 0x00000000,
405         0x00000000, 0x00000000, 0x00000000, 0x00000000,
406         0x00000000, 0x00000000, 0x00000000, 0x00000000,
407         0x00000000, 0x00000000, 0x00000000, 0x00000000,
408         0x00000000, 0x00000000, 0x00000000, 0x00000000,
409 };
410
411 /* Packet types for packets with an Innermost/Last ICMP header */
412 static const u32 ice_ptypes_icmp_il[] = {
413         0x00000000, 0x02040408, 0x40000102, 0x08101020,
414         0x00000408, 0x00000000, 0x00000000, 0x00000000,
415         0x00000000, 0x00000000, 0x42108000, 0x00000000,
416         0x00000000, 0x00000000, 0x00000000, 0x00000000,
417         0x00000000, 0x00000000, 0x00000000, 0x00000000,
418         0x00000000, 0x00000000, 0x00000000, 0x00000000,
419         0x00000000, 0x00000000, 0x00000000, 0x00000000,
420         0x00000000, 0x00000000, 0x00000000, 0x00000000,
421 };
422
423 /* Packet types for packets with an Outermost/First GRE header */
424 static const u32 ice_ptypes_gre_of[] = {
425         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
426         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
427         0x00000000, 0x00000000, 0x00000000, 0x00000000,
428         0x00000000, 0x00000000, 0x00000000, 0x00000000,
429         0x00000000, 0x00000000, 0x00000000, 0x00000000,
430         0x00000000, 0x00000000, 0x00000000, 0x00000000,
431         0x00000000, 0x00000000, 0x00000000, 0x00000000,
432         0x00000000, 0x00000000, 0x00000000, 0x00000000,
433 };
434
435 /* Packet types for packets with an Innermost/Last MAC header */
436 static const u32 ice_ptypes_mac_il[] = {
437         0x00000000, 0x00000000, 0x00000000, 0x00000000,
438         0x00000000, 0x00000000, 0x00000000, 0x00000000,
439         0x00000000, 0x00000000, 0x00000000, 0x00000000,
440         0x00000000, 0x00000000, 0x00000000, 0x00000000,
441         0x00000000, 0x00000000, 0x00000000, 0x00000000,
442         0x00000000, 0x00000000, 0x00000000, 0x00000000,
443         0x00000000, 0x00000000, 0x00000000, 0x00000000,
444         0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 };
446
447 /* Packet types for GTPC */
448 static const u32 ice_ptypes_gtpc[] = {
449         0x00000000, 0x00000000, 0x00000000, 0x00000000,
450         0x00000000, 0x00000000, 0x00000000, 0x00000000,
451         0x00000000, 0x00000000, 0x00000180, 0x00000000,
452         0x00000000, 0x00000000, 0x00000000, 0x00000000,
453         0x00000000, 0x00000000, 0x00000000, 0x00000000,
454         0x00000000, 0x00000000, 0x00000000, 0x00000000,
455         0x00000000, 0x00000000, 0x00000000, 0x00000000,
456         0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 };
458
459 /* Packet types for GTPC with TEID */
460 static const u32 ice_ptypes_gtpc_tid[] = {
461         0x00000000, 0x00000000, 0x00000000, 0x00000000,
462         0x00000000, 0x00000000, 0x00000000, 0x00000000,
463         0x00000000, 0x00000000, 0x00000060, 0x00000000,
464         0x00000000, 0x00000000, 0x00000000, 0x00000000,
465         0x00000000, 0x00000000, 0x00000000, 0x00000000,
466         0x00000000, 0x00000000, 0x00000000, 0x00000000,
467         0x00000000, 0x00000000, 0x00000000, 0x00000000,
468         0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 };
470
471 /* Packet types for GTPU */
472 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
473         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
474         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
475         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
476         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
477         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
478         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
479         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
480         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
481         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
482         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
483         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
484         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
485         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
486         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
487         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
488         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
489         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
490         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
491         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
492         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
493 };
494
495 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
496         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
497         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
498         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
499         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
500         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
501         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
502         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
503         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
504         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
505         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
506         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
507         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
508         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
509         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
510         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
511         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
512         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
513         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
514         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
515         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
516 };
517
518 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
519         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
520         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
521         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
522         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
523         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
524         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
525         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
526         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
527         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
528         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
529         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
530         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
531         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
532         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
533         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
534         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
535         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
536         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
537         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
538         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
539 };
540
541 static const u32 ice_ptypes_gtpu[] = {
542         0x00000000, 0x00000000, 0x00000000, 0x00000000,
543         0x00000000, 0x00000000, 0x00000000, 0x00000000,
544         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
545         0x00000000, 0x00000000, 0x00000000, 0x00000000,
546         0x00000000, 0x00000000, 0x00000000, 0x00000000,
547         0x00000000, 0x00000000, 0x00000000, 0x00000000,
548         0x00000000, 0x00000000, 0x00000000, 0x00000000,
549         0x00000000, 0x00000000, 0x00000000, 0x00000000,
550 };
551
552 /* Packet types for pppoe */
553 static const u32 ice_ptypes_pppoe[] = {
554         0x00000000, 0x00000000, 0x00000000, 0x00000000,
555         0x00000000, 0x00000000, 0x00000000, 0x00000000,
556         0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
557         0x00000000, 0x00000000, 0x00000000, 0x00000000,
558         0x00000000, 0x00000000, 0x00000000, 0x00000000,
559         0x00000000, 0x00000000, 0x00000000, 0x00000000,
560         0x00000000, 0x00000000, 0x00000000, 0x00000000,
561         0x00000000, 0x00000000, 0x00000000, 0x00000000,
562 };
563
564 /* Packet types for packets with PFCP NODE header */
565 static const u32 ice_ptypes_pfcp_node[] = {
566         0x00000000, 0x00000000, 0x00000000, 0x00000000,
567         0x00000000, 0x00000000, 0x00000000, 0x00000000,
568         0x00000000, 0x00000000, 0x80000000, 0x00000002,
569         0x00000000, 0x00000000, 0x00000000, 0x00000000,
570         0x00000000, 0x00000000, 0x00000000, 0x00000000,
571         0x00000000, 0x00000000, 0x00000000, 0x00000000,
572         0x00000000, 0x00000000, 0x00000000, 0x00000000,
573         0x00000000, 0x00000000, 0x00000000, 0x00000000,
574 };
575
576 /* Packet types for packets with PFCP SESSION header */
577 static const u32 ice_ptypes_pfcp_session[] = {
578         0x00000000, 0x00000000, 0x00000000, 0x00000000,
579         0x00000000, 0x00000000, 0x00000000, 0x00000000,
580         0x00000000, 0x00000000, 0x00000000, 0x00000005,
581         0x00000000, 0x00000000, 0x00000000, 0x00000000,
582         0x00000000, 0x00000000, 0x00000000, 0x00000000,
583         0x00000000, 0x00000000, 0x00000000, 0x00000000,
584         0x00000000, 0x00000000, 0x00000000, 0x00000000,
585         0x00000000, 0x00000000, 0x00000000, 0x00000000,
586 };
587
588 /* Packet types for l2tpv3 */
589 static const u32 ice_ptypes_l2tpv3[] = {
590         0x00000000, 0x00000000, 0x00000000, 0x00000000,
591         0x00000000, 0x00000000, 0x00000000, 0x00000000,
592         0x00000000, 0x00000000, 0x00000000, 0x00000300,
593         0x00000000, 0x00000000, 0x00000000, 0x00000000,
594         0x00000000, 0x00000000, 0x00000000, 0x00000000,
595         0x00000000, 0x00000000, 0x00000000, 0x00000000,
596         0x00000000, 0x00000000, 0x00000000, 0x00000000,
597         0x00000000, 0x00000000, 0x00000000, 0x00000000,
598 };
599
600 /* Packet types for esp */
601 static const u32 ice_ptypes_esp[] = {
602         0x00000000, 0x00000000, 0x00000000, 0x00000000,
603         0x00000000, 0x00000003, 0x00000000, 0x00000000,
604         0x00000000, 0x00000000, 0x00000000, 0x00000000,
605         0x00000000, 0x00000000, 0x00000000, 0x00000000,
606         0x00000000, 0x00000000, 0x00000000, 0x00000000,
607         0x00000000, 0x00000000, 0x00000000, 0x00000000,
608         0x00000000, 0x00000000, 0x00000000, 0x00000000,
609         0x00000000, 0x00000000, 0x00000000, 0x00000000,
610 };
611
612 /* Packet types for ah */
613 static const u32 ice_ptypes_ah[] = {
614         0x00000000, 0x00000000, 0x00000000, 0x00000000,
615         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
616         0x00000000, 0x00000000, 0x00000000, 0x00000000,
617         0x00000000, 0x00000000, 0x00000000, 0x00000000,
618         0x00000000, 0x00000000, 0x00000000, 0x00000000,
619         0x00000000, 0x00000000, 0x00000000, 0x00000000,
620         0x00000000, 0x00000000, 0x00000000, 0x00000000,
621         0x00000000, 0x00000000, 0x00000000, 0x00000000,
622 };
623
624 /* Packet types for packets with NAT_T ESP header */
625 static const u32 ice_ptypes_nat_t_esp[] = {
626         0x00000000, 0x00000000, 0x00000000, 0x00000000,
627         0x00000000, 0x00000030, 0x00000000, 0x00000000,
628         0x00000000, 0x00000000, 0x00000000, 0x00000000,
629         0x00000000, 0x00000000, 0x00000000, 0x00000000,
630         0x00000000, 0x00000000, 0x00000000, 0x00000000,
631         0x00000000, 0x00000000, 0x00000000, 0x00000000,
632         0x00000000, 0x00000000, 0x00000000, 0x00000000,
633         0x00000000, 0x00000000, 0x00000000, 0x00000000,
634 };
635
636 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
637         0x00000846, 0x00000000, 0x00000000, 0x00000000,
638         0x00000000, 0x00000000, 0x00000000, 0x00000000,
639         0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
640         0x00000000, 0x00000000, 0x00000000, 0x00000000,
641         0x00000000, 0x00000000, 0x00000000, 0x00000000,
642         0x00000000, 0x00000000, 0x00000000, 0x00000000,
643         0x00000000, 0x00000000, 0x00000000, 0x00000000,
644         0x00000000, 0x00000000, 0x00000000, 0x00000000,
645 };
646
647 /* Manage parameters and info used during the creation of a flow profile */
648 struct ice_flow_prof_params {
649         enum ice_block blk;
650         u16 entry_length; /* # of bytes a formatted entry will require */
651         u8 es_cnt;
652         struct ice_flow_prof *prof;
653
654         /* For ACL, es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0.
655          * This will give us the direction flags.
656          */
657         struct ice_fv_word es[ICE_MAX_FV_WORDS];
658         /* attributes can be used to add attributes to a particular PTYPE */
659         const struct ice_ptype_attributes *attr;
660         u16 attr_cnt;
661
662         u16 mask[ICE_MAX_FV_WORDS];
663         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
664 };
665
666 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
667         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
668         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
669         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
670         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
671         ICE_FLOW_SEG_HDR_NAT_T_ESP)
672
673 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
674         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
675 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
676         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
677          ICE_FLOW_SEG_HDR_ARP)
678 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
679         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
680          ICE_FLOW_SEG_HDR_SCTP)
681 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
682 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
683         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
684
685 /**
686  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
687  * @segs: array of one or more packet segments that describe the flow
688  * @segs_cnt: number of packet segments provided
689  */
690 static enum ice_status
691 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
692 {
693         u8 i;
694
695         for (i = 0; i < segs_cnt; i++) {
696                 /* Multiple L3 headers */
697                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
698                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
699                         return ICE_ERR_PARAM;
700
701                 /* Multiple L4 headers */
702                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
703                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
704                         return ICE_ERR_PARAM;
705         }
706
707         return ICE_SUCCESS;
708 }
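/* For example, a segment whose hdrs field has both ICE_FLOW_SEG_HDR_IPV4 and
 * ICE_FLOW_SEG_HDR_IPV6 set fails the power-of-two check above and yields
 * ICE_ERR_PARAM; the same check limits each segment to at most one L4 header.
 */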
709
710 /* Sizes of fixed known protocol headers without header options */
711 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
712 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
713 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
714 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
715 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
716 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
717 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
718 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
719 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
720
721 /**
722  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
723  * @params: information about the flow to be processed
724  * @seg: index of packet segment whose header size is to be determined
725  */
726 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
727 {
728         u16 sz;
729
730         /* L2 headers */
731         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
732                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
733
734         /* L3 headers */
735         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
736                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
737         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
738                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
739         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
740                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
741         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
742                 /* An L3 header is required if L4 is specified */
743                 return 0;
744
745         /* L4 headers */
746         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
747                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
748         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
749                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
750         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
751                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
752         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
753                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
754
755         return sz;
756 }
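/* For example, a segment carrying VLAN, IPv6 and UDP headers yields
 * ICE_FLOW_PROT_HDR_SZ_MAC_VLAN + ICE_FLOW_PROT_HDR_SZ_IPV6 +
 * ICE_FLOW_PROT_HDR_SZ_UDP = 16 + 40 + 8 = 64 bytes.
 */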
757
758 /**
759  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
760  * @params: information about the flow to be processed
761  *
762  * This function identifies the packet types associated with the protocol
763  * headers being present in packet segments of the specified flow profile.
764  */
765 static enum ice_status
766 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
767 {
768         struct ice_flow_prof *prof;
769         u8 i;
770
771         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
772                    ICE_NONDMA_MEM);
773
774         prof = params->prof;
775
776         for (i = 0; i < params->prof->segs_cnt; i++) {
777                 const ice_bitmap_t *src;
778                 u32 hdrs;
779
780                 hdrs = prof->segs[i].hdrs;
781
782                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
783                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
784                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
785                         ice_and_bitmap(params->ptypes, params->ptypes, src,
786                                        ICE_FLOW_PTYPE_MAX);
787                 }
788
789                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
790                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
791                         ice_and_bitmap(params->ptypes, params->ptypes, src,
792                                        ICE_FLOW_PTYPE_MAX);
793                 }
794
795                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
796                         ice_and_bitmap(params->ptypes, params->ptypes,
797                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
798                                        ICE_FLOW_PTYPE_MAX);
799                 }
800
801                 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
802                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
803                         ice_and_bitmap(params->ptypes, params->ptypes, src,
804                                        ICE_FLOW_PTYPE_MAX);
805                 }
806                 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
807                     (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
808                         src = i ?
809                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
810                                 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
811                         ice_and_bitmap(params->ptypes, params->ptypes, src,
812                                        ICE_FLOW_PTYPE_MAX);
813                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
814                            (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
815                         src = i ?
816                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
817                                 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
818                         ice_and_bitmap(params->ptypes, params->ptypes, src,
819                                        ICE_FLOW_PTYPE_MAX);
820                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
821                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
822                         src = !i ? (const ice_bitmap_t *)ice_ipv4_ofos_no_l4 :
823                                 (const ice_bitmap_t *)ice_ipv4_il_no_l4;
824                         ice_and_bitmap(params->ptypes, params->ptypes, src,
825                                        ICE_FLOW_PTYPE_MAX);
826                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
827                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
828                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
829                         ice_and_bitmap(params->ptypes, params->ptypes, src,
830                                        ICE_FLOW_PTYPE_MAX);
831                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
832                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
833                         src = !i ? (const ice_bitmap_t *)ice_ipv6_ofos_no_l4 :
834                                 (const ice_bitmap_t *)ice_ipv6_il_no_l4;
835                         ice_and_bitmap(params->ptypes, params->ptypes, src,
836                                        ICE_FLOW_PTYPE_MAX);
837                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
838                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
839                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
840                         ice_and_bitmap(params->ptypes, params->ptypes, src,
841                                        ICE_FLOW_PTYPE_MAX);
842                 }
843
844                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
845                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
846                         ice_and_bitmap(params->ptypes, params->ptypes,
847                                        src, ICE_FLOW_PTYPE_MAX);
848                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
849                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
850                         ice_and_bitmap(params->ptypes, params->ptypes, src,
851                                        ICE_FLOW_PTYPE_MAX);
852                 } else {
853                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
854                         ice_andnot_bitmap(params->ptypes, params->ptypes, src,
855                                           ICE_FLOW_PTYPE_MAX);
856                 }
857
858                 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
859                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
860                         ice_and_bitmap(params->ptypes, params->ptypes, src,
861                                        ICE_FLOW_PTYPE_MAX);
862                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
863                         ice_and_bitmap(params->ptypes, params->ptypes,
864                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
865                                        ICE_FLOW_PTYPE_MAX);
866                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
867                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
868                         ice_and_bitmap(params->ptypes, params->ptypes, src,
869                                        ICE_FLOW_PTYPE_MAX);
870                 }
871
872                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
873                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
874                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
875                         ice_and_bitmap(params->ptypes, params->ptypes, src,
876                                        ICE_FLOW_PTYPE_MAX);
877                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
878                         if (!i) {
879                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
880                                 ice_and_bitmap(params->ptypes, params->ptypes,
881                                                src, ICE_FLOW_PTYPE_MAX);
882                         }
883                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
884                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
885                         ice_and_bitmap(params->ptypes, params->ptypes,
886                                        src, ICE_FLOW_PTYPE_MAX);
887                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
888                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
889                         ice_and_bitmap(params->ptypes, params->ptypes,
890                                        src, ICE_FLOW_PTYPE_MAX);
891                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
892                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
893                         ice_and_bitmap(params->ptypes, params->ptypes,
894                                        src, ICE_FLOW_PTYPE_MAX);
895
896                         /* Attributes for GTP packet with downlink */
897                         params->attr = ice_attr_gtpu_down;
898                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
899                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
900                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
901                         ice_and_bitmap(params->ptypes, params->ptypes,
902                                        src, ICE_FLOW_PTYPE_MAX);
903
904                         /* Attributes for GTP packet with uplink */
905                         params->attr = ice_attr_gtpu_up;
906                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
907                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
908                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
909                         ice_and_bitmap(params->ptypes, params->ptypes,
910                                        src, ICE_FLOW_PTYPE_MAX);
911
912                         /* Attributes for GTP packet with Extension Header */
913                         params->attr = ice_attr_gtpu_eh;
914                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
915                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
916                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
917                         ice_and_bitmap(params->ptypes, params->ptypes,
918                                        src, ICE_FLOW_PTYPE_MAX);
919                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
920                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
921                         ice_and_bitmap(params->ptypes, params->ptypes,
922                                        src, ICE_FLOW_PTYPE_MAX);
923                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
924                         src = (const ice_bitmap_t *)ice_ptypes_esp;
925                         ice_and_bitmap(params->ptypes, params->ptypes,
926                                        src, ICE_FLOW_PTYPE_MAX);
927                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
928                         src = (const ice_bitmap_t *)ice_ptypes_ah;
929                         ice_and_bitmap(params->ptypes, params->ptypes,
930                                        src, ICE_FLOW_PTYPE_MAX);
931                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
932                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
933                         ice_and_bitmap(params->ptypes, params->ptypes,
934                                        src, ICE_FLOW_PTYPE_MAX);
935                 }
936
937                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
938                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
939                                 src =
940                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
941                         else
942                                 src =
943                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
944
945                         ice_and_bitmap(params->ptypes, params->ptypes,
946                                        src, ICE_FLOW_PTYPE_MAX);
947                 } else {
948                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
949                         ice_andnot_bitmap(params->ptypes, params->ptypes,
950                                           src, ICE_FLOW_PTYPE_MAX);
951
952                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
953                         ice_andnot_bitmap(params->ptypes, params->ptypes,
954                                           src, ICE_FLOW_PTYPE_MAX);
955                 }
956         }
957
958         return ICE_SUCCESS;
959 }
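/* For example, an outermost segment with only ICE_FLOW_SEG_HDR_IPV4 and
 * ICE_FLOW_SEG_HDR_UDP set leaves params->ptypes as the intersection of
 * ice_ptypes_ipv4_ofos and ice_ptypes_udp_il, minus the PPPOE and PFCP
 * ptypes cleared by the else branches above.
 */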
960
961 /**
962  * ice_flow_xtract_pkt_flags - Create an extraction seq. entry for packet flags
963  * @hw: pointer to the HW struct
964  * @params: information about the flow to be processed
965  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
966  *
967  * This function will allocate an extraction sequence entry for a DWORD-size
968  * chunk of the packet flags.
969  */
970 static enum ice_status
971 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
972                           struct ice_flow_prof_params *params,
973                           enum ice_flex_mdid_pkt_flags flags)
974 {
975         u8 fv_words = hw->blk[params->blk].es.fvw;
976         u8 idx;
977
978         /* Make sure the number of extraction sequence entries required does not
979          * exceed the block's capacity.
980          */
981         if (params->es_cnt >= fv_words)
982                 return ICE_ERR_MAX_LIMIT;
983
984         /* some blocks require a reversed field vector layout */
985         if (hw->blk[params->blk].es.reverse)
986                 idx = fv_words - params->es_cnt - 1;
987         else
988                 idx = params->es_cnt;
989
990         params->es[idx].prot_id = ICE_PROT_META_ID;
991         params->es[idx].off = flags;
992         params->es_cnt++;
993
994         return ICE_SUCCESS;
995 }
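/* With a reversed field vector layout, the first entry (es_cnt == 0) is
 * written at index fv_words - 1 and later entries fill downward; otherwise
 * entries are filled upward starting from index 0.
 */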
996
997 /**
998  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
999  * @hw: pointer to the HW struct
1000  * @params: information about the flow to be processed
1001  * @seg: packet segment index of the field to be extracted
1002  * @fld: ID of field to be extracted
1003  * @match: bitfield of all fields
1004  *
1005  * This function determines the protocol ID, offset, and size of the given
1006  * field. It then allocates one or more extraction sequence entries for the
1007  * given field, and fills the entries with protocol ID and offset information.
1008  */
1009 static enum ice_status
1010 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1011                     u8 seg, enum ice_flow_field fld, u64 match)
1012 {
1013         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1014         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1015         u8 fv_words = hw->blk[params->blk].es.fvw;
1016         struct ice_flow_fld_info *flds;
1017         u16 cnt, ese_bits, i;
1018         u16 sib_mask = 0;
1019         u16 mask;
1020         u16 off;
1021
1022         flds = params->prof->segs[seg].fields;
1023
1024         switch (fld) {
1025         case ICE_FLOW_FIELD_IDX_ETH_DA:
1026         case ICE_FLOW_FIELD_IDX_ETH_SA:
1027         case ICE_FLOW_FIELD_IDX_S_VLAN:
1028         case ICE_FLOW_FIELD_IDX_C_VLAN:
1029                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1030                 break;
1031         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1032                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1033                 break;
1034         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1035                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1036                 break;
1037         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1038                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1039                 break;
1040         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1041         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1042                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1043
1044                 /* TTL and PROT share the same extraction seq. entry.
1045                  * Each is considered a sibling to the other in terms of sharing
1046                  * the same extraction sequence entry.
1047                  */
1048                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1049                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1050                 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
1051                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1052
1053                 /* If the sibling field is also included, that field's
1054                  * mask needs to be included.
1055                  */
1056                 if (match & BIT(sib))
1057                         sib_mask = ice_flds_info[sib].mask;
1058                 break;
1059         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1060         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1061                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1062
1063                 /* TTL and PROT share the same extraction seq. entry.
1064                  * Each is considered a sibling to the other in terms of sharing
1065                  * the same extraction sequence entry.
1066                  */
1067                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1068                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1069                 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
1070                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1071
1072                 /* If the sibling field is also included, that field's
1073                  * mask needs to be included.
1074                  */
1075                 if (match & BIT(sib))
1076                         sib_mask = ice_flds_info[sib].mask;
1077                 break;
1078         case ICE_FLOW_FIELD_IDX_IPV4_SA:
1079         case ICE_FLOW_FIELD_IDX_IPV4_DA:
1080                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1081                 break;
1082         case ICE_FLOW_FIELD_IDX_IPV6_SA:
1083         case ICE_FLOW_FIELD_IDX_IPV6_DA:
1084         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1085         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1086         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1087         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1088         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1089         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1090                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1091                 break;
1092         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1093         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1094         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1095                 prot_id = ICE_PROT_TCP_IL;
1096                 break;
1097         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1098         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1099                 prot_id = ICE_PROT_UDP_IL_OR_S;
1100                 break;
1101         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1102         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1103                 prot_id = ICE_PROT_SCTP_IL;
1104                 break;
1105         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1106         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1107         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1108         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1109         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1110         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1111                 /* GTP is accessed through UDP OF protocol */
1112                 prot_id = ICE_PROT_UDP_OF;
1113                 break;
1114         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1115                 prot_id = ICE_PROT_PPPOE;
1116                 break;
1117         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1118                 prot_id = ICE_PROT_UDP_IL_OR_S;
1119                 break;
1120         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1121                 prot_id = ICE_PROT_L2TPV3;
1122                 break;
1123         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1124                 prot_id = ICE_PROT_ESP_F;
1125                 break;
1126         case ICE_FLOW_FIELD_IDX_AH_SPI:
1127                 prot_id = ICE_PROT_ESP_2;
1128                 break;
1129         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1130                 prot_id = ICE_PROT_UDP_IL_OR_S;
1131                 break;
1132         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1133         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1134         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1135         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1136         case ICE_FLOW_FIELD_IDX_ARP_OP:
1137                 prot_id = ICE_PROT_ARP_OF;
1138                 break;
1139         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1140         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1141                 /* ICMP type and code share the same extraction seq. entry */
1142                 prot_id = (params->prof->segs[seg].hdrs &
1143                            ICE_FLOW_SEG_HDR_IPV4) ?
1144                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1145                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1146                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1147                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1148                 break;
1149         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1150                 prot_id = ICE_PROT_GRE_OF;
1151                 break;
1152         default:
1153                 return ICE_ERR_NOT_IMPL;
1154         }
1155
1156         /* Each extraction sequence entry is a word in size, and extracts a
1157          * word-aligned offset from a protocol header.
1158          */
1159         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1160
1161         flds[fld].xtrct.prot_id = prot_id;
1162         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1163                 ICE_FLOW_FV_EXTRACT_SZ;
1164         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1165         flds[fld].xtrct.idx = params->es_cnt;
1166         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1167
1168         /* Adjust the next field-entry index after accommodating the number of
1169          * entries this field consumes
1170          */
1171         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1172                                   ice_flds_info[fld].size, ese_bits);
1173
1174         /* Fill in the extraction sequence entries needed for this field */
1175         off = flds[fld].xtrct.off;
1176         mask = flds[fld].xtrct.mask;
1177         for (i = 0; i < cnt; i++) {
1178                 /* Only consume an extraction sequence entry if there is no
1179                  * sibling field associated with this field or the sibling entry
1180                  * already extracts the word shared with this field.
1181                  */
1182                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1183                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1184                     flds[sib].xtrct.off != off) {
1185                         u8 idx;
1186
1187                         /* Make sure the number of extraction sequence entries
1188                          * required does not exceed the block's capability
1189                          */
1190                         if (params->es_cnt >= fv_words)
1191                                 return ICE_ERR_MAX_LIMIT;
1192
1193                         /* some blocks require a reversed field vector layout */
1194                         if (hw->blk[params->blk].es.reverse)
1195                                 idx = fv_words - params->es_cnt - 1;
1196                         else
1197                                 idx = params->es_cnt;
1198
1199                         params->es[idx].prot_id = prot_id;
1200                         params->es[idx].off = off;
1201                         params->mask[idx] = mask | sib_mask;
1202                         params->es_cnt++;
1203                 }
1204
1205                 off += ICE_FLOW_FV_EXTRACT_SZ;
1206         }
1207
1208         return ICE_SUCCESS;
1209 }
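
/* An illustrative sketch, not part of the driver and compiled out on purpose:
 * it restates the word-alignment math used by ice_flow_xtract_fld() above,
 * assuming only the driver's ICE_FLOW_FV_EXTRACT_SZ (2 bytes), BITS_PER_BYTE
 * and DIVIDE_AND_ROUND_UP definitions.
 */
#if 0
struct xtrct_layout {
	u16 off;	/* word-aligned byte offset into the header */
	u8 disp;	/* bit displacement within that word */
	u16 cnt;	/* number of 2-byte extraction entries consumed */
};

static struct xtrct_layout xtrct_layout_of(u16 off_bits, u16 size_bits)
{
	u16 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;	/* 16 */
	struct xtrct_layout l;

	l.off = (off_bits / ese_bits) * ICE_FLOW_FV_EXTRACT_SZ;
	l.disp = (u8)(off_bits % ese_bits);
	l.cnt = DIVIDE_AND_ROUND_UP(l.disp + size_bits, ese_bits);

	/* e.g. the IPv6 source address (byte 8 of the header, 16 bytes long,
	 * i.e. off_bits = 64, size_bits = 128) yields off = 8, disp = 0 and
	 * cnt = 8 word-aligned extraction entries.
	 */
	return l;
}
#endif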
1210
1211 /**
1212  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1213  * @hw: pointer to the HW struct
1214  * @params: information about the flow to be processed
1215  * @seg: index of packet segment whose raw fields are to be extracted
1216  */
1217 static enum ice_status
1218 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1219                      u8 seg)
1220 {
1221         u16 fv_words;
1222         u16 hdrs_sz;
1223         u8 i;
1224
1225         if (!params->prof->segs[seg].raws_cnt)
1226                 return ICE_SUCCESS;
1227
1228         if (params->prof->segs[seg].raws_cnt >
1229             ARRAY_SIZE(params->prof->segs[seg].raws))
1230                 return ICE_ERR_MAX_LIMIT;
1231
1232         /* Offsets within the segment headers are not supported */
1233         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1234         if (!hdrs_sz)
1235                 return ICE_ERR_PARAM;
1236
1237         fv_words = hw->blk[params->blk].es.fvw;
1238
1239         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1240                 struct ice_flow_seg_fld_raw *raw;
1241                 u16 off, cnt, j;
1242
1243                 raw = &params->prof->segs[seg].raws[i];
1244
1245                 /* Storing extraction information */
1246                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1247                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1248                         ICE_FLOW_FV_EXTRACT_SZ;
1249                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1250                         BITS_PER_BYTE;
1251                 raw->info.xtrct.idx = params->es_cnt;
1252
1253                 /* Determine the number of field vector entries this raw field
1254                  * consumes.
1255                  */
1256                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1257                                           (raw->info.src.last * BITS_PER_BYTE),
1258                                           (ICE_FLOW_FV_EXTRACT_SZ *
1259                                            BITS_PER_BYTE));
1260                 off = raw->info.xtrct.off;
1261                 for (j = 0; j < cnt; j++) {
1262                         u16 idx;
1263
1264                         /* Make sure the number of extraction sequence entries
1265                          * required does not exceed the block's capability
1266                          */
1267                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1268                             params->es_cnt >= ICE_MAX_FV_WORDS)
1269                                 return ICE_ERR_MAX_LIMIT;
1270
1271                         /* some blocks require a reversed field vector layout */
1272                         if (hw->blk[params->blk].es.reverse)
1273                                 idx = fv_words - params->es_cnt - 1;
1274                         else
1275                                 idx = params->es_cnt;
1276
1277                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1278                         params->es[idx].off = off;
1279                         params->es_cnt++;
1280                         off += ICE_FLOW_FV_EXTRACT_SZ;
1281                 }
1282         }
1283
1284         return ICE_SUCCESS;
1285 }
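
/* An illustrative sketch, not part of the driver and compiled out on purpose:
 * the same rounding applied to raw matches above, expressed in bytes. A raw
 * match of 5 bytes starting at packet byte 3 rounds down to word offset 2,
 * leaves an 8-bit displacement and therefore needs three extraction entries.
 */
#if 0
static u16 raw_xtrct_cnt(u16 byte_off, u16 len)
{
	u16 disp = (byte_off % ICE_FLOW_FV_EXTRACT_SZ) * BITS_PER_BYTE;

	return DIVIDE_AND_ROUND_UP(disp + len * BITS_PER_BYTE,
				   ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE);
}
#endif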
1286
1287 /**
1288  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1289  * @hw: pointer to the HW struct
1290  * @params: information about the flow to be processed
1291  *
1292  * This function iterates through all matched fields in the given segments, and
1293  * creates an extraction sequence for the fields.
1294  */
1295 static enum ice_status
1296 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1297                           struct ice_flow_prof_params *params)
1298 {
1299         enum ice_status status = ICE_SUCCESS;
1300         u8 i;
1301
1302         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1303          * packet flags
1304          */
1305         if (params->blk == ICE_BLK_ACL) {
1306                 status = ice_flow_xtract_pkt_flags(hw, params,
1307                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1308                 if (status)
1309                         return status;
1310         }
1311
1312         for (i = 0; i < params->prof->segs_cnt; i++) {
1313                 u64 match = params->prof->segs[i].match;
1314                 enum ice_flow_field j;
1315
1316                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1317                         const u64 bit = BIT_ULL(j);
1318
1319                         if (match & bit) {
1320                                 status = ice_flow_xtract_fld(hw, params, i, j,
1321                                                              match);
1322                                 if (status)
1323                                         return status;
1324                                 match &= ~bit;
1325                         }
1326                 }
1327
1328                 /* Process raw matching bytes */
1329                 status = ice_flow_xtract_raws(hw, params, i);
1330                 if (status)
1331                         return status;
1332         }
1333
1334         return status;
1335 }
1336
1337 /**
1338  * ice_flow_sel_acl_scen - select the ACL scenario best fitting the profile
1339  * @hw: pointer to the hardware structure
1340  * @params: information about the flow to be processed
1341  *
1342  * This function selects the narrowest ACL scenario whose effective width
1343  * can hold the entry length computed in the given params.
1344  */
1345 static enum ice_status
1346 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1347 {
1348         /* Find the best-fit scenario for the provided match width */
1349         struct ice_acl_scen *cand_scen = NULL, *scen;
1350
1351         if (!hw->acl_tbl)
1352                 return ICE_ERR_DOES_NOT_EXIST;
1353
1354         /* Loop through each scenario and match against the scenario width
1355          * to select the specific scenario
1356          */
1357         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1358                 if (scen->eff_width >= params->entry_length &&
1359                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1360                         cand_scen = scen;
1361         if (!cand_scen)
1362                 return ICE_ERR_DOES_NOT_EXIST;
1363
1364         params->prof->cfg.scen = cand_scen;
1365
1366         return ICE_SUCCESS;
1367 }
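
/* An illustrative sketch, not part of the driver and compiled out on purpose:
 * the selection loop above keeps the narrowest scenario whose effective width
 * can still hold the computed entry length. With hypothetical candidate
 * widths of {32, 64, 128} and an entry length of 40, the 64-byte scenario
 * wins; an entry length above 128 leaves no candidate at all.
 */
#if 0
static int acl_scen_best_fit(const u16 *widths, int n, u16 entry_length)
{
	int best = -1, i;

	for (i = 0; i < n; i++)
		if (widths[i] >= entry_length &&
		    (best < 0 || widths[best] > widths[i]))
			best = i;

	return best;	/* -1 mirrors the ICE_ERR_DOES_NOT_EXIST case */
}
#endif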
1368
1369 /**
1370  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1371  * @params: information about the flow to be processed
1372  */
1373 static enum ice_status
1374 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1375 {
1376         u16 index, i, range_idx = 0;
1377
1378         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1379
1380         for (i = 0; i < params->prof->segs_cnt; i++) {
1381                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1382                 u64 match = seg->match;
1383                 u8 j;
1384
1385                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1386                         struct ice_flow_fld_info *fld;
1387                         const u64 bit = BIT_ULL(j);
1388
1389                         if (!(match & bit))
1390                                 continue;
1391
1392                         fld = &seg->fields[j];
1393                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1394
1395                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1396                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1397
1398                                 /* Range checking only supported for single
1399                                  * words
1400                                  */
1401                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1402                                                         fld->xtrct.disp,
1403                                                         BITS_PER_BYTE * 2) > 1)
1404                                         return ICE_ERR_PARAM;
1405
1406                                 /* Ranges must define low and high values */
1407                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1408                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1409                                         return ICE_ERR_PARAM;
1410
1411                                 fld->entry.val = range_idx++;
1412                         } else {
1413                                 /* Store adjusted byte-length of field for later
1414                                  * use, taking into account potential
1415                                  * non-byte-aligned displacement
1416                                  */
1417                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1418                                         (ice_flds_info[j].size +
1419                                          (fld->xtrct.disp % BITS_PER_BYTE),
1420                                          BITS_PER_BYTE);
1421                                 fld->entry.val = index;
1422                                 index += fld->entry.last;
1423                         }
1424
1425                         match &= ~bit;
1426                 }
1427
1428                 for (j = 0; j < seg->raws_cnt; j++) {
1429                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1430
1431                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1432                         raw->info.entry.val = index;
1433                         raw->info.entry.last = raw->info.src.last;
1434                         index += raw->info.entry.last;
1435                 }
1436         }
1437
1438         /* Currently, only the byte selection base is supported, which only
1439          * allows for an effective entry size of 30 bytes. Reject anything
1440          * larger.
1441          */
1442         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1443                 return ICE_ERR_PARAM;
1444
1445         /* Only 8 range checkers are available per profile; reject anything
1446          * trying to use more
1447          */
1448         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1449                 return ICE_ERR_PARAM;
1450
1451         /* Store # bytes required for entry for later use */
1452         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1453
1454         return ICE_SUCCESS;
1455 }
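
/* An illustrative sketch, not part of the driver and compiled out on purpose:
 * the per-field byte count accumulated into 'index' above. A profile matching
 * the IPv4 source and destination addresses (4 bytes each, byte aligned) plus
 * one ranged port field consumes 8 byte-selection bytes and one of the 8
 * range checkers, so entry_length ends up as 8.
 */
#if 0
static u16 acl_fld_entry_bytes(u16 size_bits, u8 disp_bits)
{
	/* adjusted byte length, accounting for a non-byte-aligned start */
	return DIVIDE_AND_ROUND_UP(size_bits + (disp_bits % BITS_PER_BYTE),
				   BITS_PER_BYTE);
}
#endif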
1456
1457 /**
1458  * ice_flow_proc_segs - process all packet segments associated with a profile
1459  * @hw: pointer to the HW struct
1460  * @params: information about the flow to be processed
1461  */
1462 static enum ice_status
1463 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1464 {
1465         enum ice_status status;
1466
1467         status = ice_flow_proc_seg_hdrs(params);
1468         if (status)
1469                 return status;
1470
1471         status = ice_flow_create_xtrct_seq(hw, params);
1472         if (status)
1473                 return status;
1474
1475         switch (params->blk) {
1476         case ICE_BLK_FD:
1477         case ICE_BLK_RSS:
1478                 status = ICE_SUCCESS;
1479                 break;
1480         case ICE_BLK_ACL:
1481                 status = ice_flow_acl_def_entry_frmt(params);
1482                 if (status)
1483                         return status;
1484                 status = ice_flow_sel_acl_scen(hw, params);
1485                 if (status)
1486                         return status;
1487                 break;
1488         default:
1489                 return ICE_ERR_NOT_IMPL;
1490         }
1491
1492         return status;
1493 }
1494
1495 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1496 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1497 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1498
1499 /**
1500  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1501  * @hw: pointer to the HW struct
1502  * @blk: classification stage
1503  * @dir: flow direction
1504  * @segs: array of one or more packet segments that describe the flow
1505  * @segs_cnt: number of packet segments provided
1506  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1507  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1508  */
1509 static struct ice_flow_prof *
1510 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1511                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1512                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1513 {
1514         struct ice_flow_prof *p, *prof = NULL;
1515
1516         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1517         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1518                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1519                     segs_cnt && segs_cnt == p->segs_cnt) {
1520                         u8 i;
1521
1522                         /* Check for profile-VSI association if specified */
1523                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1524                             ice_is_vsi_valid(hw, vsi_handle) &&
1525                             !ice_is_bit_set(p->vsis, vsi_handle))
1526                                 continue;
1527
1528                         /* Protocol headers must be checked. Matched fields are
1529                          * checked if specified.
1530                          */
1531                         for (i = 0; i < segs_cnt; i++)
1532                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1533                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1534                                      segs[i].match != p->segs[i].match))
1535                                         break;
1536
1537                         /* A match is found if all segments are matched */
1538                         if (i == segs_cnt) {
1539                                 prof = p;
1540                                 break;
1541                         }
1542                 }
1543         ice_release_lock(&hw->fl_profs_locks[blk]);
1544
1545         return prof;
1546 }
1547
1548 /**
1549  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1550  * @hw: pointer to the HW struct
1551  * @blk: classification stage
1552  * @dir: flow direction
1553  * @segs: array of one or more packet segments that describe the flow
1554  * @segs_cnt: number of packet segments provided
1555  */
1556 u64
1557 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1558                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1559 {
1560         struct ice_flow_prof *p;
1561
1562         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1563                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1564
1565         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1566 }
1567
1568 /**
1569  * ice_flow_find_prof_id - Look up a profile with given profile ID
1570  * @hw: pointer to the HW struct
1571  * @blk: classification stage
1572  * @prof_id: unique ID to identify this flow profile
1573  */
1574 static struct ice_flow_prof *
1575 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1576 {
1577         struct ice_flow_prof *p;
1578
1579         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1580                 if (p->id == prof_id)
1581                         return p;
1582
1583         return NULL;
1584 }
1585
1586 /**
1587  * ice_dealloc_flow_entry - Deallocate flow entry memory
1588  * @hw: pointer to the HW struct
1589  * @entry: flow entry to be removed
1590  */
1591 static void
1592 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1593 {
1594         if (!entry)
1595                 return;
1596
1597         if (entry->entry)
1598                 ice_free(hw, entry->entry);
1599
1600         if (entry->range_buf) {
1601                 ice_free(hw, entry->range_buf);
1602                 entry->range_buf = NULL;
1603         }
1604
1605         if (entry->acts) {
1606                 ice_free(hw, entry->acts);
1607                 entry->acts = NULL;
1608                 entry->acts_cnt = 0;
1609         }
1610
1611         ice_free(hw, entry);
1612 }
1613
1614 #define ICE_ACL_INVALID_SCEN    0x3f
1615
1616 /**
1617  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
1618  * @hw: pointer to the hardware structure
1619  * @prof: pointer to flow profile
1620  * @buf: destination buffer the function writes the partial extraction sequence to
1621  *
1622  * Returns ICE_SUCCESS if no PF is associated with the given profile,
1623  * ICE_ERR_IN_USE if at least one PF is associated with it, or another
1624  * error code if a real error occurs.
1625  */
1626 static enum ice_status
1627 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1628                             struct ice_aqc_acl_prof_generic_frmt *buf)
1629 {
1630         enum ice_status status;
1631         u8 prof_id = 0;
1632
1633         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1634         if (status)
1635                 return status;
1636
1637         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1638         if (status)
1639                 return status;
1640
1641         /* If the scenario numbers of all PFs for the given profile are all 0,
1642          * or are all ICE_ACL_INVALID_SCEN (63), the profile has not been
1643          * configured yet.
1644          */
1645         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1646             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1647             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1648             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1649                 return ICE_SUCCESS;
1650
1651         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1652             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1653             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1654             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1655             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1656             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1657             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1658             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1659                 return ICE_SUCCESS;
1660         else
1661                 return ICE_ERR_IN_USE;
1662 }
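
/* An illustrative sketch, not part of the driver and compiled out on purpose:
 * the unrolled comparisons above are equivalent to checking that every one of
 * the ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS PF slots is 0, or that every one of
 * them is ICE_ACL_INVALID_SCEN.
 */
#if 0
static bool acl_prof_is_unused(const struct ice_aqc_acl_prof_generic_frmt *buf)
{
	bool all_zero = true, all_inval = true;
	u8 i;

	for (i = 0; i < ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS; i++) {
		if (buf->pf_scenario_num[i] != 0)
			all_zero = false;
		if (buf->pf_scenario_num[i] != ICE_ACL_INVALID_SCEN)
			all_inval = false;
	}

	return all_zero || all_inval;
}
#endif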
1663
1664 /**
1665  * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1666  * @hw: pointer to the hardware structure
1667  * @acts: array of actions to be performed on a match
1668  * @acts_cnt: number of actions
1669  */
1670 static enum ice_status
1671 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1672                            u8 acts_cnt)
1673 {
1674         int i;
1675
1676         for (i = 0; i < acts_cnt; i++) {
1677                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1678                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1679                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1680                         struct ice_acl_cntrs cntrs;
1681                         enum ice_status status;
1682
1683                         cntrs.bank = 0; /* Only bank0 for the moment */
1684                         cntrs.first_cntr =
1685                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1686                         cntrs.last_cntr =
1687                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1688
1689                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1690                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1691                         else
1692                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1693
1694                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1695                         if (status)
1696                                 return status;
1697                 }
1698         }
1699         return ICE_SUCCESS;
1700 }
1701
1702 /**
1703  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1704  * @hw: pointer to the hardware structure
1705  * @prof: pointer to flow profile
1706  *
1707  * Disassociate the scenario from the profile for the current PF.
1708  */
1709 static enum ice_status
1710 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1711 {
1712         struct ice_aqc_acl_prof_generic_frmt buf;
1713         enum ice_status status = ICE_SUCCESS;
1714         u8 prof_id = 0;
1715
1716         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1717
1718         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1719         if (status)
1720                 return status;
1721
1722         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1723         if (status)
1724                 return status;
1725
1726         /* Clear scenario for this PF */
1727         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1728         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1729
1730         return status;
1731 }
1732
1733 /**
1734  * ice_flow_rem_entry_sync - Remove a flow entry
1735  * @hw: pointer to the HW struct
1736  * @blk: classification stage
1737  * @entry: flow entry to be removed
1738  */
1739 static enum ice_status
1740 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1741                         struct ice_flow_entry *entry)
1742 {
1743         if (!entry)
1744                 return ICE_ERR_BAD_PTR;
1745
1746         if (blk == ICE_BLK_ACL) {
1747                 enum ice_status status;
1748
1749                 if (!entry->prof)
1750                         return ICE_ERR_BAD_PTR;
1751
1752                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1753                                            entry->scen_entry_idx);
1754                 if (status)
1755                         return status;
1756
1757                 /* Checks if we need to release an ACL counter. */
1758                 if (entry->acts_cnt && entry->acts)
1759                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1760                                                    entry->acts_cnt);
1761         }
1762
1763         LIST_DEL(&entry->l_entry);
1764
1765         ice_dealloc_flow_entry(hw, entry);
1766
1767         return ICE_SUCCESS;
1768 }
1769
1770 /**
1771  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1772  * @hw: pointer to the HW struct
1773  * @blk: classification stage
1774  * @dir: flow direction
1775  * @prof_id: unique ID to identify this flow profile
1776  * @segs: array of one or more packet segments that describe the flow
1777  * @segs_cnt: number of packet segments provided
1778  * @acts: array of default actions
1779  * @acts_cnt: number of default actions
1780  * @prof: stores the returned flow profile added
1781  *
1782  * Assumption: the caller has acquired the lock to the profile list
1783  */
1784 static enum ice_status
1785 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1786                        enum ice_flow_dir dir, u64 prof_id,
1787                        struct ice_flow_seg_info *segs, u8 segs_cnt,
1788                        struct ice_flow_action *acts, u8 acts_cnt,
1789                        struct ice_flow_prof **prof)
1790 {
1791         struct ice_flow_prof_params params;
1792         enum ice_status status;
1793         u8 i;
1794
1795         if (!prof || (acts_cnt && !acts))
1796                 return ICE_ERR_BAD_PTR;
1797
1798         ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
1799         params.prof = (struct ice_flow_prof *)
1800                 ice_malloc(hw, sizeof(*params.prof));
1801         if (!params.prof)
1802                 return ICE_ERR_NO_MEMORY;
1803
1804         /* initialize extraction sequence to all invalid (0xff) */
1805         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1806                 params.es[i].prot_id = ICE_PROT_INVALID;
1807                 params.es[i].off = ICE_FV_OFFSET_INVAL;
1808         }
1809
1810         params.blk = blk;
1811         params.prof->id = prof_id;
1812         params.prof->dir = dir;
1813         params.prof->segs_cnt = segs_cnt;
1814
1815         /* Make a copy of the segments that need to be persistent in the flow
1816          * profile instance
1817          */
1818         for (i = 0; i < segs_cnt; i++)
1819                 ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
1820                            ICE_NONDMA_TO_NONDMA);
1821
1822         /* Make a copy of the actions that need to be persistent in the flow
1823          * profile instance.
1824          */
1825         if (acts_cnt) {
1826                 params.prof->acts = (struct ice_flow_action *)
1827                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1828                                    ICE_NONDMA_TO_NONDMA);
1829
1830                 if (!params.prof->acts) {
1831                         status = ICE_ERR_NO_MEMORY;
1832                         goto out;
1833                 }
1834         }
1835
1836         status = ice_flow_proc_segs(hw, &params);
1837         if (status) {
1838                 ice_debug(hw, ICE_DBG_FLOW,
1839                           "Error processing a flow's packet segments\n");
1840                 goto out;
1841         }
1842
1843         /* Add a HW profile for this flow profile */
1844         status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1845                               params.attr, params.attr_cnt, params.es,
1846                               params.mask);
1847         if (status) {
1848                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1849                 goto out;
1850         }
1851
1852         INIT_LIST_HEAD(&params.prof->entries);
1853         ice_init_lock(&params.prof->entries_lock);
1854         *prof = params.prof;
1855
1856 out:
1857         if (status) {
1858                 if (params.prof->acts)
1859                         ice_free(hw, params.prof->acts);
1860                 ice_free(hw, params.prof);
1861         }
1862
1863         return status;
1864 }
1865
1866 /**
1867  * ice_flow_rem_prof_sync - remove a flow profile
1868  * @hw: pointer to the hardware structure
1869  * @blk: classification stage
1870  * @prof: pointer to flow profile to remove
1871  *
1872  * Assumption: the caller has acquired the lock to the profile list
1873  */
1874 static enum ice_status
1875 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1876                        struct ice_flow_prof *prof)
1877 {
1878         enum ice_status status;
1879
1880         /* Remove all remaining flow entries before removing the flow profile */
1881         if (!LIST_EMPTY(&prof->entries)) {
1882                 struct ice_flow_entry *e, *t;
1883
1884                 ice_acquire_lock(&prof->entries_lock);
1885
1886                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1887                                          l_entry) {
1888                         status = ice_flow_rem_entry_sync(hw, blk, e);
1889                         if (status)
1890                                 break;
1891                 }
1892
1893                 ice_release_lock(&prof->entries_lock);
1894         }
1895
1896         if (blk == ICE_BLK_ACL) {
1897                 struct ice_aqc_acl_profile_ranges query_rng_buf;
1898                 struct ice_aqc_acl_prof_generic_frmt buf;
1899                 u8 prof_id = 0;
1900
1901                 /* Disassociate the scenario from the profile for the PF */
1902                 status = ice_flow_acl_disassoc_scen(hw, prof);
1903                 if (status)
1904                         return status;
1905
1906                 /* Clear the range-checker if the profile ID is no longer
1907                  * used by any PF
1908                  */
1909                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1910                 if (status && status != ICE_ERR_IN_USE) {
1911                         return status;
1912                 } else if (!status) {
1913                         /* Clear the range-checker value for profile ID */
1914                         ice_memset(&query_rng_buf, 0,
1915                                    sizeof(struct ice_aqc_acl_profile_ranges),
1916                                    ICE_NONDMA_MEM);
1917
1918                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
1919                                                       &prof_id);
1920                         if (status)
1921                                 return status;
1922
1923                         status = ice_prog_acl_prof_ranges(hw, prof_id,
1924                                                           &query_rng_buf, NULL);
1925                         if (status)
1926                                 return status;
1927                 }
1928         }
1929
1930         /* Remove all hardware profiles associated with this flow profile */
1931         status = ice_rem_prof(hw, blk, prof->id);
1932         if (!status) {
1933                 LIST_DEL(&prof->l_entry);
1934                 ice_destroy_lock(&prof->entries_lock);
1935                 if (prof->acts)
1936                         ice_free(hw, prof->acts);
1937                 ice_free(hw, prof);
1938         }
1939
1940         return status;
1941 }
1942
1943 /**
1944  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1945  * @buf: destination buffer the function writes the partial extraction sequence to
1946  * @info: Info about field
1947  */
1948 static void
1949 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1950                                struct ice_flow_fld_info *info)
1951 {
1952         u16 dst, i;
1953         u8 src;
1954
1955         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1956                 info->xtrct.disp / BITS_PER_BYTE;
1957         dst = info->entry.val;
1958         for (i = 0; i < info->entry.last; i++)
1959                 /* HW stores field vector words in LE, convert words back to BE
1960                  * so constructed entries will end up in network order
1961                  */
1962                 buf->byte_selection[dst++] = src++ ^ 1;
1963 }
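
/* An illustrative sketch, not part of the driver and compiled out on purpose:
 * the "src++ ^ 1" above swaps the two bytes of each little-endian field
 * vector word, so a multi-byte field is emitted in network order.
 */
#if 0
static void xtrct_seq_fld_example(u8 *byte_selection)
{
	u8 src = 4;	/* field starting at FV word 2, byte 0 */
	u16 dst = 0, i;

	for (i = 0; i < 4; i++)	/* a 4-byte field */
		byte_selection[dst++] = src++ ^ 1;	/* yields 5, 4, 7, 6 */
}
#endif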
1964
1965 /**
1966  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1967  * @hw: pointer to the hardware structure
1968  * @prof: pointer to flow profile
1969  */
1970 static enum ice_status
1971 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
1972 {
1973         struct ice_aqc_acl_prof_generic_frmt buf;
1974         struct ice_flow_fld_info *info;
1975         enum ice_status status;
1976         u8 prof_id = 0;
1977         u16 i;
1978
1979         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1980
1981         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1982         if (status)
1983                 return status;
1984
1985         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1986         if (status && status != ICE_ERR_IN_USE)
1987                 return status;
1988
1989         if (!status) {
1990                 /* Program the profile dependent configuration. This is done
1991                  * only once regardless of the number of PFs using that profile
1992                  */
1993                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1994
1995                 for (i = 0; i < prof->segs_cnt; i++) {
1996                         struct ice_flow_seg_info *seg = &prof->segs[i];
1997                         u64 match = seg->match;
1998                         u16 j;
1999
2000                         for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2001                                 const u64 bit = BIT_ULL(j);
2002
2003                                 if (!(match & bit))
2004                                         continue;
2005
2006                                 info = &seg->fields[j];
2007
2008                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2009                                         buf.word_selection[info->entry.val] =
2010                                                                 info->xtrct.idx;
2011                                 else
2012                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
2013                                                                        info);
2014
2015                                 match &= ~bit;
2016                         }
2017
2018                         for (j = 0; j < seg->raws_cnt; j++) {
2019                                 info = &seg->raws[j].info;
2020                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2021                         }
2022                 }
2023
2024                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2025                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2026                            ICE_NONDMA_MEM);
2027         }
2028
2029         /* Update the current PF */
2030         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2031         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
2032
2033         return status;
2034 }
2035
2036 /**
2037  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2038  * @hw: pointer to the hardware structure
2039  * @blk: classification stage
2040  * @vsi_handle: software VSI handle
2041  * @vsig: target VSI group
2042  *
2043  * Assumption: the caller has already verified that the VSI to
2044  * be added has the same characteristics as the VSIG and will
2045  * thereby have access to all resources added to that VSIG.
2046  */
2047 enum ice_status
2048 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2049                         u16 vsig)
2050 {
2051         enum ice_status status;
2052
2053         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2054                 return ICE_ERR_PARAM;
2055
2056         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2057         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2058                                   vsig);
2059         ice_release_lock(&hw->fl_profs_locks[blk]);
2060
2061         return status;
2062 }
2063
2064 /**
2065  * ice_flow_assoc_prof - associate a VSI with a flow profile
2066  * @hw: pointer to the hardware structure
2067  * @blk: classification stage
2068  * @prof: pointer to flow profile
2069  * @vsi_handle: software VSI handle
2070  *
2071  * Assumption: the caller has acquired the lock to the profile list
2072  * and the software VSI handle has been validated
2073  */
2074 static enum ice_status
2075 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2076                     struct ice_flow_prof *prof, u16 vsi_handle)
2077 {
2078         enum ice_status status = ICE_SUCCESS;
2079
2080         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2081                 if (blk == ICE_BLK_ACL) {
2082                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2083                         if (status)
2084                                 return status;
2085                 }
2086                 status = ice_add_prof_id_flow(hw, blk,
2087                                               ice_get_hw_vsi_num(hw,
2088                                                                  vsi_handle),
2089                                               prof->id);
2090                 if (!status)
2091                         ice_set_bit(vsi_handle, prof->vsis);
2092                 else
2093                         ice_debug(hw, ICE_DBG_FLOW,
2094                                   "HW profile add failed, %d\n",
2095                                   status);
2096         }
2097
2098         return status;
2099 }
2100
2101 /**
2102  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2103  * @hw: pointer to the hardware structure
2104  * @blk: classification stage
2105  * @prof: pointer to flow profile
2106  * @vsi_handle: software VSI handle
2107  *
2108  * Assumption: the caller has acquired the lock to the profile list
2109  * and the software VSI handle has been validated
2110  */
2111 static enum ice_status
2112 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2113                        struct ice_flow_prof *prof, u16 vsi_handle)
2114 {
2115         enum ice_status status = ICE_SUCCESS;
2116
2117         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2118                 status = ice_rem_prof_id_flow(hw, blk,
2119                                               ice_get_hw_vsi_num(hw,
2120                                                                  vsi_handle),
2121                                               prof->id);
2122                 if (!status)
2123                         ice_clear_bit(vsi_handle, prof->vsis);
2124                 else
2125                         ice_debug(hw, ICE_DBG_FLOW,
2126                                   "HW profile remove failed, %d\n",
2127                                   status);
2128         }
2129
2130         return status;
2131 }
2132
2133 /**
2134  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2135  * @hw: pointer to the HW struct
2136  * @blk: classification stage
2137  * @dir: flow direction
2138  * @prof_id: unique ID to identify this flow profile
2139  * @segs: array of one or more packet segments that describe the flow
2140  * @segs_cnt: number of packet segments provided
2141  * @acts: array of default actions
2142  * @acts_cnt: number of default actions
2143  * @prof: stores the returned flow profile added
2144  */
2145 enum ice_status
2146 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2147                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2148                   struct ice_flow_action *acts, u8 acts_cnt,
2149                   struct ice_flow_prof **prof)
2150 {
2151         enum ice_status status;
2152
2153         if (segs_cnt > ICE_FLOW_SEG_MAX)
2154                 return ICE_ERR_MAX_LIMIT;
2155
2156         if (!segs_cnt)
2157                 return ICE_ERR_PARAM;
2158
2159         if (!segs)
2160                 return ICE_ERR_BAD_PTR;
2161
2162         status = ice_flow_val_hdrs(segs, segs_cnt);
2163         if (status)
2164                 return status;
2165
2166         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2167
2168         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2169                                         acts, acts_cnt, prof);
2170         if (!status)
2171                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2172
2173         ice_release_lock(&hw->fl_profs_locks[blk]);
2174
2175         return status;
2176 }
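
/* An illustrative sketch, not part of the driver and compiled out on purpose:
 * a minimal caller of ice_flow_add_prof() building one IPv4 segment that
 * matches the source address taken from offset 0 of the caller's entry data.
 * It assumes ice_flow_set_fld() as declared in ice_flow.h; the profile ID
 * (0x1234) and the choice of the RSS block are arbitrary.
 */
#if 0
static enum ice_status add_ipv4_sa_prof(struct ice_hw *hw,
					struct ice_flow_prof **prof)
{
	struct ice_flow_seg_info seg;

	ice_memset(&seg, 0, sizeof(seg), ICE_NONDMA_MEM);
	seg.hdrs |= ICE_FLOW_SEG_HDR_IPV4;
	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA, 0,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
			 false);

	return ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, 0x1234,
				 &seg, 1, NULL, 0, prof);
}
#endif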
2177
2178 /**
2179  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2180  * @hw: pointer to the HW struct
2181  * @blk: the block for which the flow profile is to be removed
2182  * @prof_id: unique ID of the flow profile to be removed
2183  */
2184 enum ice_status
2185 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2186 {
2187         struct ice_flow_prof *prof;
2188         enum ice_status status;
2189
2190         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2191
2192         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2193         if (!prof) {
2194                 status = ICE_ERR_DOES_NOT_EXIST;
2195                 goto out;
2196         }
2197
2198         /* prof becomes invalid after the call */
2199         status = ice_flow_rem_prof_sync(hw, blk, prof);
2200
2201 out:
2202         ice_release_lock(&hw->fl_profs_locks[blk]);
2203
2204         return status;
2205 }
2206
2207 /**
2208  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2209  * @hw: pointer to the HW struct
2210  * @blk: classification stage
2211  * @prof_id: the profile ID handle
2212  * @hw_prof_id: pointer to variable to receive the HW profile ID
2213  */
2214 enum ice_status
2215 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2216                      u8 *hw_prof_id)
2217 {
2218         enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2219         struct ice_prof_map *map;
2220
2221         ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2222         map = ice_search_prof_id(hw, blk, prof_id);
2223         if (map) {
2224                 *hw_prof_id = map->prof_id;
2225                 status = ICE_SUCCESS;
2226         }
2227         ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2228         return status;
2229 }
2230
2231 /**
2232  * ice_flow_find_entry - look for a flow entry using its unique ID
2233  * @hw: pointer to the HW struct
2234  * @blk: classification stage
2235  * @entry_id: unique ID to identify this flow entry
2236  *
2237  * This function looks for the flow entry with the specified unique ID in all
2238  * flow profiles of the specified classification stage. If the entry is
2239  * found, it returns the handle to the flow entry. Otherwise, it returns
2240  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2241  */
2242 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2243 {
2244         struct ice_flow_entry *found = NULL;
2245         struct ice_flow_prof *p;
2246
2247         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2248
2249         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2250                 struct ice_flow_entry *e;
2251
2252                 ice_acquire_lock(&p->entries_lock);
2253                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2254                         if (e->id == entry_id) {
2255                                 found = e;
2256                                 break;
2257                         }
2258                 ice_release_lock(&p->entries_lock);
2259
2260                 if (found)
2261                         break;
2262         }
2263
2264         ice_release_lock(&hw->fl_profs_locks[blk]);
2265
2266         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2267 }
2268
2269 /**
2270  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2271  * @hw: pointer to the hardware structure
2272  * @acts: array of actions to be performed on a match
2273  * @acts_cnt: number of actions
2274  * @cnt_alloc: indicates if an ACL counter has been allocated.
2275  */
2276 static enum ice_status
2277 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2278                            u8 acts_cnt, bool *cnt_alloc)
2279 {
2280         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2281         int i;
2282
2283         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2284         *cnt_alloc = false;
2285
2286         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2287                 return ICE_ERR_OUT_OF_RANGE;
2288
2289         for (i = 0; i < acts_cnt; i++) {
2290                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2291                     acts[i].type != ICE_FLOW_ACT_DROP &&
2292                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2293                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2294                         return ICE_ERR_CFG;
2295
2296                 /* If the caller wants to add two actions of the same type, it
2297                  * is considered an invalid configuration.
2298                  */
2299                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2300                         return ICE_ERR_PARAM;
2301         }
2302
2303         /* Checks if ACL counters are needed. */
2304         for (i = 0; i < acts_cnt; i++) {
2305                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2306                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2307                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2308                         struct ice_acl_cntrs cntrs;
2309                         enum ice_status status;
2310
2311                         cntrs.amount = 1;
2312                         cntrs.bank = 0; /* Only bank0 for the moment */
2313
2314                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2315                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2316                         else
2317                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2318
2319                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2320                         if (status)
2321                                 return status;
2322                         /* Counter index within the bank */
2323                         acts[i].data.acl_act.value =
2324                                                 CPU_TO_LE16(cntrs.first_cntr);
2325                         *cnt_alloc = true;
2326                 }
2327         }
2328
2329         return ICE_SUCCESS;
2330 }
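
/* An illustrative sketch, not part of the driver and compiled out on purpose:
 * two actions of the same type trip the dup_check bitmap above, so a caller
 * passing a pair of packet-counter actions gets ICE_ERR_PARAM back.
 */
#if 0
static enum ice_status dup_action_example(struct ice_hw *hw)
{
	struct ice_flow_action acts[2] = {
		{ .type = ICE_FLOW_ACT_CNTR_PKT },
		{ .type = ICE_FLOW_ACT_CNTR_PKT },
	};
	bool cnt_alloc;

	return ice_flow_acl_check_actions(hw, acts, 2, &cnt_alloc);
}
#endif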
2331
2332 /**
2333  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2334  * @fld: number of the given field
2335  * @info: info about field
2336  * @range_buf: range checker configuration buffer
2337  * @data: pointer to a data buffer containing flow entry's match values/masks
2338  * @range: Input/output param indicating which range checkers are being used
2339  */
2340 static void
2341 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2342                               struct ice_aqc_acl_profile_ranges *range_buf,
2343                               u8 *data, u8 *range)
2344 {
2345         u16 new_mask;
2346
2347         /* If not specified, default mask is all bits in field */
2348         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2349                     BIT(ice_flds_info[fld].size) - 1 :
2350                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2351
2352         /* If the mask is 0, then we don't need to worry about this input
2353          * range checker value.
2354          */
2355         if (new_mask) {
2356                 u16 new_high =
2357                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2358                 u16 new_low =
2359                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2360                 u8 range_idx = info->entry.val;
2361
2362                 range_buf->checker_cfg[range_idx].low_boundary =
2363                         CPU_TO_BE16(new_low);
2364                 range_buf->checker_cfg[range_idx].high_boundary =
2365                         CPU_TO_BE16(new_high);
2366                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2367
2368                 /* Indicate which range checker is being used */
2369                 *range |= BIT(range_idx);
2370         }
2371 }
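
/* An illustrative sketch, not part of the driver and compiled out on purpose:
 * for a byte-aligned 16-bit port field with no caller-supplied mask, the code
 * above programs the low/high boundaries straight from the entry data and a
 * full-width mask, all byte-swapped to big-endian for the range checker. The
 * port values used here are arbitrary.
 */
#if 0
static void range_chk_example(struct ice_aqc_acl_profile_ranges *range_buf)
{
	u16 low = 1000, high = 2000;

	range_buf->checker_cfg[0].low_boundary = CPU_TO_BE16(low);
	range_buf->checker_cfg[0].high_boundary = CPU_TO_BE16(high);
	range_buf->checker_cfg[0].mask = CPU_TO_BE16(0xffff);
}
#endif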
2372
2373 /**
2374  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2375  * @fld: number of the given field
2376  * @info: info about the field
2377  * @buf: buffer containing the entry
2378  * @dontcare: buffer containing don't care mask for entry
2379  * @data: pointer to a data buffer containing flow entry's match values/masks
2380  */
2381 static void
2382 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2383                             u8 *dontcare, u8 *data)
2384 {
2385         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2386         bool use_mask = false;
2387         u8 disp;
2388
2389         src = info->src.val;
2390         mask = info->src.mask;
2391         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2392         disp = info->xtrct.disp % BITS_PER_BYTE;
2393
2394         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2395                 use_mask = true;
2396
2397         for (k = 0; k < info->entry.last; k++, dst++) {
2398                 /* Add overflow bits from previous byte */
2399                 buf[dst] = (tmp_s & 0xff00) >> 8;
2400
2401                 /* If the mask is not valid, tmp_m is always zero, so this just
2402                  * sets dontcare to 0 (no masked bits). If the mask is valid, this
2403                  * pulls in the overflow bits of the mask from the previous byte.
2404                  */
2405                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2406
2407                 /* If there is displacement, the last byte will only contain
2408                  * displaced data with no more data to read from the user
2409                  * buffer; skip it so we do not potentially read beyond the
2410                  * end of the user buffer
2411                  */
2412                 if (!disp || k < info->entry.last - 1) {
2413                         /* Store shifted data to use in next byte */
2414                         tmp_s = data[src++] << disp;
2415
2416                         /* Add current (shifted) byte */
2417                         buf[dst] |= tmp_s & 0xff;
2418
2419                         /* Handle mask if valid */
2420                         if (use_mask) {
2421                                 tmp_m = (~data[mask++] & 0xff) << disp;
2422                                 dontcare[dst] |= tmp_m & 0xff;
2423                         }
2424                 }
2425         }
2426
2427         /* Fill in don't care bits at beginning of field */
2428         if (disp) {
2429                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2430                 for (k = 0; k < disp; k++)
2431                         dontcare[dst] |= BIT(k);
2432         }
2433
2434         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2435
2436         /* Fill in don't care bits at end of field */
2437         if (end_disp) {
2438                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2439                       info->entry.last - 1;
2440                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2441                         dontcare[dst] |= BIT(k);
2442         }
2443 }
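
/* An illustrative sketch, not part of the driver and compiled out on purpose:
 * for an 8-bit field displaced by 2 bits the routine above spreads the value
 * across two key bytes and marks the unused positions as don't care: bits
 * 0-1 of the first byte and bits 2-7 of the second.
 */
#if 0
static void fld_dontcare_example(u8 dontcare[2])
{
	u16 size_bits = 8;
	u8 disp = 2, k;
	u16 end_disp = (disp + size_bits) % BITS_PER_BYTE;	/* 2 */

	for (k = 0; k < disp; k++)			/* bits 0-1, first byte */
		dontcare[0] |= BIT(k);
	for (k = end_disp; k < BITS_PER_BYTE; k++)	/* bits 2-7, last byte */
		dontcare[1] |= BIT(k);
}
#endif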
2444
2445 /**
2446  * ice_flow_acl_frmt_entry - Format ACL entry
2447  * @hw: pointer to the hardware structure
2448  * @prof: pointer to flow profile
2449  * @e: pointer to the flow entry
2450  * @data: pointer to a data buffer containing flow entry's match values/masks
2451  * @acts: array of actions to be performed on a match
2452  * @acts_cnt: number of actions
2453  *
2454  * Formats the key (and key_inverse) to be matched from the data passed in,
2455  * along with data from the flow profile. This key/key_inverse pair makes up
2456  * the 'entry' for an ACL flow entry.
2457  */
2458 static enum ice_status
2459 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2460                         struct ice_flow_entry *e, u8 *data,
2461                         struct ice_flow_action *acts, u8 acts_cnt)
2462 {
2463         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2464         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2465         enum ice_status status;
2466         bool cnt_alloc;
2467         u8 prof_id = 0;
2468         u16 i, buf_sz;
2469
2470         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2471         if (status)
2472                 return status;
2473
2474         /* Format the result action */
2475
2476         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2477         if (status)
2478                 return status;
2479
2480         status = ICE_ERR_NO_MEMORY;
2481
2482         e->acts = (struct ice_flow_action *)
2483                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2484                            ICE_NONDMA_TO_NONDMA);
2485
2486         if (!e->acts)
2487                 goto out;
2488
2489         e->acts_cnt = acts_cnt;
2490
2491         /* Format the matching data */
2492         buf_sz = prof->cfg.scen->width;
2493         buf = (u8 *)ice_malloc(hw, buf_sz);
2494         if (!buf)
2495                 goto out;
2496
2497         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2498         if (!dontcare)
2499                 goto out;
2500
2501         /* 'key' buffer will store both key and key_inverse, so must be twice
2502          * size of buf
2503          */
2504         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2505         if (!key)
2506                 goto out;
2507
2508         range_buf = (struct ice_aqc_acl_profile_ranges *)
2509                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2510         if (!range_buf)
2511                 goto out;
2512
2513         /* Set don't care mask to all 1's to start, will zero out used bytes */
2514         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2515
2516         for (i = 0; i < prof->segs_cnt; i++) {
2517                 struct ice_flow_seg_info *seg = &prof->segs[i];
2518                 u64 match = seg->match;
2519                 u16 j;
2520
2521                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2522                         struct ice_flow_fld_info *info;
2523                         const u64 bit = BIT_ULL(j);
2524
2525                         if (!(match & bit))
2526                                 continue;
2527
2528                         info = &seg->fields[j];
2529
2530                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2531                                 ice_flow_acl_frmt_entry_range(j, info,
2532                                                               range_buf, data,
2533                                                               &range);
2534                         else
2535                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2536                                                             dontcare, data);
2537
2538                         match &= ~bit;
2539                 }
2540
2541                 for (j = 0; j < seg->raws_cnt; j++) {
2542                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2543                         u16 dst, src, mask, k;
2544                         bool use_mask = false;
2545
2546                         src = info->src.val;
2547                         dst = info->entry.val -
2548                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2549                         mask = info->src.mask;
2550
2551                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2552                                 use_mask = true;
2553
2554                         for (k = 0; k < info->entry.last; k++, dst++) {
2555                                 buf[dst] = data[src++];
2556                                 if (use_mask)
2557                                         dontcare[dst] = ~data[mask++];
2558                                 else
2559                                         dontcare[dst] = 0;
2560                         }
2561                 }
2562         }
2563
2564         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2565         dontcare[prof->cfg.scen->pid_idx] = 0;
2566
2567         /* Format the buffer for direction flags */
2568         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2569
2570         if (prof->dir == ICE_FLOW_RX)
2571                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2572
2573         if (range) {
2574                 buf[prof->cfg.scen->rng_chk_idx] = range;
2575                 /* Mark any unused range checkers as don't care */
2576                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2577                 e->range_buf = range_buf;
2578         } else {
2579                 ice_free(hw, range_buf);
2580         }
2581
2582         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2583                              buf_sz);
2584         if (status)
2585                 goto out;
2586
2587         e->entry = key;
2588         e->entry_sz = buf_sz * 2;
2589
2590 out:
2591         if (buf)
2592                 ice_free(hw, buf);
2593
2594         if (dontcare)
2595                 ice_free(hw, dontcare);
2596
2597         if (status && key)
2598                 ice_free(hw, key);
2599
2600         if (status && range_buf) {
2601                 ice_free(hw, range_buf);
2602                 e->range_buf = NULL;
2603         }
2604
2605         if (status && e->acts) {
2606                 ice_free(hw, e->acts);
2607                 e->acts = NULL;
2608                 e->acts_cnt = 0;
2609         }
2610
2611         if (status && cnt_alloc)
2612                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2613
2614         return status;
2615 }
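
/* Layout note (documentation only): on success, e->entry holds buf_sz bytes
 * of key followed by buf_sz bytes of key inverse (hence e->entry_sz being
 * buf_sz * 2); ice_flow_acl_add_scen_entry_sync() later splits it back into
 * the keys/inverts pair handed to ice_acl_add_entry().
 */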
2616
2617 /**
2618  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2619  *                                     the compared data.
2620  * @prof: pointer to flow profile
2621  * @e: pointer to the flow entry to compare against existing entries
2622  * @do_chg_action: decide if we want to change the ACL action
2623  * @do_add_entry: decide if we want to add the new ACL entry
2624  * @do_rem_entry: decide if we want to remove the current ACL entry
2625  *
2626  * Find an ACL scenario entry that matches the compared data. At the same
2627  * time, this function also figures out:
2628  * a/ If we want to change the ACL action
2629  * b/ If we want to add the new ACL entry
2630  * c/ If we want to remove the current ACL entry
2631  */
2632 static struct ice_flow_entry *
2633 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2634                                   struct ice_flow_entry *e, bool *do_chg_action,
2635                                   bool *do_add_entry, bool *do_rem_entry)
2636 {
2637         struct ice_flow_entry *p, *return_entry = NULL;
2638         u8 i, j;
2639
2640         /* Check if:
2641          * a/ There exists an entry with the same matching data but a
2642          *    different priority; then we remove the existing ACL entry and
2643          *    add the new entry to the ACL scenario.
2644          * b/ There exists an entry with the same matching data, priority,
2645          *    and result action; then we do nothing.
2646          * c/ There exists an entry with the same matching data and priority
2647          *    but a different action; then we only change the entry's action.
2648          * d/ Else, we add this new entry to the ACL scenario.
2649          */
2650         *do_chg_action = false;
2651         *do_add_entry = true;
2652         *do_rem_entry = false;
2653         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2654                 if (memcmp(p->entry, e->entry, p->entry_sz))
2655                         continue;
2656
2657                 /* From this point, we have the same matching_data. */
2658                 *do_add_entry = false;
2659                 return_entry = p;
2660
2661                 if (p->priority != e->priority) {
2662                         /* matching data && !priority */
2663                         *do_add_entry = true;
2664                         *do_rem_entry = true;
2665                         break;
2666                 }
2667
2668                 /* From this point, we will have matching_data && priority */
2669                 if (p->acts_cnt != e->acts_cnt)
2670                         *do_chg_action = true;
2671                 for (i = 0; i < p->acts_cnt; i++) {
2672                         bool found_not_match = false;
2673
2674                         for (j = 0; j < e->acts_cnt; j++)
2675                                 if (memcmp(&p->acts[i], &e->acts[j],
2676                                            sizeof(struct ice_flow_action))) {
2677                                         found_not_match = true;
2678                                         break;
2679                                 }
2680
2681                         if (found_not_match) {
2682                                 *do_chg_action = true;
2683                                 break;
2684                         }
2685                 }
2686
2687                 /* (do_chg_action = true) means :
2688                  *    matching_data && priority && !result_action
2689                  * (do_chg_action = false) means :
2690                  *    matching_data && priority && result_action
2691                  */
2692                 break;
2693         }
2694
2695         return return_entry;
2696 }
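
/* Summary of the conditions checked above (documentation only):
 *
 *   same match data | same priority | same actions | add | rem | chg_action
 *   no              | -             | -            | yes | no  | no
 *   yes             | no            | -            | yes | yes | no
 *   yes             | yes           | no           | no  | no  | yes
 *   yes             | yes           | yes          | no  | no  | no
 */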
2697
2698 /**
2699  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2700  * @p: flow priority
2701  */
2702 static enum ice_acl_entry_prior
2703 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2704 {
2705         enum ice_acl_entry_prior acl_prior;
2706
2707         switch (p) {
2708         case ICE_FLOW_PRIO_LOW:
2709                 acl_prior = ICE_LOW;
2710                 break;
2711         case ICE_FLOW_PRIO_NORMAL:
2712                 acl_prior = ICE_NORMAL;
2713                 break;
2714         case ICE_FLOW_PRIO_HIGH:
2715                 acl_prior = ICE_HIGH;
2716                 break;
2717         default:
2718                 acl_prior = ICE_NORMAL;
2719                 break;
2720         }
2721
2722         return acl_prior;
2723 }
2724
2725 /**
2726  * ice_flow_acl_union_rng_chk - Perform union operation between two
2727  *                              range checker buffers
2728  * @dst_buf: pointer to destination range checker buffer
2729  * @src_buf: pointer to source range checker buffer
2730  *
2731  * Compute the union of the dst_buf and src_buf range checker buffers and
2732  * save the result back into dst_buf
2733  */
2734 static enum ice_status
2735 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2736                            struct ice_aqc_acl_profile_ranges *src_buf)
2737 {
2738         u8 i, j;
2739
2740         if (!dst_buf || !src_buf)
2741                 return ICE_ERR_BAD_PTR;
2742
2743         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2744                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2745                 bool will_populate = false;
2746
2747                 in_data = &src_buf->checker_cfg[i];
2748
2749                 if (!in_data->mask)
2750                         break;
2751
2752                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2753                         cfg_data = &dst_buf->checker_cfg[j];
2754
2755                         if (!cfg_data->mask ||
2756                             !memcmp(cfg_data, in_data,
2757                                     sizeof(struct ice_acl_rng_data))) {
2758                                 will_populate = true;
2759                                 break;
2760                         }
2761                 }
2762
2763                 if (will_populate) {
2764                         ice_memcpy(cfg_data, in_data,
2765                                    sizeof(struct ice_acl_rng_data),
2766                                    ICE_NONDMA_TO_NONDMA);
2767                 } else {
2768                         /* No available slot left to program range checker */
2769                         return ICE_ERR_MAX_LIMIT;
2770                 }
2771         }
2772
2773         return ICE_SUCCESS;
2774 }
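
/* Behavior sketch for the union above (documentation only): every valid
 * checker in src_buf (non-zero mask) is copied into dst_buf, reusing a slot
 * that already holds identical data or taking the first unused slot; if all
 * ICE_AQC_ACL_PROF_RANGES_NUM_CFG slots already hold distinct valid checkers,
 * ICE_ERR_MAX_LIMIT is returned.
 */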
2775
2776 /**
2777  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2778  * @hw: pointer to the hardware structure
2779  * @prof: pointer to flow profile
2780  * @entry: double pointer to the flow entry
2781  *
2782  * Look at the entries already added to the corresponding ACL scenario and
2783  * perform matching logic to decide whether this new entry should be added,
2784  * should modify an existing entry, or requires no change.
2785  */
2786 static enum ice_status
2787 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2788                                  struct ice_flow_entry **entry)
2789 {
2790         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2791         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2792         struct ice_acl_act_entry *acts = NULL;
2793         struct ice_flow_entry *exist;
2794         enum ice_status status = ICE_SUCCESS;
2795         struct ice_flow_entry *e;
2796         u8 i;
2797
2798         if (!entry || !(*entry) || !prof)
2799                 return ICE_ERR_BAD_PTR;
2800
2801         e = *(entry);
2802
2803         do_chg_rng_chk = false;
2804         if (e->range_buf) {
2805                 u8 prof_id = 0;
2806
2807                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2808                                               &prof_id);
2809                 if (status)
2810                         return status;
2811
2812                 /* Query the current range-checker value in FW */
2813                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2814                                                    NULL);
2815                 if (status)
2816                         return status;
2817                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2818                            sizeof(struct ice_aqc_acl_profile_ranges),
2819                            ICE_NONDMA_TO_NONDMA);
2820
2821                 /* Generate the new range-checker value */
2822                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2823                 if (status)
2824                         return status;
2825
2826                 /* Reconfigure the range check if the buffer is changed. */
2827                 do_chg_rng_chk = false;
2828                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2829                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2830                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2831                                                           &cfg_rng_buf, NULL);
2832                         if (status)
2833                                 return status;
2834
2835                         do_chg_rng_chk = true;
2836                 }
2837         }
2838
2839         /* Figure out if we want to change the ACL action, add the new ACL
2840          * entry, and/or remove the current ACL entry
2841          */
2842         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2843                                                   &do_add_entry, &do_rem_entry);
2844
2845         if (do_rem_entry) {
2846                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2847                 if (status)
2848                         return status;
2849         }
2850
2851         /* Prepare the result action buffer */
2852         acts = (struct ice_acl_act_entry *)
2853                 ice_calloc(hw, e->acts_cnt, sizeof(struct ice_acl_act_entry));
2854         if (!acts)
2855                 return ICE_ERR_NO_MEMORY;
2856         for (i = 0; i < e->acts_cnt; i++)
2857                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2858                            sizeof(*acts), ICE_NONDMA_TO_NONDMA);
2859         if (do_add_entry) {
2860                 enum ice_acl_entry_prior prior;
2861                 u8 *keys, *inverts;
2862                 u16 entry_idx;
2863
2864                 keys = (u8 *)e->entry;
2865                 inverts = keys + (e->entry_sz / 2);
2866                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2867
2868                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2869                                            inverts, acts, e->acts_cnt,
2870                                            &entry_idx);
2871                 if (status)
2872                         goto out;
2873
2874                 e->scen_entry_idx = entry_idx;
2875                 LIST_ADD(&e->l_entry, &prof->entries);
2876         } else {
2877                 if (do_chg_action) {
2878                         /* For the action memory info, update the SW's copy of
2879                          * the existing entry with e's action memory info
2880                          */
2881                         ice_free(hw, exist->acts);
2882                         exist->acts_cnt = e->acts_cnt;
2883                         exist->acts = (struct ice_flow_action *)
2884                                 ice_calloc(hw, exist->acts_cnt,
2885                                            sizeof(struct ice_flow_action));
2886
2887                         if (!exist->acts) {
2888                                 status = ICE_ERR_NO_MEMORY;
2889                                 goto out;
2890                         }
2891
2892                         ice_memcpy(exist->acts, e->acts,
2893                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2894                                    ICE_NONDMA_TO_NONDMA);
2895
2896                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2897                                                   e->acts_cnt,
2898                                                   exist->scen_entry_idx);
2899                         if (status)
2900                                 goto out;
2901                 }
2902
2903                 if (do_chg_rng_chk) {
2904                         /* In this case, we want to update the range checker
2905                          * information of the existing entry
2906                          */
2907                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2908                                                             e->range_buf);
2909                         if (status)
2910                                 goto out;
2911                 }
2912
2913                 /* As we don't add the new entry to our SW DB, deallocate its
2914                  * memory and return the existing entry to the caller
2915                  */
2916                 ice_dealloc_flow_entry(hw, e);
2917                 *(entry) = exist;
2918         }
2919 out:
2920         if (acts)
2921                 ice_free(hw, acts);
2922
2923         return status;
2924 }
2925
2926 /**
2927  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2928  * @hw: pointer to the hardware structure
2929  * @prof: pointer to flow profile
2930  * @e: double pointer to the flow entry
2931  */
2932 static enum ice_status
2933 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2934                             struct ice_flow_entry **e)
2935 {
2936         enum ice_status status;
2937
2938         ice_acquire_lock(&prof->entries_lock);
2939         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2940         ice_release_lock(&prof->entries_lock);
2941
2942         return status;
2943 }
2944
2945 /**
2946  * ice_flow_add_entry - Add a flow entry
2947  * @hw: pointer to the HW struct
2948  * @blk: classification stage
2949  * @prof_id: ID of the profile to add a new flow entry to
2950  * @entry_id: unique ID to identify this flow entry
2951  * @vsi_handle: software VSI handle for the flow entry
2952  * @prio: priority of the flow entry
2953  * @data: pointer to a data buffer containing flow entry's match values/masks
2954  * @acts: arrays of actions to be performed on a match
2955  * @acts_cnt: number of actions
2956  * @entry_h: pointer to buffer that receives the new flow entry's handle
2957  */
2958 enum ice_status
2959 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2960                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2961                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
2962                    u64 *entry_h)
2963 {
2964         struct ice_flow_entry *e = NULL;
2965         struct ice_flow_prof *prof;
2966         enum ice_status status = ICE_SUCCESS;
2967
2968         /* ACL entries must indicate an action */
2969         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2970                 return ICE_ERR_PARAM;
2971
2972         /* No flow entry data is expected for RSS */
2973         if (!entry_h || (!data && blk != ICE_BLK_RSS))
2974                 return ICE_ERR_BAD_PTR;
2975
2976         if (!ice_is_vsi_valid(hw, vsi_handle))
2977                 return ICE_ERR_PARAM;
2978
2979         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2980
2981         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2982         if (!prof) {
2983                 status = ICE_ERR_DOES_NOT_EXIST;
2984         } else {
2985                 /* Allocate memory for the entry being added and associate
2986                  * the VSI to the found flow profile
2987                  */
2988                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2989                 if (!e)
2990                         status = ICE_ERR_NO_MEMORY;
2991                 else
2992                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2993         }
2994
2995         ice_release_lock(&hw->fl_profs_locks[blk]);
2996         if (status)
2997                 goto out;
2998
2999         e->id = entry_id;
3000         e->vsi_handle = vsi_handle;
3001         e->prof = prof;
3002         e->priority = prio;
3003
3004         switch (blk) {
3005         case ICE_BLK_FD:
3006         case ICE_BLK_RSS:
3007                 break;
3008         case ICE_BLK_ACL:
3009                 /* ACL will handle the entry management */
3010                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3011                                                  acts_cnt);
3012                 if (status)
3013                         goto out;
3014
3015                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3016                 if (status)
3017                         goto out;
3018
3019                 break;
3020         default:
3021                 status = ICE_ERR_NOT_IMPL;
3022                 goto out;
3023         }
3024
3025         if (blk != ICE_BLK_ACL) {
3026                 /* ACL will handle the entry management */
3027                 ice_acquire_lock(&prof->entries_lock);
3028                 LIST_ADD(&e->l_entry, &prof->entries);
3029                 ice_release_lock(&prof->entries_lock);
3030         }
3031
3032         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3033
3034 out:
3035         if (status && e) {
3036                 if (e->entry)
3037                         ice_free(hw, e->entry);
3038                 ice_free(hw, e);
3039         }
3040
3041         return status;
3042 }
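
/* Illustrative usage sketch (documentation only, not driver code). The
 * buffer offsets, the zeroed action and the prof_id/entry_id/vsi_handle
 * values below are placeholders assumed for the example:
 *
 *	struct ice_flow_seg_info seg = { 0 };
 *	struct ice_flow_action act = { 0 };	// contents depend on the block
 *	u8 data[8] = { 0 };	// match value buffer referenced by offsets
 *	u64 entry_h;
 *
 *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_DA, 0,
 *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 *	// ... create a profile for this segment (e.g. ice_flow_add_prof()),
 *	// then add the entry against that profile ID:
 *	ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, entry_id, vsi_handle,
 *			   ICE_FLOW_PRIO_NORMAL, data, &act, 1, &entry_h);
 */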
3043
3044 /**
3045  * ice_flow_rem_entry - Remove a flow entry
3046  * @hw: pointer to the HW struct
3047  * @blk: classification stage
3048  * @entry_h: handle to the flow entry to be removed
3049  */
3050 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3051                                    u64 entry_h)
3052 {
3053         struct ice_flow_entry *entry;
3054         struct ice_flow_prof *prof;
3055         enum ice_status status = ICE_SUCCESS;
3056
3057         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3058                 return ICE_ERR_PARAM;
3059
3060         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
3061
3062         /* Retain the pointer to the flow profile as the entry will be freed */
3063         prof = entry->prof;
3064
3065         if (prof) {
3066                 ice_acquire_lock(&prof->entries_lock);
3067                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3068                 ice_release_lock(&prof->entries_lock);
3069         }
3070
3071         return status;
3072 }
3073
3074 /**
3075  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3076  * @seg: packet segment the field being set belongs to
3077  * @fld: field to be set
3078  * @field_type: type of the field
3079  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3080  *           entry's input buffer
3081  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3082  *            input buffer
3083  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3084  *            entry's input buffer
3085  *
3086  * This helper function stores information of a field being matched, including
3087  * the type of the field and the locations of the value to match, the mask,
3088  * and the upper-bound value in the start of the input buffer for a flow entry.
3089  * This function should only be used for fixed-size data structures.
3090  *
3091  * This function also opportunistically determines the protocol headers to be
3092  * present based on the fields being set. Some fields cannot be used alone to
3093  * determine the protocol headers present. Sometimes, fields for particular
3094  * protocol headers are not matched. In those cases, the protocol headers
3095  * must be explicitly set.
3096  */
3097 static void
3098 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3099                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3100                      u16 mask_loc, u16 last_loc)
3101 {
3102         u64 bit = BIT_ULL(fld);
3103
3104         seg->match |= bit;
3105         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3106                 seg->range |= bit;
3107
3108         seg->fields[fld].type = field_type;
3109         seg->fields[fld].src.val = val_loc;
3110         seg->fields[fld].src.mask = mask_loc;
3111         seg->fields[fld].src.last = last_loc;
3112
3113         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3114 }
3115
3116 /**
3117  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3118  * @seg: packet segment the field being set belongs to
3119  * @fld: field to be set
3120  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3121  *           entry's input buffer
3122  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3123  *            input buffer
3124  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3125  *            entry's input buffer
3126  * @range: indicate if field being matched is to be in a range
3127  *
3128  * This function specifies the locations, in the form of byte offsets from the
3129  * start of the input buffer for a flow entry, from where the value to match,
3130  * the mask value, and upper value can be extracted. These locations are then
3131  * stored in the flow profile. When adding a flow entry associated with the
3132  * flow profile, these locations will be used to quickly extract the values and
3133  * create the content of a match entry. This function should only be used for
3134  * fixed-size data structures.
3135  */
3136 void
3137 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3138                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3139 {
3140         enum ice_flow_fld_match_type t = range ?
3141                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3142
3143         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3144 }
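
/* Illustrative example for the offsets above (documentation only): if a
 * caller's entry data buffer were laid out as
 *
 *	struct example_buf {	// hypothetical layout, not a driver structure
 *		u8 ipv4_da[4];	// value to match, byte offset 0
 *		u8 ipv4_da_m[4];	// mask, byte offset 4
 *	};
 *
 * the IPv4 destination address could be registered with
 *
 *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 4,
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 */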
3145
3146 /**
3147  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3148  * @seg: packet segment the field being set belongs to
3149  * @fld: field to be set
3150  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3151  *           entry's input buffer
3152  * @pref_loc: location of prefix value from entry's input buffer
3153  * @pref_sz: size of the location holding the prefix value
3154  *
3155  * This function specifies the locations, in the form of byte offsets from the
3156  * start of the input buffer for a flow entry, from where the value to match
3157  * and the IPv4 prefix value can be extracted. These locations are then stored
3158  * in the flow profile. When adding flow entries to the associated flow profile,
3159  * these locations can be used to quickly extract the values to create the
3160  * content of a match entry. This function should only be used for fixed-size
3161  * data structures.
3162  */
3163 void
3164 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3165                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3166 {
3167         /* For this type of field, the "mask" location is for the prefix value's
3168          * location and the "last" location is for the size of the location of
3169          * the prefix value.
3170          */
3171         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3172                              pref_loc, (u16)pref_sz);
3173 }
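
/* Illustrative example (documentation only): to match an IPv4 destination
 * address stored at byte offset 0 of the entry buffer with a one-byte prefix
 * length stored at byte offset 4, a caller could use
 *
 *	ice_flow_set_fld_prefix(seg, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 4, 1);
 */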
3174
3175 /**
3176  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3177  * @seg: packet segment the field being set belongs to
3178  * @off: offset of the raw field from the beginning of the segment in bytes
3179  * @len: length of the raw pattern to be matched
3180  * @val_loc: location of the value to match from entry's input buffer
3181  * @mask_loc: location of mask value from entry's input buffer
3182  *
3183  * This function specifies the offset of the raw field to be matched from the
3184  * beginning of the specified packet segment, and the locations, in the form of
3185  * byte offsets from the start of the input buffer for a flow entry, from where
3186  * the value to match and the mask value can be extracted. These locations are
3187  * then stored in the flow profile. When adding flow entries to the associated
3188  * flow profile, these locations can be used to quickly extract the values to
3189  * create the content of a match entry. This function should only be used for
3190  * fixed-size data structures.
3191  */
3192 void
3193 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3194                      u16 val_loc, u16 mask_loc)
3195 {
3196         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3197                 seg->raws[seg->raws_cnt].off = off;
3198                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3199                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3200                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3201                 /* The "last" field is used to store the length of the field */
3202                 seg->raws[seg->raws_cnt].info.src.last = len;
3203         }
3204
3205         /* Overflows of "raws" will be handled as an error condition later in
3206          * the flow when this information is processed.
3207          */
3208         seg->raws_cnt++;
3209 }
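
/* Illustrative example (documentation only): to match two raw bytes located
 * 12 bytes into the packet segment, with the value and mask stored at byte
 * offsets 0 and 2 of the entry buffer respectively, a caller could use
 *
 *	ice_flow_add_fld_raw(seg, 12, 2, 0, 2);
 */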
3210
3211 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3212         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3213
3214 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3215         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3216
3217 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3218         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3219          ICE_FLOW_SEG_HDR_SCTP)
3220
3221 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3222         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3223          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3224          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3225
3226 /**
3227  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3228  * @segs: pointer to the flow field segment(s)
3229  * @hash_fields: fields to be hashed on for the segment(s)
3230  * @flow_hdr: protocol header fields within a packet segment
3231  *
3232  * Helper function to extract fields from hash bitmap and use flow
3233  * header value to set flow field segment for further use in flow
3234  * profile entry or removal.
3235  */
3236 static enum ice_status
3237 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3238                           u32 flow_hdr)
3239 {
3240         u64 val = hash_fields;
3241         u8 i;
3242
3243         for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3244                 u64 bit = BIT_ULL(i);
3245
3246                 if (val & bit) {
3247                         ice_flow_set_fld(segs, (enum ice_flow_field)i,
3248                                          ICE_FLOW_FLD_OFF_INVAL,
3249                                          ICE_FLOW_FLD_OFF_INVAL,
3250                                          ICE_FLOW_FLD_OFF_INVAL, false);
3251                         val &= ~bit;
3252                 }
3253         }
3254         ICE_FLOW_SET_HDRS(segs, flow_hdr);
3255
3256         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3257             ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
3258                 return ICE_ERR_PARAM;
3259
3260         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3261         if (val && !ice_is_pow2(val))
3262                 return ICE_ERR_CFG;
3263
3264         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3265         if (val && !ice_is_pow2(val))
3266                 return ICE_ERR_CFG;
3267
3268         return ICE_SUCCESS;
3269 }
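
/* Illustrative example (documentation only): a segment carrying
 * ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP passes the checks above,
 * whereas ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP fails the L4
 * power-of-two check and returns ICE_ERR_CFG.
 */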
3270
3271 /**
3272  * ice_rem_vsi_rss_list - remove VSI from RSS list
3273  * @hw: pointer to the hardware structure
3274  * @vsi_handle: software VSI handle
3275  *
3276  * Remove the VSI from all RSS configurations in the list.
3277  */
3278 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3279 {
3280         struct ice_rss_cfg *r, *tmp;
3281
3282         if (LIST_EMPTY(&hw->rss_list_head))
3283                 return;
3284
3285         ice_acquire_lock(&hw->rss_locks);
3286         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3287                                  ice_rss_cfg, l_entry)
3288                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3289                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3290                                 LIST_DEL(&r->l_entry);
3291                                 ice_free(hw, r);
3292                         }
3293         ice_release_lock(&hw->rss_locks);
3294 }
3295
3296 /**
3297  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3298  * @hw: pointer to the hardware structure
3299  * @vsi_handle: software VSI handle
3300  *
3301  * This function will iterate through all flow profiles and disassociate
3302  * the VSI from each profile. If the flow profile has no VSIs, it will
3303  * be removed.
3304  */
3305 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3306 {
3307         const enum ice_block blk = ICE_BLK_RSS;
3308         struct ice_flow_prof *p, *t;
3309         enum ice_status status = ICE_SUCCESS;
3310
3311         if (!ice_is_vsi_valid(hw, vsi_handle))
3312                 return ICE_ERR_PARAM;
3313
3314         if (LIST_EMPTY(&hw->fl_profs[blk]))
3315                 return ICE_SUCCESS;
3316
3317         ice_acquire_lock(&hw->rss_locks);
3318         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3319                                  l_entry)
3320                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3321                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3322                         if (status)
3323                                 break;
3324
3325                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3326                                 status = ice_flow_rem_prof(hw, blk, p->id);
3327                                 if (status)
3328                                         break;
3329                         }
3330                 }
3331         ice_release_lock(&hw->rss_locks);
3332
3333         return status;
3334 }
3335
3336 /**
3337  * ice_rem_rss_list - remove RSS configuration from list
3338  * @hw: pointer to the hardware structure
3339  * @vsi_handle: software VSI handle
3340  * @prof: pointer to flow profile
3341  *
3342  * Assumption: lock has already been acquired for RSS list
3343  */
3344 static void
3345 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3346 {
3347         struct ice_rss_cfg *r, *tmp;
3348
3349         /* Search for RSS hash fields associated with the VSI that match the
3350          * hash configurations associated with the flow profile. If found,
3351          * remove the entry from the VSI context's RSS entry list and delete it.
3352          */
3353         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3354                                  ice_rss_cfg, l_entry)
3355                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3356                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3357                         ice_clear_bit(vsi_handle, r->vsis);
3358                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3359                                 LIST_DEL(&r->l_entry);
3360                                 ice_free(hw, r);
3361                         }
3362                         return;
3363                 }
3364 }
3365
3366 /**
3367  * ice_add_rss_list - add RSS configuration to list
3368  * @hw: pointer to the hardware structure
3369  * @vsi_handle: software VSI handle
3370  * @prof: pointer to flow profile
3371  *
3372  * Assumption: lock has already been acquired for RSS list
3373  */
3374 static enum ice_status
3375 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3376 {
3377         struct ice_rss_cfg *r, *rss_cfg;
3378
3379         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3380                             ice_rss_cfg, l_entry)
3381                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3382                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3383                         ice_set_bit(vsi_handle, r->vsis);
3384                         return ICE_SUCCESS;
3385                 }
3386
3387         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3388         if (!rss_cfg)
3389                 return ICE_ERR_NO_MEMORY;
3390
3391         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3392         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3393         rss_cfg->symm = prof->cfg.symm;
3394         ice_set_bit(vsi_handle, rss_cfg->vsis);
3395
3396         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3397
3398         return ICE_SUCCESS;
3399 }
3400
3401 #define ICE_FLOW_PROF_HASH_S    0
3402 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3403 #define ICE_FLOW_PROF_HDR_S     32
3404 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3405 #define ICE_FLOW_PROF_ENCAP_S   63
3406 #define ICE_FLOW_PROF_ENCAP_M   (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
3407
3408 #define ICE_RSS_OUTER_HEADERS   1
3409 #define ICE_RSS_INNER_HEADERS   2
3410
3411 /* Flow profile ID format:
3412  * [0:31] - Packet match fields
3413  * [32:62] - Protocol header
3414  * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
3415  */
3416 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3417         (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3418               (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3419               ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
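
/* Example of the ID layout above (documentation only): with segs_cnt == 2
 * (tunneled), ICE_FLOW_GEN_PROFID(hash, hdr, 2) keeps the low 32 bits of
 * 'hash', places the masked 'hdr' bits starting at bit 32 and sets the
 * encapsulation bit 63; with segs_cnt == 1 the encapsulation bit stays clear.
 */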
3420
3421 static void
3422 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3423 {
3424         u32 s = ((src % 4) << 3); /* byte shift */
3425         u32 v = dst | 0x80; /* value to program */
3426         u8 i = src / 4; /* register index */
3427         u32 reg;
3428
3429         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3430         reg = (reg & ~(0xff << s)) | (v << s);
3431         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3432 }
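
/* Worked example for the register math above (documentation only): for
 * src = 5 and dst = 9, the register index is 5 / 4 = 1, the byte shift is
 * (5 % 4) * 8 = 8 bits, and the value 9 | 0x80 = 0x89 is written into bits
 * 15:8 of GLQF_HSYMM(prof_id, 1), leaving the other bytes untouched.
 */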
3433
3434 static void
3435 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3436 {
3437         int fv_last_word =
3438                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3439         int i;
3440
3441         for (i = 0; i < len; i++) {
3442                 ice_rss_config_xor_word(hw, prof_id,
3443                                         /* Yes, field vector in GLQF_HSYMM and
3444                                          * GLQF_HINSET is inverted!
3445                                          */
3446                                         fv_last_word - (src + i),
3447                                         fv_last_word - (dst + i));
3448                 ice_rss_config_xor_word(hw, prof_id,
3449                                         fv_last_word - (dst + i),
3450                                         fv_last_word - (src + i));
3451         }
3452 }
3453
3454 static void
3455 ice_rss_update_symm(struct ice_hw *hw,
3456                     struct ice_flow_prof *prof)
3457 {
3458         struct ice_prof_map *map;
3459         u8 prof_id, m;
3460
3461         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3462         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3463         if (map)
3464                 prof_id = map->prof_id;
3465         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3466         if (!map)
3467                 return;
3468         /* clear to default */
3469         for (m = 0; m < 6; m++)
3470                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3471         if (prof->cfg.symm) {
3472                 struct ice_flow_seg_info *seg =
3473                         &prof->segs[prof->segs_cnt - 1];
3474
3475                 struct ice_flow_seg_xtrct *ipv4_src =
3476                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3477                 struct ice_flow_seg_xtrct *ipv4_dst =
3478                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3479                 struct ice_flow_seg_xtrct *ipv6_src =
3480                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3481                 struct ice_flow_seg_xtrct *ipv6_dst =
3482                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3483
3484                 struct ice_flow_seg_xtrct *tcp_src =
3485                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3486                 struct ice_flow_seg_xtrct *tcp_dst =
3487                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3488
3489                 struct ice_flow_seg_xtrct *udp_src =
3490                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3491                 struct ice_flow_seg_xtrct *udp_dst =
3492                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3493
3494                 struct ice_flow_seg_xtrct *sctp_src =
3495                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3496                 struct ice_flow_seg_xtrct *sctp_dst =
3497                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3498
3499                 /* xor IPv4 */
3500                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3501                         ice_rss_config_xor(hw, prof_id,
3502                                            ipv4_src->idx, ipv4_dst->idx, 2);
3503
3504                 /* xor IPv6 */
3505                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3506                         ice_rss_config_xor(hw, prof_id,
3507                                            ipv6_src->idx, ipv6_dst->idx, 8);
3508
3509                 /* xor TCP */
3510                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3511                         ice_rss_config_xor(hw, prof_id,
3512                                            tcp_src->idx, tcp_dst->idx, 1);
3513
3514                 /* xor UDP */
3515                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3516                         ice_rss_config_xor(hw, prof_id,
3517                                            udp_src->idx, udp_dst->idx, 1);
3518
3519                 /* xor SCTP */
3520                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3521                         ice_rss_config_xor(hw, prof_id,
3522                                            sctp_src->idx, sctp_dst->idx, 1);
3523         }
3524 }
3525
3526 /**
3527  * ice_add_rss_cfg_sync - add an RSS configuration
3528  * @hw: pointer to the hardware structure
3529  * @vsi_handle: software VSI handle
3530  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3531  * @addl_hdrs: protocol header fields
3532  * @segs_cnt: packet segment count
3533  * @symm: symmetric hash enable/disable
3534  *
3535  * Assumption: lock has already been acquired for RSS list
3536  */
3537 static enum ice_status
3538 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3539                      u32 addl_hdrs, u8 segs_cnt, bool symm)
3540 {
3541         const enum ice_block blk = ICE_BLK_RSS;
3542         struct ice_flow_prof *prof = NULL;
3543         struct ice_flow_seg_info *segs;
3544         enum ice_status status;
3545
3546         if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3547                 return ICE_ERR_PARAM;
3548
3549         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3550                                                       sizeof(*segs));
3551         if (!segs)
3552                 return ICE_ERR_NO_MEMORY;
3553
3554         /* Construct the packet segment info from the hashed fields */
3555         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3556                                            addl_hdrs);
3557         if (status)
3558                 goto exit;
3559
3560         /* Don't do RSS for GTPU outer; 'exit' frees segs */
3561         if (segs_cnt == ICE_RSS_OUTER_HEADERS &&
3562             (segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU)) {
3563                 status = ICE_SUCCESS;
3564                 goto exit;
3565         }
3566
3567         /* Search for a flow profile that has matching headers, hash fields
3568          * and has the input VSI associated with it. If found, no further
3569          * operations are required, so exit.
3570          */
3571         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3572                                         vsi_handle,
3573                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
3574                                         ICE_FLOW_FIND_PROF_CHK_VSI);
3575         if (prof) {
3576                 if (prof->cfg.symm == symm)
3577                         goto exit;
3578                 prof->cfg.symm = symm;
3579                 goto update_symm;
3580         }
3581
3582         /* Check if a flow profile exists with the same protocol headers and
3583          * associated with the input VSI. If so disassociate the VSI from
3584          * this profile. The VSI will be added to a new profile created with
3585          * the protocol header and new hash field configuration.
3586          */
3587         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3588                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3589         if (prof) {
3590                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3591                 if (!status)
3592                         ice_rem_rss_list(hw, vsi_handle, prof);
3593                 else
3594                         goto exit;
3595
3596                 /* Remove profile if it has no VSIs associated */
3597                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3598                         status = ice_flow_rem_prof(hw, blk, prof->id);
3599                         if (status)
3600                                 goto exit;
3601                 }
3602         }
3603
3604         /* Search for a profile that has the same match fields only. If this
3605          * exists then associate the VSI with this profile.
3606          */
3607         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3608                                         vsi_handle,
3609                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3610         if (prof) {
3611                 if (prof->cfg.symm == symm) {
3612                         status = ice_flow_assoc_prof(hw, blk, prof,
3613                                                      vsi_handle);
3614                         if (!status)
3615                                 status = ice_add_rss_list(hw, vsi_handle,
3616                                                           prof);
3617                 } else {
3618                         /* if a profile exists but with a different symmetric
3619                          * requirement, just return an error.
3620                          */
3621                         status = ICE_ERR_NOT_SUPPORTED;
3622                 }
3623                 goto exit;
3624         }
3625
3626         /* Create a new flow profile with generated profile and packet
3627          * segment information.
3628          */
3629         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3630                                    ICE_FLOW_GEN_PROFID(hashed_flds,
3631                                                        segs[segs_cnt - 1].hdrs,
3632                                                        segs_cnt),
3633                                    segs, segs_cnt, NULL, 0, &prof);
3634         if (status)
3635                 goto exit;
3636
3637         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3638         /* If association to a new flow profile failed then this profile can
3639          * be removed.
3640          */
3641         if (status) {
3642                 ice_flow_rem_prof(hw, blk, prof->id);
3643                 goto exit;
3644         }
3645
3646         status = ice_add_rss_list(hw, vsi_handle, prof);
3647
3648         prof->cfg.symm = symm;
3649
3650 update_symm:
3651         ice_rss_update_symm(hw, prof);
3652
3653 exit:
3654         ice_free(hw, segs);
3655         return status;
3656 }
3657
3658 /**
3659  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3660  * @hw: pointer to the hardware structure
3661  * @vsi_handle: software VSI handle
3662  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3663  * @addl_hdrs: protocol header fields
3664  * @symm: symmetric hash enable/disable
3665  *
3666  * This function will generate a flow profile based on the input fields to
3667  * hash on and the flow type, and will use the VSI number to add a flow
3668  * entry to the profile.
3669  */
3670 enum ice_status
3671 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3672                 u32 addl_hdrs, bool symm)
3673 {
3674         enum ice_status status;
3675
3676         if (hashed_flds == ICE_HASH_INVALID ||
3677             !ice_is_vsi_valid(hw, vsi_handle))
3678                 return ICE_ERR_PARAM;
3679
3680         ice_acquire_lock(&hw->rss_locks);
3681         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3682                                       ICE_RSS_OUTER_HEADERS, symm);
3683
3684         if (!status)
3685                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3686                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3687                                               symm);
3688         ice_release_lock(&hw->rss_locks);
3689
3690         return status;
3691 }
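
/* Illustrative usage sketch (documentation only): to hash plain IPv4 packets
 * on the IPv4 source and destination addresses with symmetric hashing
 * disabled, a caller could use
 *
 *	u64 flds = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *		   BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA);
 *
 *	ice_add_rss_cfg(hw, vsi_handle, flds, ICE_FLOW_SEG_HDR_IPV4, false);
 */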
3692
3693 /**
3694  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3695  * @hw: pointer to the hardware structure
3696  * @vsi_handle: software VSI handle
3697  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3698  * @addl_hdrs: Protocol header fields within a packet segment
3699  * @segs_cnt: packet segment count
3700  *
3701  * Assumption: lock has already been acquired for RSS list
3702  */
3703 static enum ice_status
3704 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3705                      u32 addl_hdrs, u8 segs_cnt)
3706 {
3707         const enum ice_block blk = ICE_BLK_RSS;
3708         struct ice_flow_seg_info *segs;
3709         struct ice_flow_prof *prof;
3710         enum ice_status status;
3711
3712         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3713                                                       sizeof(*segs));
3714         if (!segs)
3715                 return ICE_ERR_NO_MEMORY;
3716
3717         /* Construct the packet segment info from the hashed fields */
3718         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3719                                            addl_hdrs);
3720         if (status)
3721                 goto out;
3722         /* Don't remove RSS for GTPU outer; 'out' frees segs */
3723         if (segs_cnt == ICE_RSS_OUTER_HEADERS &&
3724             (segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU))
3725                 goto out;
3726
3727         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3728                                         vsi_handle,
3729                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3730         if (!prof) {
3731                 status = ICE_ERR_DOES_NOT_EXIST;
3732                 goto out;
3733         }
3734
3735         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3736         if (status)
3737                 goto out;
3738
3739         /* Remove RSS configuration from VSI context before deleting
3740          * the flow profile.
3741          */
3742         ice_rem_rss_list(hw, vsi_handle, prof);
3743
3744         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3745                 status = ice_flow_rem_prof(hw, blk, prof->id);
3746
3747 out:
3748         ice_free(hw, segs);
3749         return status;
3750 }
3751
3752 /**
3753  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3754  * @hw: pointer to the hardware structure
3755  * @vsi_handle: software VSI handle
3756  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3757  * @addl_hdrs: Protocol header fields within a packet segment
3758  *
3759  * This function will lookup the flow profile based on the input
3760  * hash field bitmap, iterate through the profile entry list of
3761  * that profile and find entry associated with input VSI to be
3762  * that profile, and find the entry associated with the input VSI to be
3763  * removed. Calls are made to underlying flow APIs which will in
3764  */
3765 enum ice_status
3766 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3767                 u32 addl_hdrs)
3768 {
3769         enum ice_status status;
3770
3771         if (hashed_flds == ICE_HASH_INVALID ||
3772             !ice_is_vsi_valid(hw, vsi_handle))
3773                 return ICE_ERR_PARAM;
3774
3775         ice_acquire_lock(&hw->rss_locks);
3776         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3777                                       ICE_RSS_OUTER_HEADERS);
3778         if (!status)
3779                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3780                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3781         ice_release_lock(&hw->rss_locks);
3782
3783         return status;
3784 }
3785
3786 /**
3787  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3788  * @hw: pointer to the hardware structure
3789  * @vsi_handle: software VSI handle
3790  */
3791 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3792 {
3793         enum ice_status status = ICE_SUCCESS;
3794         struct ice_rss_cfg *r;
3795
3796         if (!ice_is_vsi_valid(hw, vsi_handle))
3797                 return ICE_ERR_PARAM;
3798
3799         ice_acquire_lock(&hw->rss_locks);
3800         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3801                             ice_rss_cfg, l_entry) {
3802                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3803                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3804                                                       r->hashed_flds,
3805                                                       r->packet_hdr,
3806                                                       ICE_RSS_OUTER_HEADERS,
3807                                                       r->symm);
3808                         if (status)
3809                                 break;
3810                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3811                                                       r->hashed_flds,
3812                                                       r->packet_hdr,
3813                                                       ICE_RSS_INNER_HEADERS,
3814                                                       r->symm);
3815                         if (status)
3816                                 break;
3817                 }
3818         }
3819         ice_release_lock(&hw->rss_locks);
3820
3821         return status;
3822 }
3823
3824 /**
3825  * ice_get_rss_cfg - returns hashed fields for the given header types
3826  * @hw: pointer to the hardware structure
3827  * @vsi_handle: software VSI handle
3828  * @hdrs: protocol header type
3829  *
3830  * This function will return the match fields of the first instance of a flow
3831  * profile having the given header types and containing the input VSI
3832  */
3833 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3834 {
3835         u64 rss_hash = ICE_HASH_INVALID;
3836         struct ice_rss_cfg *r;
3837
3838         /* verify if the protocol header is non zero and VSI is valid */
3839         /* verify that the protocol header is non-zero and the VSI is valid */
3840                 return ICE_HASH_INVALID;
3841
3842         ice_acquire_lock(&hw->rss_locks);
3843         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3844                             ice_rss_cfg, l_entry)
3845                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3846                     r->packet_hdr == hdrs) {
3847                         rss_hash = r->hashed_flds;
3848                         break;
3849                 }
3850         ice_release_lock(&hw->rss_locks);
3851
3852         return rss_hash;
3853 }