net/ice/base: group case statements
[dpdk.git] / drivers / net / ice / base / ice_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
/* Size of known protocol header fields, in bytes.  These feed the
 * ICE_FLOW_FLD_INFO() macros below, which convert bytes to bits.
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE        2
#define ICE_FLOW_FLD_SZ_VLAN            2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
#define ICE_FLOW_FLD_SZ_IP_DSCP         1
#define ICE_FLOW_FLD_SZ_IP_TTL          1
#define ICE_FLOW_FLD_SZ_IP_PROT         1
#define ICE_FLOW_FLD_SZ_PORT            2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
#define ICE_FLOW_FLD_SZ_ICMP_CODE       1
#define ICE_FLOW_FLD_SZ_ARP_OPER        2
#define ICE_FLOW_FLD_SZ_GRE_KEYID       4
#define ICE_FLOW_FLD_SZ_GTP_TEID        4
#define ICE_FLOW_FLD_SZ_GTP_QFI         2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
25
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;	/* Protocol header containing the field */
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
	u16 mask;	/* 16-bit mask for field; 0 when no mask applies */
};
33
/* Build an ice_flow_field_info initializer; offset and size are given in
 * bytes and converted to bits here.  No field mask is applied.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = 0, \
}

/* Same as ICE_FLOW_FLD_INFO() but also records a 16-bit field mask */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
47
/* Table containing properties of supported protocol header fields.
 * Entries are indexed by, and must remain in, ICE_FLOW_FIELD_IDX_* order.
 */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* Ether */
	/* ICE_FLOW_FIELD_IDX_ETH_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ETH_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_S_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_C_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE),
	/* IPv4 / IPv6 */
	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x00fc),
	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x0ff0),
	/* NOTE(review): the four TTL/PROT entries below use HDR_NONE as the
	 * header; presumably the proper IP header is resolved elsewhere —
	 * confirm against code outside this chunk.
	 */
	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* Transport */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
	/* ARP */
	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_OP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
	/* ICMP */
	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
	/* GTP */
	/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
			      ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
	/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* PPPOE */
	/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
			  ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
};
147
/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Each table below is a bitmap over the ICE_FLOW_PTYPE_MAX packet types,
 * stored as u32 words and cast to ice_bitmap_t when it is ANDed into the
 * set of candidate ptypes for a flow profile (see ice_flow_proc_seg_hdrs).
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0003000F, 0x000FC000, 0x03E0F800, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x001FF800, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
	0x00000800, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
234
/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00410000, 0x10842000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00820000, 0x21084000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x01040000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
	0x10000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
	0x00000000, 0x02040408, 0x40000102, 0x08101020,
	0x00000408, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x42108000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
296
/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC header.
 * Currently empty: no ptype is selected by an innermost MAC header alone.
 */
static const u32 ice_ptypes_mac_il[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC */
static const u32 ice_ptypes_gtpc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000180, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC with TEID */
static const u32 ice_ptypes_gtpc_tid[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000060, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
344
345 /* Packet types for GTPU */
346 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
347         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
348         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
349         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
350         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
351         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
352         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
353         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
354         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
355         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
356         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
357         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
358         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
359         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
360         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
361         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
362         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
363         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
364         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
365         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
366         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
367 };
368
369 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
370         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
371         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
372         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
373         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
374         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
375         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
376         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
377         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
378         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
379         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
380         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
381         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
382         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
383         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
384         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
385         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
386         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
387         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
388         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
389         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
390 };
391
392 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
393         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
394         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
395         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
396         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
397         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
398         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
399         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
400         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
401         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
402         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
403         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
404         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
405         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
406         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
407         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
408         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
409         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
410         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
411         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
412         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
413 };
414
/* Packet types for GTPU (shared by the downlink/uplink/EH cases above) */
static const u32 ice_ptypes_gtpu[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for pppoe */
static const u32 ice_ptypes_pppoe[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
437
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk;	/* HW block the profile is created for */
	u16 entry_length; /* # of bytes formatted entry will require */
	u8 es_cnt;	/* count of @es entries in use — by name; confirm */
	struct ice_flow_prof *prof;	/* profile under construction */

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];
	/* attributes can be used to add attributes to a particular PTYPE;
	 * set by ice_flow_proc_seg_hdrs() for GTPU segments
	 */
	const struct ice_ptype_attributes *attr;
	u16 attr_cnt;	/* number of entries in @attr */

	u16 mask[ICE_MAX_FV_WORDS];	/* per-word masks matching @es */
	/* ptypes matched by the profile; filled by ice_flow_proc_seg_hdrs() */
	ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
};
456
/* Tunnel/session headers grouped for RSS inner-segment handling.
 * NOTE(review): grouping semantics inferred from the macro name — confirm
 * against the RSS code using it (outside this chunk).
 */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU)

/* All supported L2 header flags */
#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* All supported L3 header flags; at most one may be set per segment */
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
/* All supported L4 header flags; at most one may be set per segment */
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
469
470 /**
471  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
472  * @segs: array of one or more packet segments that describe the flow
473  * @segs_cnt: number of packet segments provided
474  */
475 static enum ice_status
476 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
477 {
478         u8 i;
479
480         for (i = 0; i < segs_cnt; i++) {
481                 /* Multiple L3 headers */
482                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
483                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
484                         return ICE_ERR_PARAM;
485
486                 /* Multiple L4 headers */
487                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
488                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
489                         return ICE_ERR_PARAM;
490         }
491
492         return ICE_SUCCESS;
493 }
494
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC        14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4       20
#define ICE_FLOW_PROT_HDR_SZ_IPV6       40
#define ICE_FLOW_PROT_HDR_SZ_ARP        28
#define ICE_FLOW_PROT_HDR_SZ_ICMP       8
#define ICE_FLOW_PROT_HDR_SZ_TCP        20
#define ICE_FLOW_PROT_HDR_SZ_UDP        8
#define ICE_FLOW_PROT_HDR_SZ_SCTP       12
505
506 /**
507  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
508  * @params: information about the flow to be processed
509  * @seg: index of packet segment whose header size is to be determined
510  */
511 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
512 {
513         u16 sz;
514
515         /* L2 headers */
516         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
517                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
518
519         /* L3 headers */
520         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
521                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
522         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
523                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
524         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
525                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
526         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
527                 /* A L3 header is required if L4 is specified */
528                 return 0;
529
530         /* L4 headers */
531         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
532                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
533         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
534                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
535         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
536                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
537         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
538                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
539
540         return sz;
541 }
542
543 /**
544  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
545  * @params: information about the flow to be processed
546  *
547  * This function identifies the packet types associated with the protocol
548  * headers being present in packet segments of the specified flow profile.
549  */
550 static enum ice_status
551 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
552 {
553         struct ice_flow_prof *prof;
554         u8 i;
555
556         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
557                    ICE_NONDMA_MEM);
558
559         prof = params->prof;
560
561         for (i = 0; i < params->prof->segs_cnt; i++) {
562                 const ice_bitmap_t *src;
563                 u32 hdrs;
564
565                 hdrs = prof->segs[i].hdrs;
566
567                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
568                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
569                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
570                         ice_and_bitmap(params->ptypes, params->ptypes, src,
571                                        ICE_FLOW_PTYPE_MAX);
572                 }
573
574                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
575                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
576                         ice_and_bitmap(params->ptypes, params->ptypes, src,
577                                        ICE_FLOW_PTYPE_MAX);
578                 }
579
580                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
581                         ice_and_bitmap(params->ptypes, params->ptypes,
582                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
583                                        ICE_FLOW_PTYPE_MAX);
584                 }
585
586                 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
587                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
588                         ice_and_bitmap(params->ptypes, params->ptypes, src,
589                                        ICE_FLOW_PTYPE_MAX);
590                 }
591
592                 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
593                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
594                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
595                         ice_and_bitmap(params->ptypes, params->ptypes, src,
596                                        ICE_FLOW_PTYPE_MAX);
597                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
598                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
599                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
600                         ice_and_bitmap(params->ptypes, params->ptypes, src,
601                                        ICE_FLOW_PTYPE_MAX);
602                 }
603
604                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
605                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
606                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
607                         ice_and_bitmap(params->ptypes, params->ptypes, src,
608                                        ICE_FLOW_PTYPE_MAX);
609                 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
610                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
611                         ice_and_bitmap(params->ptypes, params->ptypes, src,
612                                        ICE_FLOW_PTYPE_MAX);
613                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
614                         ice_and_bitmap(params->ptypes, params->ptypes,
615                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
616                                        ICE_FLOW_PTYPE_MAX);
617                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
618                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
619                         ice_and_bitmap(params->ptypes, params->ptypes, src,
620                                        ICE_FLOW_PTYPE_MAX);
621                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
622                         if (!i) {
623                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
624                                 ice_and_bitmap(params->ptypes, params->ptypes,
625                                                src, ICE_FLOW_PTYPE_MAX);
626                         }
627                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
628                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
629                         ice_and_bitmap(params->ptypes, params->ptypes,
630                                        src, ICE_FLOW_PTYPE_MAX);
631                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
632                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
633                         ice_and_bitmap(params->ptypes, params->ptypes,
634                                        src, ICE_FLOW_PTYPE_MAX);
635                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
636                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
637                         ice_and_bitmap(params->ptypes, params->ptypes,
638                                        src, ICE_FLOW_PTYPE_MAX);
639
640                         /* Attributes for GTP packet with downlink */
641                         params->attr = ice_attr_gtpu_down;
642                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
643                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
644                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
645                         ice_and_bitmap(params->ptypes, params->ptypes,
646                                        src, ICE_FLOW_PTYPE_MAX);
647
648                         /* Attributes for GTP packet with uplink */
649                         params->attr = ice_attr_gtpu_up;
650                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
651                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
652                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
653                         ice_and_bitmap(params->ptypes, params->ptypes,
654                                        src, ICE_FLOW_PTYPE_MAX);
655
656                         /* Attributes for GTP packet with Extension Header */
657                         params->attr = ice_attr_gtpu_eh;
658                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
659                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
660                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
661                         ice_and_bitmap(params->ptypes, params->ptypes,
662                                        src, ICE_FLOW_PTYPE_MAX);
663                 }
664         }
665
666         return ICE_SUCCESS;
667 }
668
669 /**
670  * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
671  * @hw: pointer to the HW struct
672  * @params: information about the flow to be processed
673  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
674  *
675  * This function will allocate an extraction sequence entries for a DWORD size
676  * chunk of the packet flags.
677  */
678 static enum ice_status
679 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
680                           struct ice_flow_prof_params *params,
681                           enum ice_flex_mdid_pkt_flags flags)
682 {
683         u8 fv_words = hw->blk[params->blk].es.fvw;
684         u8 idx;
685
686         /* Make sure the number of extraction sequence entries required does not
687          * exceed the block's capacity.
688          */
689         if (params->es_cnt >= fv_words)
690                 return ICE_ERR_MAX_LIMIT;
691
692         /* some blocks require a reversed field vector layout */
693         if (hw->blk[params->blk].es.reverse)
694                 idx = fv_words - params->es_cnt - 1;
695         else
696                 idx = params->es_cnt;
697
698         params->es[idx].prot_id = ICE_PROT_META_ID;
699         params->es[idx].off = flags;
700         params->es_cnt++;
701
702         return ICE_SUCCESS;
703 }
704
/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 * @match: bitfield of all fields of this segment being matched
 *
 * This function determines the protocol ID, offset, and size of the given
 * field. It then allocates one or more extraction sequence entries for the
 * given field, and fills the entries with protocol ID and offset information.
 */
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
		    u8 seg, enum ice_flow_field fld, u64 match)
{
	enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
	u8 fv_words = hw->blk[params->blk].es.fvw;
	struct ice_flow_fld_info *flds;
	u16 cnt, ese_bits, i;
	u16 sib_mask = 0;	/* mask of the sibling field, if any is matched */
	s16 adj = 0;		/* extra bit-offset adjustment; no field sets it here */
	u16 mask;
	u16 off;

	flds = params->prof->segs[seg].fields;

	/* Resolve the HW protocol ID for the field. For protocols that can
	 * appear in both outer and inner headers, the first segment (seg == 0)
	 * maps to the outer/single variant, later segments to the inner one.
	 */
	switch (fld) {
	case ICE_FLOW_FIELD_IDX_ETH_DA:
	case ICE_FLOW_FIELD_IDX_ETH_SA:
	case ICE_FLOW_FIELD_IDX_S_VLAN:
	case ICE_FLOW_FIELD_IDX_C_VLAN:
		prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
		break;
	case ICE_FLOW_FIELD_IDX_ETH_TYPE:
		prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_TTL:
	case ICE_FLOW_FIELD_IDX_IPV4_PROT:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;

		/* TTL and PROT share the same extraction seq. entry.
		 * Each is considered a sibling to the other in terms of sharing
		 * the same extraction sequence entry.
		 */
		if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
			sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
		else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
			sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;

		/* If the sibling field is also included, that field's
		 * mask needs to be included.
		 */
		if (match & BIT(sib))
			sib_mask = ice_flds_info[sib].mask;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_TTL:
	case ICE_FLOW_FIELD_IDX_IPV6_PROT:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;

		/* TTL and PROT share the same extraction seq. entry.
		 * Each is considered a sibling to the other in terms of sharing
		 * the same extraction sequence entry.
		 */
		if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
			sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
		else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
			sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;

		/* If the sibling field is also included, that field's
		 * mask needs to be included.
		 */
		if (match & BIT(sib))
			sib_mask = ice_flds_info[sib].mask;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_SA:
	case ICE_FLOW_FIELD_IDX_IPV4_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
		prot_id = ICE_PROT_TCP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
		prot_id = ICE_PROT_SCTP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_GTPC_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
		/* GTP is accessed through UDP OF protocol */
		prot_id = ICE_PROT_UDP_OF;
		break;
	case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
		prot_id = ICE_PROT_PPPOE;
		break;
	case ICE_FLOW_FIELD_IDX_ARP_SIP:
	case ICE_FLOW_FIELD_IDX_ARP_DIP:
	case ICE_FLOW_FIELD_IDX_ARP_SHA:
	case ICE_FLOW_FIELD_IDX_ARP_DHA:
	case ICE_FLOW_FIELD_IDX_ARP_OP:
		prot_id = ICE_PROT_ARP_OF;
		break;
	case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
	case ICE_FLOW_FIELD_IDX_ICMP_CODE:
		/* ICMP type and code share the same extraction seq. entry */
		prot_id = (params->prof->segs[seg].hdrs &
			   ICE_FLOW_SEG_HDR_IPV4) ?
			ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
		sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
			ICE_FLOW_FIELD_IDX_ICMP_CODE :
			ICE_FLOW_FIELD_IDX_ICMP_TYPE;
		break;
	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
		prot_id = ICE_PROT_GRE_OF;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	/* Each extraction sequence entry is a word in size, and extracts a
	 * word-aligned offset from a protocol header.
	 */
	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;

	/* Record where/how the field is extracted: word-aligned byte offset
	 * within the protocol header, plus the bit displacement within that
	 * word for fields that are not word-aligned.
	 */
	flds[fld].xtrct.prot_id = prot_id;
	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
		ICE_FLOW_FV_EXTRACT_SZ;
	flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
	flds[fld].xtrct.idx = params->es_cnt;
	flds[fld].xtrct.mask = ice_flds_info[fld].mask;

	/* Adjust the next field-entry index after accommodating the number of
	 * entries this field consumes
	 */
	cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
				  ice_flds_info[fld].size, ese_bits);

	/* Fill in the extraction sequence entries needed for this field */
	off = flds[fld].xtrct.off;
	mask = flds[fld].xtrct.mask;
	for (i = 0; i < cnt; i++) {
		/* Only consume an extraction sequence entry if there is no
		 * sibling field associated with this field or the sibling entry
		 * already extracts the word shared with this field.
		 */
		if (sib == ICE_FLOW_FIELD_IDX_MAX ||
		    flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
		    flds[sib].xtrct.off != off) {
			u8 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= fv_words)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			/* The entry mask also carries the sibling's mask so one
			 * shared entry matches both fields.
			 */
			params->es[idx].prot_id = prot_id;
			params->es[idx].off = off;
			params->mask[idx] = mask | sib_mask;
			params->es_cnt++;
		}

		off += ICE_FLOW_FV_EXTRACT_SZ;
	}

	return ICE_SUCCESS;
}
898
/**
 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose raw fields are to be extracted
 */
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
		     u8 seg)
{
	u16 fv_words;
	u16 hdrs_sz;
	u8 i;

	/* Nothing to do when the segment carries no raw fields */
	if (!params->prof->segs[seg].raws_cnt)
		return ICE_SUCCESS;

	if (params->prof->segs[seg].raws_cnt >
	    ARRAY_SIZE(params->prof->segs[seg].raws))
		return ICE_ERR_MAX_LIMIT;

	/* Offsets within the segment headers are not supported */
	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
	if (!hdrs_sz)
		return ICE_ERR_PARAM;

	fv_words = hw->blk[params->blk].es.fvw;

	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
		struct ice_flow_seg_fld_raw *raw;
		u16 off, cnt, j;

		raw = &params->prof->segs[seg].raws[i];

		/* Storing extraction information: raw bytes are always pulled
		 * relative to the outer MAC header, word-aligned, with the
		 * byte remainder recorded as a bit displacement.
		 */
		raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
		raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
			ICE_FLOW_FV_EXTRACT_SZ;
		raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
			BITS_PER_BYTE;
		raw->info.xtrct.idx = params->es_cnt;

		/* Determine the number of field vector entries this raw field
		 * consumes.
		 */
		cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
					  (raw->info.src.last * BITS_PER_BYTE),
					  (ICE_FLOW_FV_EXTRACT_SZ *
					   BITS_PER_BYTE));
		off = raw->info.xtrct.off;
		for (j = 0; j < cnt; j++) {
			u16 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 *
			 * NOTE(review): this bound uses es.count and
			 * ICE_MAX_FV_WORDS while ice_flow_xtract_fld bounds
			 * against es.fvw — confirm the two limits agree.
			 */
			if (params->es_cnt >= hw->blk[params->blk].es.count ||
			    params->es_cnt >= ICE_MAX_FV_WORDS)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			params->es[idx].prot_id = raw->info.xtrct.prot_id;
			params->es[idx].off = off;
			params->es_cnt++;
			off += ICE_FLOW_FV_EXTRACT_SZ;
		}
	}

	return ICE_SUCCESS;
}
974
975 /**
976  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
977  * @hw: pointer to the HW struct
978  * @params: information about the flow to be processed
979  *
980  * This function iterates through all matched fields in the given segments, and
981  * creates an extraction sequence for the fields.
982  */
983 static enum ice_status
984 ice_flow_create_xtrct_seq(struct ice_hw *hw,
985                           struct ice_flow_prof_params *params)
986 {
987         enum ice_status status = ICE_SUCCESS;
988         u8 i;
989
990         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
991          * packet flags
992          */
993         if (params->blk == ICE_BLK_ACL) {
994                 status = ice_flow_xtract_pkt_flags(hw, params,
995                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
996                 if (status)
997                         return status;
998         }
999
1000         for (i = 0; i < params->prof->segs_cnt; i++) {
1001                 u64 match = params->prof->segs[i].match;
1002                 enum ice_flow_field j;
1003
1004                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1005                         const u64 bit = BIT_ULL(j);
1006
1007                         if (match & bit) {
1008                                 status = ice_flow_xtract_fld(hw, params, i, j,
1009                                                              match);
1010                                 if (status)
1011                                         return status;
1012                                 match &= ~bit;
1013                         }
1014                 }
1015
1016                 /* Process raw matching bytes */
1017                 status = ice_flow_xtract_raws(hw, params, i);
1018                 if (status)
1019                         return status;
1020         }
1021
1022         return status;
1023 }
1024
1025 /**
1026  * ice_flow_sel_acl_scen - returns the specific scenario
1027  * @hw: pointer to the hardware structure
1028  * @params: information about the flow to be processed
1029  *
1030  * This function will return the specific scenario based on the
1031  * params passed to it
1032  */
1033 static enum ice_status
1034 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1035 {
1036         /* Find the best-fit scenario for the provided match width */
1037         struct ice_acl_scen *cand_scen = NULL, *scen;
1038
1039         if (!hw->acl_tbl)
1040                 return ICE_ERR_DOES_NOT_EXIST;
1041
1042         /* Loop through each scenario and match against the scenario width
1043          * to select the specific scenario
1044          */
1045         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1046                 if (scen->eff_width >= params->entry_length &&
1047                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1048                         cand_scen = scen;
1049         if (!cand_scen)
1050                 return ICE_ERR_DOES_NOT_EXIST;
1051
1052         params->prof->cfg.scen = cand_scen;
1053
1054         return ICE_SUCCESS;
1055 }
1056
/**
 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
 * @params: information about the flow to be processed
 *
 * Assigns a byte-selection index (and, for range fields, a range-checker
 * index) to every matched field and raw field of the profile's segments,
 * then validates the totals against the hardware limits and stores the
 * resulting entry length in @params.
 */
static enum ice_status
ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
{
	u16 index, i, range_idx = 0;

	/* Byte-selection indices start after the reserved base */
	index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		struct ice_flow_seg_info *seg = &params->prof->segs[i];
		u64 match = seg->match;
		u8 j;

		for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
			struct ice_flow_fld_info *fld;
			const u64 bit = BIT_ULL(j);

			if (!(match & bit))
				continue;

			fld = &seg->fields[j];
			fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;

			if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
				fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;

				/* Range checking only supported for single
				 * words
				 */
				if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
							fld->xtrct.disp,
							BITS_PER_BYTE * 2) > 1)
					return ICE_ERR_PARAM;

				/* Ranges must define low and high values */
				if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
				    fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
					return ICE_ERR_PARAM;

				/* Range fields consume a range checker slot
				 * instead of byte-selection bytes.
				 */
				fld->entry.val = range_idx++;
			} else {
				/* Store adjusted byte-length of field for later
				 * use, taking into account potential
				 * non-byte-aligned displacement
				 */
				fld->entry.last = DIVIDE_AND_ROUND_UP
					(ice_flds_info[j].size +
					 (fld->xtrct.disp % BITS_PER_BYTE),
					 BITS_PER_BYTE);
				fld->entry.val = index;
				index += fld->entry.last;
			}

			match &= ~bit;
		}

		/* Raw fields always consume byte-selection bytes */
		for (j = 0; j < seg->raws_cnt; j++) {
			struct ice_flow_seg_fld_raw *raw = &seg->raws[j];

			raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
			raw->info.entry.val = index;
			raw->info.entry.last = raw->info.src.last;
			index += raw->info.entry.last;
		}
	}

	/* Currently only support using the byte selection base, which only
	 * allows for an effective entry size of 30 bytes. Reject anything
	 * larger.
	 */
	if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
		return ICE_ERR_PARAM;

	/* Only 8 range checkers per profile, reject anything trying to use
	 * more
	 */
	if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
		return ICE_ERR_PARAM;

	/* Store # bytes required for entry for later use */
	params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	return ICE_SUCCESS;
}
1144
1145 /**
1146  * ice_flow_proc_segs - process all packet segments associated with a profile
1147  * @hw: pointer to the HW struct
1148  * @params: information about the flow to be processed
1149  */
1150 static enum ice_status
1151 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1152 {
1153         enum ice_status status;
1154
1155         status = ice_flow_proc_seg_hdrs(params);
1156         if (status)
1157                 return status;
1158
1159         status = ice_flow_create_xtrct_seq(hw, params);
1160         if (status)
1161                 return status;
1162
1163         switch (params->blk) {
1164         case ICE_BLK_FD:
1165         case ICE_BLK_RSS:
1166                 status = ICE_SUCCESS;
1167                 break;
1168         case ICE_BLK_ACL:
1169                 status = ice_flow_acl_def_entry_frmt(params);
1170                 if (status)
1171                         return status;
1172                 status = ice_flow_sel_acl_scen(hw, params);
1173                 if (status)
1174                         return status;
1175                 break;
1176         case ICE_BLK_SW:
1177         default:
1178                 return ICE_ERR_NOT_IMPL;
1179         }
1180
1181         return status;
1182 }
1183
1184 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1185 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1186 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1187
1188 /**
1189  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1190  * @hw: pointer to the HW struct
1191  * @blk: classification stage
1192  * @dir: flow direction
1193  * @segs: array of one or more packet segments that describe the flow
1194  * @segs_cnt: number of packet segments provided
1195  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1196  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1197  */
1198 static struct ice_flow_prof *
1199 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1200                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1201                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1202 {
1203         struct ice_flow_prof *p, *prof = NULL;
1204
1205         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1206         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1207                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1208                     segs_cnt && segs_cnt == p->segs_cnt) {
1209                         u8 i;
1210
1211                         /* Check for profile-VSI association if specified */
1212                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1213                             ice_is_vsi_valid(hw, vsi_handle) &&
1214                             !ice_is_bit_set(p->vsis, vsi_handle))
1215                                 continue;
1216
1217                         /* Protocol headers must be checked. Matched fields are
1218                          * checked if specified.
1219                          */
1220                         for (i = 0; i < segs_cnt; i++)
1221                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1222                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1223                                      segs[i].match != p->segs[i].match))
1224                                         break;
1225
1226                         /* A match is found if all segments are matched */
1227                         if (i == segs_cnt) {
1228                                 prof = p;
1229                                 break;
1230                         }
1231                 }
1232         }
1233         ice_release_lock(&hw->fl_profs_locks[blk]);
1234
1235         return prof;
1236 }
1237
1238 /**
1239  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1240  * @hw: pointer to the HW struct
1241  * @blk: classification stage
1242  * @dir: flow direction
1243  * @segs: array of one or more packet segments that describe the flow
1244  * @segs_cnt: number of packet segments provided
1245  */
1246 u64
1247 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1248                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1249 {
1250         struct ice_flow_prof *p;
1251
1252         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1253                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1254
1255         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1256 }
1257
1258 /**
1259  * ice_flow_find_prof_id - Look up a profile with given profile ID
1260  * @hw: pointer to the HW struct
1261  * @blk: classification stage
1262  * @prof_id: unique ID to identify this flow profile
1263  */
1264 static struct ice_flow_prof *
1265 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1266 {
1267         struct ice_flow_prof *p;
1268
1269         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1270                 if (p->id == prof_id)
1271                         return p;
1272         }
1273
1274         return NULL;
1275 }
1276
1277 /**
1278  * ice_dealloc_flow_entry - Deallocate flow entry memory
1279  * @hw: pointer to the HW struct
1280  * @entry: flow entry to be removed
1281  */
1282 static void
1283 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1284 {
1285         if (!entry)
1286                 return;
1287
1288         if (entry->entry)
1289                 ice_free(hw, entry->entry);
1290
1291         if (entry->range_buf) {
1292                 ice_free(hw, entry->range_buf);
1293                 entry->range_buf = NULL;
1294         }
1295
1296         if (entry->acts) {
1297                 ice_free(hw, entry->acts);
1298                 entry->acts = NULL;
1299                 entry->acts_cnt = 0;
1300         }
1301
1302         ice_free(hw, entry);
1303 }
1304
1305 #define ICE_ACL_INVALID_SCEN    0x3f
1306
1307 /**
1308  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any pf
1309  * @hw: pointer to the hardware structure
1310  * @prof: pointer to flow profile
1311  * @buf: destination buffer function writes partial xtrct sequence to
1312  *
1313  * returns ICE_SUCCESS if no pf is associated to the given profile
1314  * returns ICE_ERR_IN_USE if at least one pf is associated to the given profile
1315  * returns other error code for real error
1316  */
1317 static enum ice_status
1318 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1319                             struct ice_aqc_acl_prof_generic_frmt *buf)
1320 {
1321         enum ice_status status;
1322         u8 prof_id = 0;
1323
1324         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1325         if (status)
1326                 return status;
1327
1328         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1329         if (status)
1330                 return status;
1331
1332         /* If all pf's associated scenarios are all 0 or all
1333          * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1334          * not been configured yet.
1335          */
1336         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1337             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1338             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1339             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1340                 return ICE_SUCCESS;
1341
1342         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1343             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1344             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1345             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1346             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1347             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1348             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1349             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1350                 return ICE_SUCCESS;
1351         else
1352                 return ICE_ERR_IN_USE;
1353 }
1354
1355 /**
1356  * ice_flow_acl_free_act_cntr - Free the acl rule's actions
1357  * @hw: pointer to the hardware structure
1358  * @acts: array of actions to be performed on a match
1359  * @acts_cnt: number of actions
1360  */
1361 static enum ice_status
1362 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1363                            u8 acts_cnt)
1364 {
1365         int i;
1366
1367         for (i = 0; i < acts_cnt; i++) {
1368                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1369                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1370                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1371                         struct ice_acl_cntrs cntrs;
1372                         enum ice_status status;
1373
1374                         cntrs.bank = 0; /* Only bank0 for the moment */
1375                         cntrs.first_cntr =
1376                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1377                         cntrs.last_cntr =
1378                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1379
1380                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1381                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1382                         else
1383                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1384
1385                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1386                         if (status)
1387                                 return status;
1388                 }
1389         }
1390         return ICE_SUCCESS;
1391 }
1392
1393 /**
1394  * ice_flow_acl_disassoc_scen - Disassociate the scenario to the Profile
1395  * @hw: pointer to the hardware structure
1396  * @prof: pointer to flow profile
1397  *
1398  * Disassociate the scenario to the Profile for the PF of the VSI.
1399  */
1400 static enum ice_status
1401 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1402 {
1403         struct ice_aqc_acl_prof_generic_frmt buf;
1404         enum ice_status status = ICE_SUCCESS;
1405         u8 prof_id = 0;
1406
1407         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1408
1409         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1410         if (status)
1411                 return status;
1412
1413         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1414         if (status)
1415                 return status;
1416
1417         /* Clear scenario for this pf */
1418         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1419         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1420
1421         return status;
1422 }
1423
1424 /**
1425  * ice_flow_rem_entry_sync - Remove a flow entry
1426  * @hw: pointer to the HW struct
1427  * @blk: classification stage
1428  * @entry: flow entry to be removed
1429  */
1430 static enum ice_status
1431 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1432                         struct ice_flow_entry *entry)
1433 {
1434         if (!entry)
1435                 return ICE_ERR_BAD_PTR;
1436
1437         if (blk == ICE_BLK_ACL) {
1438                 enum ice_status status;
1439
1440                 if (!entry->prof)
1441                         return ICE_ERR_BAD_PTR;
1442
1443                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1444                                            entry->scen_entry_idx);
1445                 if (status)
1446                         return status;
1447
1448                 /* Checks if we need to release an ACL counter. */
1449                 if (entry->acts_cnt && entry->acts)
1450                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1451                                                    entry->acts_cnt);
1452         }
1453
1454         LIST_DEL(&entry->l_entry);
1455
1456         ice_dealloc_flow_entry(hw, entry);
1457
1458         return ICE_SUCCESS;
1459 }
1460
/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @acts: array of default actions
 * @acts_cnt: number of default actions
 * @prof: stores the returned flow profile added
 *
 * Allocates a flow profile, copies the segments/actions into it, derives
 * the extraction sequence from the segments, and programs a matching HW
 * profile. On any failure the partially built profile is freed.
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_action *acts, u8 acts_cnt,
		       struct ice_flow_prof **prof)
{
	struct ice_flow_prof_params params;
	enum ice_status status;
	u8 i;

	/* Actions are optional, but if a count is given the array must be
	 * provided.
	 */
	if (!prof || (acts_cnt && !acts))
		return ICE_ERR_BAD_PTR;

	ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
	params.prof = (struct ice_flow_prof *)
		ice_malloc(hw, sizeof(*params.prof));
	if (!params.prof)
		return ICE_ERR_NO_MEMORY;

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params.es[i].prot_id = ICE_PROT_INVALID;
		params.es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params.blk = blk;
	params.prof->id = prof_id;
	params.prof->dir = dir;
	params.prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
			   ICE_NONDMA_TO_NONDMA);

	/* Make a copy of the actions that need to be persistent in the flow
	 * profile instance.
	 */
	if (acts_cnt) {
		params.prof->acts = (struct ice_flow_action *)
			ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
				   ICE_NONDMA_TO_NONDMA);

		if (!params.prof->acts) {
			status = ICE_ERR_NO_MEMORY;
			goto out;
		}
	}

	/* Derive ptypes, attributes, extraction sequence and masks from the
	 * segment descriptions.
	 */
	status = ice_flow_proc_segs(hw, &params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW,
			  "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
			      params.attr, params.attr_cnt, params.es,
			      params.mask);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params.prof->entries);
	ice_init_lock(&params.prof->entries_lock);
	*prof = params.prof;

out:
	/* On failure, release the partially constructed profile */
	if (status) {
		if (params.prof->acts)
			ice_free(hw, params.prof->acts);
		ice_free(hw, params.prof);
	}

	return status;
}
1556
/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Removes all remaining flow entries, performs ACL-specific teardown
 * (scenario disassociation and, if no other PF uses the profile, clearing
 * of the range checkers), then removes the HW profile and frees the SW
 * profile structure.
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the flow profile */
	if (!LIST_EMPTY(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		ice_acquire_lock(&prof->entries_lock);

		/* NOTE(review): a failed entry removal stops this walk but
		 * the status is then overwritten and profile removal still
		 * proceeds below -- confirm this best-effort behavior is
		 * intentional.
		 */
		LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
					 l_entry) {
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		ice_release_lock(&prof->entries_lock);
	}

	if (blk == ICE_BLK_ACL) {
		struct ice_aqc_acl_profile_ranges query_rng_buf;
		struct ice_aqc_acl_prof_generic_frmt buf;
		u8 prof_id = 0;

		/* Deassociate the scenario to the Profile for the PF */
		status = ice_flow_acl_disassoc_scen(hw, prof);
		if (status)
			return status;

		/* Clear the range-checker if the profile ID is no longer
		 * used by any PF
		 */
		status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
		if (status && status != ICE_ERR_IN_USE) {
			return status;
		} else if (!status) {
			/* Clear the range-checker value for profile ID */
			ice_memset(&query_rng_buf, 0,
				   sizeof(struct ice_aqc_acl_profile_ranges),
				   ICE_NONDMA_MEM);

			status = ice_flow_get_hw_prof(hw, blk, prof->id,
						      &prof_id);
			if (status)
				return status;

			/* Program the zeroed buffer to disable all range
			 * checkers for this profile
			 */
			status = ice_prog_acl_prof_ranges(hw, prof_id,
							  &query_rng_buf, NULL);
			if (status)
				return status;
		}
	}

	/* Remove all hardware profiles associated with this flow profile */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		LIST_DEL(&prof->l_entry);
		ice_destroy_lock(&prof->entries_lock);
		if (prof->acts)
			ice_free(hw, prof->acts);
		ice_free(hw, prof);
	}

	return status;
}
1633
1634 /**
1635  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1636  * @buf: Destination buffer function writes partial xtrct sequence to
1637  * @info: Info about field
1638  */
1639 static void
1640 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1641                                struct ice_flow_fld_info *info)
1642 {
1643         u16 dst, i;
1644         u8 src;
1645
1646         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1647                 info->xtrct.disp / BITS_PER_BYTE;
1648         dst = info->entry.val;
1649         for (i = 0; i < info->entry.last; i++)
1650                 /* HW stores field vector words in LE, convert words back to BE
1651                  * so constructed entries will end up in network order
1652                  */
1653                 buf->byte_selection[dst++] = src++ ^ 1;
1654 }
1655
/**
 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
 * @hw: pointer to the hardware structure
 * @prof: pointer to flow profile
 *
 * Programs the profile-dependent byte/word selection (only the first time
 * the profile is used by any PF) and records the current PF's scenario
 * number in the profile's per-PF scenario table.
 */
static enum ice_status
ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
{
	struct ice_aqc_acl_prof_generic_frmt buf;
	struct ice_flow_fld_info *info;
	enum ice_status status;
	u8 prof_id = 0;
	u16 i;

	ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

	status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
	if (status)
		return status;

	/* ICE_ERR_IN_USE means another PF already programmed the
	 * profile-dependent part; anything else is a real failure.
	 */
	status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
	if (status && status != ICE_ERR_IN_USE)
		return status;

	if (!status) {
		/* Program the profile dependent configuration. This is done
		 * only once regardless of the number of PFs using that profile
		 */
		ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

		for (i = 0; i < prof->segs_cnt; i++) {
			struct ice_flow_seg_info *seg = &prof->segs[i];
			u64 match = seg->match;
			u16 j;

			/* Visit each matched field; stop early once all set
			 * bits in 'match' have been consumed.
			 */
			for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
				const u64 bit = BIT_ULL(j);

				if (!(match & bit))
					continue;

				info = &seg->fields[j];

				/* Range-checked fields use word selection;
				 * all other fields use byte selection.
				 */
				if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
					buf.word_selection[info->entry.val] =
								info->xtrct.idx;
				else
					ice_flow_acl_set_xtrct_seq_fld(&buf,
								       info);

				match &= ~bit;
			}

			/* Raw (offset-based) matches always use byte
			 * selection.
			 */
			for (j = 0; j < seg->raws_cnt; j++) {
				info = &seg->raws[j].info;
				ice_flow_acl_set_xtrct_seq_fld(&buf, info);
			}
		}

		/* Start with every PF's scenario slot invalid; the current
		 * PF's slot is filled in below.
		 */
		ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
			   ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
			   ICE_NONDMA_MEM);
	}

	/* Update the current PF */
	buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
	status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);

	return status;
}
1726
1727 /**
1728  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1729  * @hw: pointer to the hardware structure
1730  * @blk: classification stage
1731  * @vsi_handle: software VSI handle
1732  * @vsig: target VSI group
1733  *
1734  * Assumption: the caller has already verified that the VSI to
1735  * be added has the same characteristics as the VSIG and will
1736  * thereby have access to all resources added to that VSIG.
1737  */
1738 enum ice_status
1739 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1740                         u16 vsig)
1741 {
1742         enum ice_status status;
1743
1744         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1745                 return ICE_ERR_PARAM;
1746
1747         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1748         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1749                                   vsig);
1750         ice_release_lock(&hw->fl_profs_locks[blk]);
1751
1752         return status;
1753 }
1754
1755 /**
1756  * ice_flow_assoc_prof - associate a VSI with a flow profile
1757  * @hw: pointer to the hardware structure
1758  * @blk: classification stage
1759  * @prof: pointer to flow profile
1760  * @vsi_handle: software VSI handle
1761  *
1762  * Assumption: the caller has acquired the lock to the profile list
1763  * and the software VSI handle has been validated
1764  */
1765 static enum ice_status
1766 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1767                     struct ice_flow_prof *prof, u16 vsi_handle)
1768 {
1769         enum ice_status status = ICE_SUCCESS;
1770
1771         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1772                 if (blk == ICE_BLK_ACL) {
1773                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
1774                         if (status)
1775                                 return status;
1776                 }
1777                 status = ice_add_prof_id_flow(hw, blk,
1778                                               ice_get_hw_vsi_num(hw,
1779                                                                  vsi_handle),
1780                                               prof->id);
1781                 if (!status)
1782                         ice_set_bit(vsi_handle, prof->vsis);
1783                 else
1784                         ice_debug(hw, ICE_DBG_FLOW,
1785                                   "HW profile add failed, %d\n",
1786                                   status);
1787         }
1788
1789         return status;
1790 }
1791
1792 /**
1793  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1794  * @hw: pointer to the hardware structure
1795  * @blk: classification stage
1796  * @prof: pointer to flow profile
1797  * @vsi_handle: software VSI handle
1798  *
1799  * Assumption: the caller has acquired the lock to the profile list
1800  * and the software VSI handle has been validated
1801  */
1802 static enum ice_status
1803 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1804                        struct ice_flow_prof *prof, u16 vsi_handle)
1805 {
1806         enum ice_status status = ICE_SUCCESS;
1807
1808         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
1809                 status = ice_rem_prof_id_flow(hw, blk,
1810                                               ice_get_hw_vsi_num(hw,
1811                                                                  vsi_handle),
1812                                               prof->id);
1813                 if (!status)
1814                         ice_clear_bit(vsi_handle, prof->vsis);
1815                 else
1816                         ice_debug(hw, ICE_DBG_FLOW,
1817                                   "HW profile remove failed, %d\n",
1818                                   status);
1819         }
1820
1821         return status;
1822 }
1823
1824 /**
1825  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
1826  * @hw: pointer to the HW struct
1827  * @blk: classification stage
1828  * @dir: flow direction
1829  * @prof_id: unique ID to identify this flow profile
1830  * @segs: array of one or more packet segments that describe the flow
1831  * @segs_cnt: number of packet segments provided
1832  * @acts: array of default actions
1833  * @acts_cnt: number of default actions
1834  * @prof: stores the returned flow profile added
1835  */
1836 enum ice_status
1837 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1838                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
1839                   struct ice_flow_action *acts, u8 acts_cnt,
1840                   struct ice_flow_prof **prof)
1841 {
1842         enum ice_status status;
1843
1844         if (segs_cnt > ICE_FLOW_SEG_MAX)
1845                 return ICE_ERR_MAX_LIMIT;
1846
1847         if (!segs_cnt)
1848                 return ICE_ERR_PARAM;
1849
1850         if (!segs)
1851                 return ICE_ERR_BAD_PTR;
1852
1853         status = ice_flow_val_hdrs(segs, segs_cnt);
1854         if (status)
1855                 return status;
1856
1857         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1858
1859         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
1860                                         acts, acts_cnt, prof);
1861         if (!status)
1862                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
1863
1864         ice_release_lock(&hw->fl_profs_locks[blk]);
1865
1866         return status;
1867 }
1868
1869 /**
1870  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
1871  * @hw: pointer to the HW struct
1872  * @blk: the block for which the flow profile is to be removed
1873  * @prof_id: unique ID of the flow profile to be removed
1874  */
1875 enum ice_status
1876 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1877 {
1878         struct ice_flow_prof *prof;
1879         enum ice_status status;
1880
1881         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1882
1883         prof = ice_flow_find_prof_id(hw, blk, prof_id);
1884         if (!prof) {
1885                 status = ICE_ERR_DOES_NOT_EXIST;
1886                 goto out;
1887         }
1888
1889         /* prof becomes invalid after the call */
1890         status = ice_flow_rem_prof_sync(hw, blk, prof);
1891
1892 out:
1893         ice_release_lock(&hw->fl_profs_locks[blk]);
1894
1895         return status;
1896 }
1897
1898 /**
1899  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1900  * @hw: pointer to the HW struct
1901  * @blk: classification stage
1902  * @prof_id: the profile ID handle
1903  * @hw_prof_id: pointer to variable to receive the HW profile ID
1904  */
1905 enum ice_status
1906 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1907                      u8 *hw_prof_id)
1908 {
1909         struct ice_prof_map *map;
1910
1911         map = ice_search_prof_id(hw, blk, prof_id);
1912         if (map) {
1913                 *hw_prof_id = map->prof_id;
1914                 return ICE_SUCCESS;
1915         }
1916
1917         return ICE_ERR_DOES_NOT_EXIST;
1918 }
1919
1920 /**
1921  * ice_flow_find_entry - look for a flow entry using its unique ID
1922  * @hw: pointer to the HW struct
1923  * @blk: classification stage
1924  * @entry_id: unique ID to identify this flow entry
1925  *
1926  * This function looks for the flow entry with the specified unique ID in all
1927  * flow profiles of the specified classification stage. If the entry is found,
1928  * and it returns the handle to the flow entry. Otherwise, it returns
1929  * ICE_FLOW_ENTRY_ID_INVAL.
1930  */
1931 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
1932 {
1933         struct ice_flow_entry *found = NULL;
1934         struct ice_flow_prof *p;
1935
1936         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1937
1938         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1939                 struct ice_flow_entry *e;
1940
1941                 ice_acquire_lock(&p->entries_lock);
1942                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
1943                         if (e->id == entry_id) {
1944                                 found = e;
1945                                 break;
1946                         }
1947                 ice_release_lock(&p->entries_lock);
1948
1949                 if (found)
1950                         break;
1951         }
1952
1953         ice_release_lock(&hw->fl_profs_locks[blk]);
1954
1955         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
1956 }
1957
/**
 * ice_flow_acl_check_actions - Checks the acl rule's actions
 * @hw: pointer to the hardware structure
 * @acts: array of actions to be performed on a match
 * @acts_cnt: number of actions
 * @cnt_alloc: indicates if a ACL counter has been allocated.
 *
 * Validates the action list (supported types, no duplicates) and allocates
 * a HW ACL counter for each counter-type action, storing the counter index
 * back into the action's data.
 */
static enum ice_status
ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
			   u8 acts_cnt, bool *cnt_alloc)
{
	ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	int i;

	ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	*cnt_alloc = false;

	if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
		return ICE_ERR_OUT_OF_RANGE;

	for (i = 0; i < acts_cnt; i++) {
		/* NOTE(review): ICE_FLOW_ACT_CNTR_BYTES and
		 * ICE_FLOW_ACT_CNTR_PKT_BYTES are rejected here even though
		 * the allocation loop below handles them -- confirm whether
		 * they should be in this allow-list.
		 */
		if (acts[i].type != ICE_FLOW_ACT_NOP &&
		    acts[i].type != ICE_FLOW_ACT_DROP &&
		    acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
		    acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
			return ICE_ERR_CFG;

		/* If the caller want to add two actions of the same type, then
		 * it is considered invalid configuration.
		 */
		if (ice_test_and_set_bit(acts[i].type, dup_check))
			return ICE_ERR_PARAM;
	}

	/* Checks if ACL counters are needed. */
	for (i = 0; i < acts_cnt; i++) {
		if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
		    acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
		    acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
			struct ice_acl_cntrs cntrs;
			enum ice_status status;

			cntrs.amount = 1;
			cntrs.bank = 0; /* Only bank0 for the moment */

			/* Packet+byte counting needs a dual counter */
			if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
				cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
			else
				cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;

			status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
			if (status)
				return status;
			/* Counter index within the bank */
			acts[i].data.acl_act.value =
						CPU_TO_LE16(cntrs.first_cntr);
			*cnt_alloc = true;
		}
	}

	return ICE_SUCCESS;
}
2020
2021 /**
2022  * ice_flow_acl_frmt_entry_range - Format an acl range checker for a given field
2023  * @fld: number of the given field
2024  * @info: info about field
2025  * @range_buf: range checker configuration buffer
2026  * @data: pointer to a data buffer containing flow entry's match values/masks
2027  * @range: Input/output param indicating which range checkers are being used
2028  */
2029 static void
2030 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2031                               struct ice_aqc_acl_profile_ranges *range_buf,
2032                               u8 *data, u8 *range)
2033 {
2034         u16 new_mask;
2035
2036         /* If not specified, default mask is all bits in field */
2037         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2038                     BIT(ice_flds_info[fld].size) - 1 :
2039                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2040
2041         /* If the mask is 0, then we don't need to worry about this input
2042          * range checker value.
2043          */
2044         if (new_mask) {
2045                 u16 new_high =
2046                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2047                 u16 new_low =
2048                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2049                 u8 range_idx = info->entry.val;
2050
2051                 range_buf->checker_cfg[range_idx].low_boundary =
2052                         CPU_TO_BE16(new_low);
2053                 range_buf->checker_cfg[range_idx].high_boundary =
2054                         CPU_TO_BE16(new_high);
2055                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2056
2057                 /* Indicate which range checker is being used */
2058                 *range |= BIT(range_idx);
2059         }
2060 }
2061
/**
 * ice_flow_acl_frmt_entry_fld - Partially format acl entry for a given field
 * @fld: number of the given field
 * @info: info about the field
 * @buf: buffer containing the entry
 * @dontcare: buffer containing don't care mask for entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 *
 * Copies the field's value (and inverted mask, if present) from the user
 * data buffer into the entry buffers, shifting by the field's bit
 * displacement within its first byte and marking any displacement padding
 * bits at the start and end of the field as don't-care.
 */
static void
ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
			    u8 *dontcare, u8 *data)
{
	u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
	bool use_mask = false;
	u8 disp;

	src = info->src.val;
	mask = info->src.mask;
	/* Destination index is relative to the entry's first byte-select */
	dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
	/* Bit offset of the field within its first extracted byte */
	disp = info->xtrct.disp % BITS_PER_BYTE;

	if (mask != ICE_FLOW_FLD_OFF_INVAL)
		use_mask = true;

	for (k = 0; k < info->entry.last; k++, dst++) {
		/* Add overflow bits from previous byte */
		buf[dst] = (tmp_s & 0xff00) >> 8;

		/* If mask is not valid, tmp_m is always zero, so just setting
		 * dontcare to 0 (no masked bits). If mask is valid, pulls in
		 * overflow bits of mask from prev byte
		 */
		dontcare[dst] = (tmp_m & 0xff00) >> 8;

		/* If there is displacement, last byte will only contain
		 * displaced data, but there is no more data to read from user
		 * buffer, so skip so as not to potentially read beyond end of
		 * user buffer
		 */
		if (!disp || k < info->entry.last - 1) {
			/* Store shifted data to use in next byte */
			tmp_s = data[src++] << disp;

			/* Add current (shifted) byte */
			buf[dst] |= tmp_s & 0xff;

			/* Handle mask if valid; mask bytes are inverted so a
			 * set dontcare bit means "ignore this entry bit"
			 */
			if (use_mask) {
				tmp_m = (~data[mask++] & 0xff) << disp;
				dontcare[dst] |= tmp_m & 0xff;
			}
		}
	}

	/* Fill in don't care bits at beginning of field */
	if (disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
		for (k = 0; k < disp; k++)
			dontcare[dst] |= BIT(k);
	}

	/* Bit offset where the field's data ends within its last byte */
	end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;

	/* Fill in don't care bits at end of field */
	if (end_disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
		      info->entry.last - 1;
		for (k = end_disp; k < BITS_PER_BYTE; k++)
			dontcare[dst] |= BIT(k);
	}
}
2133
2134 /**
2135  * ice_flow_acl_frmt_entry - Format acl entry
2136  * @hw: pointer to the hardware structure
2137  * @prof: pointer to flow profile
2138  * @e: pointer to the flow entry
2139  * @data: pointer to a data buffer containing flow entry's match values/masks
2140  * @acts: array of actions to be performed on a match
2141  * @acts_cnt: number of actions
2142  *
2143  * Formats the key (and key_inverse) to be matched from the data passed in,
2144  * along with data from the flow profile. This key/key_inverse pair makes up
2145  * the 'entry' for an acl flow entry.
2146  */
2147 static enum ice_status
2148 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2149                         struct ice_flow_entry *e, u8 *data,
2150                         struct ice_flow_action *acts, u8 acts_cnt)
2151 {
2152         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2153         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2154         enum ice_status status;
2155         bool cnt_alloc;
2156         u8 prof_id = 0;
2157         u16 i, buf_sz;
2158
2159         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2160         if (status)
2161                 return status;
2162
2163         /* Format the result action */
2164
2165         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2166         if (status)
2167                 return status;
2168
2169         status = ICE_ERR_NO_MEMORY;
2170
2171         e->acts = (struct ice_flow_action *)
2172                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2173                            ICE_NONDMA_TO_NONDMA);
2174
2175         if (!e->acts)
2176                 goto out;
2177
2178         e->acts_cnt = acts_cnt;
2179
2180         /* Format the matching data */
2181         buf_sz = prof->cfg.scen->width;
2182         buf = (u8 *)ice_malloc(hw, buf_sz);
2183         if (!buf)
2184                 goto out;
2185
2186         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2187         if (!dontcare)
2188                 goto out;
2189
2190         /* 'key' buffer will store both key and key_inverse, so must be twice
2191          * size of buf
2192          */
2193         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2194         if (!key)
2195                 goto out;
2196
2197         range_buf = (struct ice_aqc_acl_profile_ranges *)
2198                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2199         if (!range_buf)
2200                 goto out;
2201
2202         /* Set don't care mask to all 1's to start, will zero out used bytes */
2203         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2204
2205         for (i = 0; i < prof->segs_cnt; i++) {
2206                 struct ice_flow_seg_info *seg = &prof->segs[i];
2207                 u64 match = seg->match;
2208                 u16 j;
2209
2210                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2211                         struct ice_flow_fld_info *info;
2212                         const u64 bit = BIT_ULL(j);
2213
2214                         if (!(match & bit))
2215                                 continue;
2216
2217                         info = &seg->fields[j];
2218
2219                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2220                                 ice_flow_acl_frmt_entry_range(j, info,
2221                                                               range_buf, data,
2222                                                               &range);
2223                         else
2224                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2225                                                             dontcare, data);
2226
2227                         match &= ~bit;
2228                 }
2229
2230                 for (j = 0; j < seg->raws_cnt; j++) {
2231                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2232                         u16 dst, src, mask, k;
2233                         bool use_mask = false;
2234
2235                         src = info->src.val;
2236                         dst = info->entry.val -
2237                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2238                         mask = info->src.mask;
2239
2240                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2241                                 use_mask = true;
2242
2243                         for (k = 0; k < info->entry.last; k++, dst++) {
2244                                 buf[dst] = data[src++];
2245                                 if (use_mask)
2246                                         dontcare[dst] = ~data[mask++];
2247                                 else
2248                                         dontcare[dst] = 0;
2249                         }
2250                 }
2251         }
2252
2253         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2254         dontcare[prof->cfg.scen->pid_idx] = 0;
2255
2256         /* Format the buffer for direction flags */
2257         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2258
2259         if (prof->dir == ICE_FLOW_RX)
2260                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2261
2262         if (range) {
2263                 buf[prof->cfg.scen->rng_chk_idx] = range;
2264                 /* Mark any unused range checkers as don't care */
2265                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2266                 e->range_buf = range_buf;
2267         } else {
2268                 ice_free(hw, range_buf);
2269         }
2270
2271         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2272                              buf_sz);
2273         if (status)
2274                 goto out;
2275
2276         e->entry = key;
2277         e->entry_sz = buf_sz * 2;
2278
2279 out:
2280         if (buf)
2281                 ice_free(hw, buf);
2282
2283         if (dontcare)
2284                 ice_free(hw, dontcare);
2285
2286         if (status && key)
2287                 ice_free(hw, key);
2288
2289         if (status && range_buf) {
2290                 ice_free(hw, range_buf);
2291                 e->range_buf = NULL;
2292         }
2293
2294         if (status && e->acts) {
2295                 ice_free(hw, e->acts);
2296                 e->acts = NULL;
2297                 e->acts_cnt = 0;
2298         }
2299
2300         if (status && cnt_alloc)
2301                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2302
2303         return status;
2304 }
2305
/**
 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
 *                                     the compared data.
 * @prof: pointer to flow profile
 * @e: pointer to the comparing flow entry
 * @do_chg_action: decide if we want to change the ACL action
 * @do_add_entry: decide if we want to add the new ACL entry
 * @do_rem_entry: decide if we want to remove the current ACL entry
 *
 * Find an ACL scenario entry that matches the compared data. In the same time,
 * this function also figure out:
 * a/ If we want to change the ACL action
 * b/ If we want to add the new ACL entry
 * c/ If we want to remove the current ACL entry
 *
 * Returns the first existing entry whose match key equals e's, or NULL if
 * none exists (in which case *do_add_entry stays true).
 */
static struct ice_flow_entry *
ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
				  struct ice_flow_entry *e, bool *do_chg_action,
				  bool *do_add_entry, bool *do_rem_entry)
{
	struct ice_flow_entry *p, *return_entry = NULL;
	u8 i, j;

	/* Check if:
	 * a/ There exists an entry with same matching data, but different
	 *    priority, then we remove this existing ACL entry. Then, we
	 *    will add the new entry to the ACL scenario.
	 * b/ There exists an entry with same matching data, priority, and
	 *    result action, then we do nothing
	 * c/ There exists an entry with same matching data, priority, but
	 *    different, action, then do only change the action's entry.
	 * d/ Else, we add this new entry to the ACL scenario.
	 */
	*do_chg_action = false;
	*do_add_entry = true;
	*do_rem_entry = false;
	LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
		/* Skip entries whose key (key + key_inverse) differs */
		if (memcmp(p->entry, e->entry, p->entry_sz))
			continue;

		/* From this point, we have the same matching_data. */
		*do_add_entry = false;
		return_entry = p;

		if (p->priority != e->priority) {
			/* matching data && !priority */
			*do_add_entry = true;
			*do_rem_entry = true;
			break;
		}

		/* From this point, we will have matching_data && priority */
		if (p->acts_cnt != e->acts_cnt)
			*do_chg_action = true;
		for (i = 0; i < p->acts_cnt; i++) {
			bool found_not_match = false;

			/* NOTE(review): this flags a mismatch as soon as
			 * p->acts[i] differs from ANY e->acts[j], even when a
			 * later e->acts[j] would match; a pairwise compare
			 * would index both arrays with i. Confirm the intended
			 * semantics before changing.
			 */
			for (j = 0; j < e->acts_cnt; j++)
				if (memcmp(&p->acts[i], &e->acts[j],
					   sizeof(struct ice_flow_action))) {
					found_not_match = true;
					break;
				}

			if (found_not_match) {
				*do_chg_action = true;
				break;
			}
		}

		/* (do_chg_action = true) means :
		 *    matching_data && priority && !result_action
		 * (do_chg_action = false) means :
		 *    matching_data && priority && result_action
		 */
		break;
	}

	return return_entry;
}
2386
2387 /**
2388  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2389  * @p: flow priority
2390  */
2391 static enum ice_acl_entry_prior
2392 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2393 {
2394         enum ice_acl_entry_prior acl_prior;
2395
2396         switch (p) {
2397         case ICE_FLOW_PRIO_LOW:
2398                 acl_prior = ICE_LOW;
2399                 break;
2400         case ICE_FLOW_PRIO_NORMAL:
2401                 acl_prior = ICE_NORMAL;
2402                 break;
2403         case ICE_FLOW_PRIO_HIGH:
2404                 acl_prior = ICE_HIGH;
2405                 break;
2406         default:
2407                 acl_prior = ICE_NORMAL;
2408                 break;
2409         }
2410
2411         return acl_prior;
2412 }
2413
2414 /**
2415  * ice_flow_acl_union_rng_chk - Perform union operation between two
2416  *                              range-range checker buffers
2417  * @dst_buf: pointer to destination range checker buffer
2418  * @src_buf: pointer to source range checker buffer
2419  *
2420  * For this function, we do the union between dst_buf and src_buf
2421  * range checker buffer, and we will save the result back to dst_buf
2422  */
2423 static enum ice_status
2424 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2425                            struct ice_aqc_acl_profile_ranges *src_buf)
2426 {
2427         u8 i, j;
2428
2429         if (!dst_buf || !src_buf)
2430                 return ICE_ERR_BAD_PTR;
2431
2432         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2433                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2434                 bool will_populate = false;
2435
2436                 in_data = &src_buf->checker_cfg[i];
2437
2438                 if (!in_data->mask)
2439                         break;
2440
2441                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2442                         cfg_data = &dst_buf->checker_cfg[j];
2443
2444                         if (!cfg_data->mask ||
2445                             !memcmp(cfg_data, in_data,
2446                                     sizeof(struct ice_acl_rng_data))) {
2447                                 will_populate = true;
2448                                 break;
2449                         }
2450                 }
2451
2452                 if (will_populate) {
2453                         ice_memcpy(cfg_data, in_data,
2454                                    sizeof(struct ice_acl_rng_data),
2455                                    ICE_NONDMA_TO_NONDMA);
2456                 } else {
2457                         /* No available slot left to program range checker */
2458                         return ICE_ERR_MAX_LIMIT;
2459                 }
2460         }
2461
2462         return ICE_SUCCESS;
2463 }
2464
2465 /**
2466  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2467  * @hw: pointer to the hardware structure
2468  * @prof: pointer to flow profile
2469  * @entry: double pointer to the flow entry
2470  *
2471  * For this function, we will look at the current added entries in the
2472  * corresponding ACL scenario. Then, we will perform matching logic to
2473  * see if we want to add/modify/do nothing with this new entry.
2474  */
2475 static enum ice_status
2476 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2477                                  struct ice_flow_entry **entry)
2478 {
2479         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2480         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2481         struct ice_acl_act_entry *acts = NULL;
2482         struct ice_flow_entry *exist;
2483         enum ice_status status = ICE_SUCCESS;
2484         struct ice_flow_entry *e;
2485         u8 i;
2486
2487         if (!entry || !(*entry) || !prof)
2488                 return ICE_ERR_BAD_PTR;
2489
2490         e = *(entry);
2491
2492         do_chg_rng_chk = false;
2493         if (e->range_buf) {
2494                 u8 prof_id = 0;
2495
2496                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2497                                               &prof_id);
2498                 if (status)
2499                         return status;
2500
2501                 /* Query the current range-checker value in FW */
2502                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2503                                                    NULL);
2504                 if (status)
2505                         return status;
2506                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2507                            sizeof(struct ice_aqc_acl_profile_ranges),
2508                            ICE_NONDMA_TO_NONDMA);
2509
2510                 /* Generate the new range-checker value */
2511                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2512                 if (status)
2513                         return status;
2514
2515                 /* Reconfigure the range check if the buffer is changed. */
2516                 do_chg_rng_chk = false;
2517                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2518                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2519                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2520                                                           &cfg_rng_buf, NULL);
2521                         if (status)
2522                                 return status;
2523
2524                         do_chg_rng_chk = true;
2525                 }
2526         }
2527
2528         /* Figure out if we want to (change the ACL action) and/or
2529          * (Add the new ACL entry) and/or (Remove the current ACL entry)
2530          */
2531         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2532                                                   &do_add_entry, &do_rem_entry);
2533
2534         if (do_rem_entry) {
2535                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2536                 if (status)
2537                         return status;
2538         }
2539
2540         /* Prepare the result action buffer */
2541         acts = (struct ice_acl_act_entry *)ice_calloc
2542                 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2543         for (i = 0; i < e->acts_cnt; i++)
2544                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2545                            sizeof(struct ice_acl_act_entry),
2546                            ICE_NONDMA_TO_NONDMA);
2547
2548         if (do_add_entry) {
2549                 enum ice_acl_entry_prior prior;
2550                 u8 *keys, *inverts;
2551                 u16 entry_idx;
2552
2553                 keys = (u8 *)e->entry;
2554                 inverts = keys + (e->entry_sz / 2);
2555                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2556
2557                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2558                                            inverts, acts, e->acts_cnt,
2559                                            &entry_idx);
2560                 if (status)
2561                         goto out;
2562
2563                 e->scen_entry_idx = entry_idx;
2564                 LIST_ADD(&e->l_entry, &prof->entries);
2565         } else {
2566                 if (do_chg_action) {
2567                         /* For the action memory info, update the SW's copy of
2568                          * exist entry with e's action memory info
2569                          */
2570                         ice_free(hw, exist->acts);
2571                         exist->acts_cnt = e->acts_cnt;
2572                         exist->acts = (struct ice_flow_action *)
2573                                 ice_calloc(hw, exist->acts_cnt,
2574                                            sizeof(struct ice_flow_action));
2575
2576                         if (!exist->acts) {
2577                                 status = ICE_ERR_NO_MEMORY;
2578                                 goto out;
2579                         }
2580
2581                         ice_memcpy(exist->acts, e->acts,
2582                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2583                                    ICE_NONDMA_TO_NONDMA);
2584
2585                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2586                                                   e->acts_cnt,
2587                                                   exist->scen_entry_idx);
2588                         if (status)
2589                                 goto out;
2590                 }
2591
2592                 if (do_chg_rng_chk) {
2593                         /* In this case, we want to update the range checker
2594                          * information of the exist entry
2595                          */
2596                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2597                                                             e->range_buf);
2598                         if (status)
2599                                 goto out;
2600                 }
2601
2602                 /* As we don't add the new entry to our SW DB, deallocate its
2603                  * memories, and return the exist entry to the caller
2604                  */
2605                 ice_dealloc_flow_entry(hw, e);
2606                 *(entry) = exist;
2607         }
2608 out:
2609         if (acts)
2610                 ice_free(hw, acts);
2611
2612         return status;
2613 }
2614
/**
 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
 * @hw: pointer to the hardware structure
 * @prof: pointer to flow profile
 * @e: double pointer to the flow entry
 *
 * Locked wrapper around ice_flow_acl_add_scen_entry_sync(): takes the
 * profile's entries_lock for the duration of the call. Note the sync call
 * may replace *e with an already-existing equivalent entry.
 */
static enum ice_status
ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
			    struct ice_flow_entry **e)
{
	enum ice_status status;

	ice_acquire_lock(&prof->entries_lock);
	status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
	ice_release_lock(&prof->entries_lock);

	return status;
}
2633
/**
 * ice_flow_add_entry - Add a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: ID of the profile to add a new flow entry to
 * @entry_id: unique ID to identify this flow entry
 * @vsi_handle: software VSI handle for the flow entry
 * @prio: priority of the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @acts: arrays of actions to be performed on a match
 * @acts_cnt: number of actions
 * @entry_h: pointer to buffer that receives the new flow entry's handle
 */
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, struct ice_flow_action *acts, u8 acts_cnt,
		   u64 *entry_h)
{
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status = ICE_SUCCESS;

	/* ACL entries must indicate an action */
	if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
		return ICE_ERR_PARAM;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
	} else {
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile
		 */
		e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
		if (!e)
			status = ICE_ERR_NO_MEMORY;
		else
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	}

	ice_release_lock(&hw->fl_profs_locks[blk]);
	if (status)
		goto out;

	e->id = entry_id;
	e->vsi_handle = vsi_handle;
	e->prof = prof;
	e->priority = prio;

	switch (blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		break;
	case ICE_BLK_ACL:
		/* ACL will handle the entry management */
		status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
						 acts_cnt);
		if (status)
			goto out;

		/* May replace 'e' with an equivalent pre-existing entry */
		status = ice_flow_acl_add_scen_entry(hw, prof, &e);
		if (status)
			goto out;

		break;
	case ICE_BLK_SW:
	case ICE_BLK_PE:
	default:
		status = ICE_ERR_NOT_IMPL;
		goto out;
	}

	if (blk != ICE_BLK_ACL) {
		/* ACL will handle the entry management */
		ice_acquire_lock(&prof->entries_lock);
		LIST_ADD(&e->l_entry, &prof->entries);
		ice_release_lock(&prof->entries_lock);
	}

	*entry_h = ICE_FLOW_ENTRY_HNDL(e);

out:
	/* NOTE(review): this path frees only e->entry and e. If the ACL
	 * scenario add fails after ice_flow_acl_frmt_entry() succeeded,
	 * e->acts and e->range_buf appear to leak here — confirm cleanup
	 * ownership against ice_dealloc_flow_entry().
	 */
	if (status && e) {
		if (e->entry)
			ice_free(hw, e->entry);
		ice_free(hw, e);
	}

	return status;
}
2734
2735 /**
2736  * ice_flow_rem_entry - Remove a flow entry
2737  * @hw: pointer to the HW struct
2738  * @blk: classification stage
2739  * @entry_h: handle to the flow entry to be removed
2740  */
2741 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2742                                    u64 entry_h)
2743 {
2744         struct ice_flow_entry *entry;
2745         struct ice_flow_prof *prof;
2746         enum ice_status status = ICE_SUCCESS;
2747
2748         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2749                 return ICE_ERR_PARAM;
2750
2751         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2752
2753         /* Retain the pointer to the flow profile as the entry will be freed */
2754         prof = entry->prof;
2755
2756         if (prof) {
2757                 ice_acquire_lock(&prof->entries_lock);
2758                 status = ice_flow_rem_entry_sync(hw, blk, entry);
2759                 ice_release_lock(&prof->entries_lock);
2760         }
2761
2762         return status;
2763 }
2764
2765 /**
2766  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2767  * @seg: packet segment the field being set belongs to
2768  * @fld: field to be set
2769  * @field_type: type of the field
2770  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2771  *           entry's input buffer
2772  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2773  *            input buffer
2774  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2775  *            entry's input buffer
2776  *
2777  * This helper function stores information of a field being matched, including
2778  * the type of the field and the locations of the value to match, the mask, and
2779  * and the upper-bound value in the start of the input buffer for a flow entry.
2780  * This function should only be used for fixed-size data structures.
2781  *
2782  * This function also opportunistically determines the protocol headers to be
2783  * present based on the fields being set. Some fields cannot be used alone to
2784  * determine the protocol headers present. Sometimes, fields for particular
2785  * protocol headers are not matched. In those cases, the protocol headers
2786  * must be explicitly set.
2787  */
2788 static void
2789 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2790                      enum ice_flow_fld_match_type field_type, u16 val_loc,
2791                      u16 mask_loc, u16 last_loc)
2792 {
2793         u64 bit = BIT_ULL(fld);
2794
2795         seg->match |= bit;
2796         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
2797                 seg->range |= bit;
2798
2799         seg->fields[fld].type = field_type;
2800         seg->fields[fld].src.val = val_loc;
2801         seg->fields[fld].src.mask = mask_loc;
2802         seg->fields[fld].src.last = last_loc;
2803
2804         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
2805 }
2806
2807 /**
2808  * ice_flow_set_fld - specifies locations of field from entry's input buffer
2809  * @seg: packet segment the field being set belongs to
2810  * @fld: field to be set
2811  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2812  *           entry's input buffer
2813  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2814  *            input buffer
2815  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2816  *            entry's input buffer
2817  * @range: indicate if field being matched is to be in a range
2818  *
2819  * This function specifies the locations, in the form of byte offsets from the
2820  * start of the input buffer for a flow entry, from where the value to match,
2821  * the mask value, and upper value can be extracted. These locations are then
2822  * stored in the flow profile. When adding a flow entry associated with the
2823  * flow profile, these locations will be used to quickly extract the values and
2824  * create the content of a match entry. This function should only be used for
2825  * fixed-size data structures.
2826  */
2827 void
2828 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2829                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
2830 {
2831         enum ice_flow_fld_match_type t = range ?
2832                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
2833
2834         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
2835 }
2836
/**
 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 *           entry's input buffer
 * @pref_loc: location of prefix value from entry's input buffer
 * @pref_sz: size of the location holding the prefix value
 *
 * This function specifies the locations, in the form of byte offsets from the
 * start of the input buffer for a flow entry, from where the value to match
 * and the IPv4 prefix value can be extracted. These locations are then stored
 * in the flow profile. When adding flow entries to the associated flow profile,
 * these locations can be used to quickly extract the values to create the
 * content of a match entry. This function should only be used for fixed-size
 * data structures.
 */
void
ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
			u16 val_loc, u16 pref_loc, u8 pref_sz)
{
	/* For this type of field, the "mask" location is for the prefix value's
	 * location and the "last" location is for the size of the location of
	 * the prefix value.
	 */
	ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
			     pref_loc, (u16)pref_sz);
}
2865
2866 /**
2867  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
2868  * @seg: packet segment the field being set belongs to
2869  * @off: offset of the raw field from the beginning of the segment in bytes
2870  * @len: length of the raw pattern to be matched
2871  * @val_loc: location of the value to match from entry's input buffer
2872  * @mask_loc: location of mask value from entry's input buffer
2873  *
2874  * This function specifies the offset of the raw field to be match from the
2875  * beginning of the specified packet segment, and the locations, in the form of
2876  * byte offsets from the start of the input buffer for a flow entry, from where
2877  * the value to match and the mask value to be extracted. These locations are
2878  * then stored in the flow profile. When adding flow entries to the associated
2879  * flow profile, these locations can be used to quickly extract the values to
2880  * create the content of a match entry. This function should only be used for
2881  * fixed-size data structures.
2882  */
2883 void
2884 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
2885                      u16 val_loc, u16 mask_loc)
2886 {
2887         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
2888                 seg->raws[seg->raws_cnt].off = off;
2889                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
2890                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
2891                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
2892                 /* The "last" field is used to store the length of the field */
2893                 seg->raws[seg->raws_cnt].info.src.last = len;
2894         }
2895
2896         /* Overflows of "raws" will be handled as an error condition later in
2897          * the flow when this information is processed.
2898          */
2899         seg->raws_cnt++;
2900 }
2901
/* L2 protocol headers eligible for RSS segment configuration */
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)

/* L3 protocol headers eligible for RSS segment configuration */
#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

/* L4 protocol headers eligible for RSS segment configuration */
#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)

/* Union of all header bits a valid RSS segment may carry */
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
2916
2917 /**
2918  * ice_flow_set_rss_seg_info - setup packet segments for RSS
2919  * @segs: pointer to the flow field segment(s)
2920  * @hash_fields: fields to be hashed on for the segment(s)
2921  * @flow_hdr: protocol header fields within a packet segment
2922  *
2923  * Helper function to extract fields from hash bitmap and use flow
2924  * header value to set flow field segment for further use in flow
2925  * profile entry or removal.
2926  */
2927 static enum ice_status
2928 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
2929                           u32 flow_hdr)
2930 {
2931         u64 val = hash_fields;
2932         u8 i;
2933
2934         for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
2935                 u64 bit = BIT_ULL(i);
2936
2937                 if (val & bit) {
2938                         ice_flow_set_fld(segs, (enum ice_flow_field)i,
2939                                          ICE_FLOW_FLD_OFF_INVAL,
2940                                          ICE_FLOW_FLD_OFF_INVAL,
2941                                          ICE_FLOW_FLD_OFF_INVAL, false);
2942                         val &= ~bit;
2943                 }
2944         }
2945         ICE_FLOW_SET_HDRS(segs, flow_hdr);
2946
2947         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
2948             ~ICE_FLOW_RSS_HDRS_INNER_MASK)
2949                 return ICE_ERR_PARAM;
2950
2951         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
2952         if (val && !ice_is_pow2(val))
2953                 return ICE_ERR_CFG;
2954
2955         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
2956         if (val && !ice_is_pow2(val))
2957                 return ICE_ERR_CFG;
2958
2959         return ICE_SUCCESS;
2960 }
2961
2962 /**
2963  * ice_rem_vsi_rss_list - remove VSI from RSS list
2964  * @hw: pointer to the hardware structure
2965  * @vsi_handle: software VSI handle
2966  *
2967  * Remove the VSI from all RSS configurations in the list.
2968  */
2969 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
2970 {
2971         struct ice_rss_cfg *r, *tmp;
2972
2973         if (LIST_EMPTY(&hw->rss_list_head))
2974                 return;
2975
2976         ice_acquire_lock(&hw->rss_locks);
2977         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
2978                                  ice_rss_cfg, l_entry) {
2979                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
2980                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
2981                                 LIST_DEL(&r->l_entry);
2982                                 ice_free(hw, r);
2983                         }
2984         }
2985         ice_release_lock(&hw->rss_locks);
2986 }
2987
2988 /**
2989  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
2990  * @hw: pointer to the hardware structure
2991  * @vsi_handle: software VSI handle
2992  *
2993  * This function will iterate through all flow profiles and disassociate
2994  * the VSI from that profile. If the flow profile has no VSIs it will
2995  * be removed.
2996  */
2997 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
2998 {
2999         const enum ice_block blk = ICE_BLK_RSS;
3000         struct ice_flow_prof *p, *t;
3001         enum ice_status status = ICE_SUCCESS;
3002
3003         if (!ice_is_vsi_valid(hw, vsi_handle))
3004                 return ICE_ERR_PARAM;
3005
3006         if (LIST_EMPTY(&hw->fl_profs[blk]))
3007                 return ICE_SUCCESS;
3008
3009         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3010         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3011                                  l_entry) {
3012                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3013                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3014                         if (status)
3015                                 break;
3016
3017                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3018                                 status = ice_flow_rem_prof_sync(hw, blk, p);
3019                                 if (status)
3020                                         break;
3021                         }
3022                 }
3023         }
3024         ice_release_lock(&hw->fl_profs_locks[blk]);
3025
3026         return status;
3027 }
3028
3029 /**
3030  * ice_rem_rss_list - remove RSS configuration from list
3031  * @hw: pointer to the hardware structure
3032  * @vsi_handle: software VSI handle
3033  * @prof: pointer to flow profile
3034  *
3035  * Assumption: lock has already been acquired for RSS list
3036  */
3037 static void
3038 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3039 {
3040         struct ice_rss_cfg *r, *tmp;
3041
3042         /* Search for RSS hash fields associated to the VSI that match the
3043          * hash configurations associated to the flow profile. If found
3044          * remove from the RSS entry list of the VSI context and delete entry.
3045          */
3046         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3047                                  ice_rss_cfg, l_entry) {
3048                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3049                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3050                         ice_clear_bit(vsi_handle, r->vsis);
3051                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3052                                 LIST_DEL(&r->l_entry);
3053                                 ice_free(hw, r);
3054                         }
3055                         return;
3056                 }
3057         }
3058 }
3059
3060 /**
3061  * ice_add_rss_list - add RSS configuration to list
3062  * @hw: pointer to the hardware structure
3063  * @vsi_handle: software VSI handle
3064  * @prof: pointer to flow profile
3065  *
3066  * Assumption: lock has already been acquired for RSS list
3067  */
3068 static enum ice_status
3069 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3070 {
3071         struct ice_rss_cfg *r, *rss_cfg;
3072
3073         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3074                             ice_rss_cfg, l_entry)
3075                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3076                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3077                         ice_set_bit(vsi_handle, r->vsis);
3078                         return ICE_SUCCESS;
3079                 }
3080
3081         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3082         if (!rss_cfg)
3083                 return ICE_ERR_NO_MEMORY;
3084
3085         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3086         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3087         rss_cfg->symm = prof->cfg.symm;
3088         ice_set_bit(vsi_handle, rss_cfg->vsis);
3089
3090         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3091
3092         return ICE_SUCCESS;
3093 }
3094
/* Shift/mask pairs for the fields packed into a 64-bit RSS profile ID.
 * NOTE(review): ICE_FLOW_PROF_HDR_M (0x3FFFFFFF << 32) covers bits 32-61,
 * while the format comment below says [32:62]; bit 62 is left unused.
 */
#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

/* Segment counts passed to the cfg-sync helpers: one segment targets the
 * outer headers, two segments target the inner (tunneled) headers.
 */
#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3114
3115 static void
3116 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3117 {
3118         u32 s = ((src % 4) << 3); /* byte shift */
3119         u32 v = dst | 0x80; /* value to program */
3120         u8 i = src / 4; /* register index */
3121         u32 reg;
3122
3123         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3124         reg = (reg & ~(0xff << s)) | (v << s);
3125         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3126 }
3127
3128 static void
3129 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3130 {
3131         int fv_last_word =
3132                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3133         int i;
3134
3135         for (i = 0; i < len; i++) {
3136                 ice_rss_config_xor_word(hw, prof_id,
3137                                         /* Yes, field vector in GLQF_HSYMM and
3138                                          * GLQF_HINSET is inversed!
3139                                          */
3140                                         fv_last_word - (src + i),
3141                                         fv_last_word - (dst + i));
3142                 ice_rss_config_xor_word(hw, prof_id,
3143                                         fv_last_word - (dst + i),
3144                                         fv_last_word - (src + i));
3145         }
3146 }
3147
3148 static void
3149 ice_rss_update_symm(struct ice_hw *hw,
3150                     struct ice_flow_prof *prof)
3151 {
3152         struct ice_prof_map *map;
3153         u8 prof_id, m;
3154
3155         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3156         prof_id = map->prof_id;
3157
3158         /* clear to default */
3159         for (m = 0; m < 6; m++)
3160                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3161         if (prof->cfg.symm) {
3162                 struct ice_flow_seg_info *seg =
3163                         &prof->segs[prof->segs_cnt - 1];
3164
3165                 struct ice_flow_seg_xtrct *ipv4_src =
3166                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3167                 struct ice_flow_seg_xtrct *ipv4_dst =
3168                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3169                 struct ice_flow_seg_xtrct *ipv6_src =
3170                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3171                 struct ice_flow_seg_xtrct *ipv6_dst =
3172                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3173
3174                 struct ice_flow_seg_xtrct *tcp_src =
3175                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3176                 struct ice_flow_seg_xtrct *tcp_dst =
3177                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3178
3179                 struct ice_flow_seg_xtrct *udp_src =
3180                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3181                 struct ice_flow_seg_xtrct *udp_dst =
3182                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3183
3184                 struct ice_flow_seg_xtrct *sctp_src =
3185                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3186                 struct ice_flow_seg_xtrct *sctp_dst =
3187                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3188
3189                 /* xor IPv4 */
3190                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3191                         ice_rss_config_xor(hw, prof_id,
3192                                            ipv4_src->idx, ipv4_dst->idx, 2);
3193
3194                 /* xor IPv6 */
3195                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3196                         ice_rss_config_xor(hw, prof_id,
3197                                            ipv6_src->idx, ipv6_dst->idx, 8);
3198
3199                 /* xor TCP */
3200                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3201                         ice_rss_config_xor(hw, prof_id,
3202                                            tcp_src->idx, tcp_dst->idx, 1);
3203
3204                 /* xor UDP */
3205                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3206                         ice_rss_config_xor(hw, prof_id,
3207                                            udp_src->idx, udp_dst->idx, 1);
3208
3209                 /* xor SCTP */
3210                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3211                         ice_rss_config_xor(hw, prof_id,
3212                                            sctp_src->idx, sctp_dst->idx, 1);
3213         }
3214 }
3215
3216 /**
3217  * ice_add_rss_cfg_sync - add an RSS configuration
3218  * @hw: pointer to the hardware structure
3219  * @vsi_handle: software VSI handle
3220  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3221  * @addl_hdrs: protocol header fields
3222  * @segs_cnt: packet segment count
3223  * @symm: symmetric hash enable/disable
3224  *
3225  * Assumption: lock has already been acquired for RSS list
3226  */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt, bool symm)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_PARAM;

	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
						      sizeof(*segs));
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto exit;

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		/* Same profile but symmetric setting differs: update the
		 * setting and reprogram the hardware XOR registers only.
		 */
		if (prof->cfg.symm == symm)
			goto exit;
		prof->cfg.symm = symm;
		goto update_symm;
	}

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		if (prof->cfg.symm == symm) {
			status = ice_flow_assoc_prof(hw, blk, prof,
						     vsi_handle);
			if (!status)
				status = ice_add_rss_list(hw, vsi_handle,
							  prof);
		} else {
			/* if a profile exist but with different symmetric
			 * requirement, just return error.
			 */
			status = ICE_ERR_NOT_SUPPORTED;
		}
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(hashed_flds,
						       segs[segs_cnt - 1].hdrs,
						       segs_cnt),
				   segs, segs_cnt, NULL, 0, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

	prof->cfg.symm = symm;

update_symm:
	ice_rss_update_symm(hw, prof);

exit:
	ice_free(hw, segs);
	return status;
}
3340
3341 /**
3342  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3343  * @hw: pointer to the hardware structure
3344  * @vsi_handle: software VSI handle
3345  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3346  * @addl_hdrs: protocol header fields
3347  * @symm: symmetric hash enable/disable
3348  *
3349  * This function will generate a flow profile based on fields associated with
3350  * the input fields to hash on, the flow type and use the VSI number to add
3351  * a flow entry to the profile.
3352  */
3353 enum ice_status
3354 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3355                 u32 addl_hdrs, bool symm)
3356 {
3357         enum ice_status status;
3358
3359         if (hashed_flds == ICE_HASH_INVALID ||
3360             !ice_is_vsi_valid(hw, vsi_handle))
3361                 return ICE_ERR_PARAM;
3362
3363         ice_acquire_lock(&hw->rss_locks);
3364         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3365                                       ICE_RSS_OUTER_HEADERS, symm);
3366         if (!status)
3367                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3368                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3369                                               symm);
3370         ice_release_lock(&hw->rss_locks);
3371
3372         return status;
3373 }
3374
3375 /**
3376  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3377  * @hw: pointer to the hardware structure
3378  * @vsi_handle: software VSI handle
3379  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3380  * @addl_hdrs: Protocol header fields within a packet segment
3381  * @segs_cnt: packet segment count
3382  *
3383  * Assumption: lock has already been acquired for RSS list
3384  */
3385 static enum ice_status
3386 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3387                      u32 addl_hdrs, u8 segs_cnt)
3388 {
3389         const enum ice_block blk = ICE_BLK_RSS;
3390         struct ice_flow_seg_info *segs;
3391         struct ice_flow_prof *prof;
3392         enum ice_status status;
3393
3394         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3395                                                       sizeof(*segs));
3396         if (!segs)
3397                 return ICE_ERR_NO_MEMORY;
3398
3399         /* Construct the packet segment info from the hashed fields */
3400         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3401                                            addl_hdrs);
3402         if (status)
3403                 goto out;
3404
3405         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3406                                         vsi_handle,
3407                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3408         if (!prof) {
3409                 status = ICE_ERR_DOES_NOT_EXIST;
3410                 goto out;
3411         }
3412
3413         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3414         if (status)
3415                 goto out;
3416
3417         /* Remove RSS configuration from VSI context before deleting
3418          * the flow profile.
3419          */
3420         ice_rem_rss_list(hw, vsi_handle, prof);
3421
3422         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3423                 status = ice_flow_rem_prof(hw, blk, prof->id);
3424
3425 out:
3426         ice_free(hw, segs);
3427         return status;
3428 }
3429
3430 /**
3431  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3432  * @hw: pointer to the hardware structure
3433  * @vsi_handle: software VSI handle
3434  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3435  * @addl_hdrs: Protocol header fields within a packet segment
3436  *
3437  * This function will lookup the flow profile based on the input
3438  * hash field bitmap, iterate through the profile entry list of
3439  * that profile and find entry associated with input VSI to be
3440  * removed. Calls are made to underlying flow apis which will in
3441  * turn build or update buffers for RSS XLT1 section.
3442  */
3443 enum ice_status
3444 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3445                 u32 addl_hdrs)
3446 {
3447         enum ice_status status;
3448
3449         if (hashed_flds == ICE_HASH_INVALID ||
3450             !ice_is_vsi_valid(hw, vsi_handle))
3451                 return ICE_ERR_PARAM;
3452
3453         ice_acquire_lock(&hw->rss_locks);
3454         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3455                                       ICE_RSS_OUTER_HEADERS);
3456         if (!status)
3457                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3458                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3459         ice_release_lock(&hw->rss_locks);
3460
3461         return status;
3462 }
3463
3464 /**
3465  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3466  * @hw: pointer to the hardware structure
3467  * @vsi_handle: software VSI handle
3468  */
3469 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3470 {
3471         enum ice_status status = ICE_SUCCESS;
3472         struct ice_rss_cfg *r;
3473
3474         if (!ice_is_vsi_valid(hw, vsi_handle))
3475                 return ICE_ERR_PARAM;
3476
3477         ice_acquire_lock(&hw->rss_locks);
3478         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3479                             ice_rss_cfg, l_entry) {
3480                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3481                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3482                                                       r->hashed_flds,
3483                                                       r->packet_hdr,
3484                                                       ICE_RSS_OUTER_HEADERS,
3485                                                       r->symm);
3486                         if (status)
3487                                 break;
3488                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3489                                                       r->hashed_flds,
3490                                                       r->packet_hdr,
3491                                                       ICE_RSS_INNER_HEADERS,
3492                                                       r->symm);
3493                         if (status)
3494                                 break;
3495                 }
3496         }
3497         ice_release_lock(&hw->rss_locks);
3498
3499         return status;
3500 }
3501
3502 /**
3503  * ice_get_rss_cfg - returns hashed fields for the given header types
3504  * @hw: pointer to the hardware structure
3505  * @vsi_handle: software VSI handle
3506  * @hdrs: protocol header type
3507  *
3508  * This function will return the match fields of the first instance of flow
3509  * profile having the given header types and containing input VSI
3510  */
3511 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3512 {
3513         struct ice_rss_cfg *r, *rss_cfg = NULL;
3514
3515         /* verify if the protocol header is non zero and VSI is valid */
3516         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3517                 return ICE_HASH_INVALID;
3518
3519         ice_acquire_lock(&hw->rss_locks);
3520         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3521                             ice_rss_cfg, l_entry)
3522                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3523                     r->packet_hdr == hdrs) {
3524                         rss_cfg = r;
3525                         break;
3526                 }
3527         ice_release_lock(&hw->rss_locks);
3528
3529         return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
3530 }