cdb4b004fd466b5b932f196fb8e43aec778c171a
[dpdk.git] / drivers / net / ice / base / ice_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
/* Size of known protocol header fields, in bytes.
 * Converted to bit widths by the ICE_FLOW_FLD_INFO() macros below.
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
25
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;	/* Protocol header the field belongs to */
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
	u16 mask;	/* 16-bit mask for field */
};
33
/* Initialize an ice_flow_field_info entry. Offset and size are supplied in
 * bytes and stored in bits; no match mask is applied.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = 0, \
}

/* Same as ICE_FLOW_FLD_INFO() but with an explicit 16-bit match mask */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
47
/* Table containing properties of supported protocol header fields.
 * Entries are indexed by, and must stay in the same order as, the
 * ICE_FLOW_FIELD_IDX_* enumeration (see the per-entry comments below).
 */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* Ether */
	/* ICE_FLOW_FIELD_IDX_ETH_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ETH_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_S_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_C_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE),
	/* IPv4 / IPv6 */
	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x00fc),
	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x0ff0),
	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* Transport */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
	/* ARP */
	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_OP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
	/* ICMP */
	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
	/* GTP */
	/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
			      ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
	/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* PPPOE */
	/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
			  ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
};
147
/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Each array is consumed as an ice_bitmap_t over ICE_FLOW_PTYPE_MAX packet
 * types (one bit per PTYPE) and is ANDed into the working PTYPE set in
 * ice_flow_proc_seg_hdrs().
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0003000F, 0x000FC000, 0x03E0F800, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x001FF800, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
	0x00000800, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
234
/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00410000, 0x10842000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00820000, 0x21084000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x01040000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
	0x10000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
	0x00000000, 0x02040408, 0x40000102, 0x08101020,
	0x00000408, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x42108000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
296
/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC header.
 * Currently empty: no PTYPE carries an inner MAC header here.
 */
static const u32 ice_ptypes_mac_il[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC */
static const u32 ice_ptypes_gtpc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000180, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC with TEID */
static const u32 ice_ptypes_gtpc_tid[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000060, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
344
/* PTYPE attribute list marking GTP-U packets that carry a PDU session
 * container (extension header)
 */
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
};

/* PTYPE attribute list marking GTP-U packets as downlink traffic */
static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
};

/* PTYPE attribute list marking GTP-U packets as uplink traffic */
static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
};
414
/* Packet types for GTPU */
static const u32 ice_ptypes_gtpu[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for pppoe */
static const u32 ice_ptypes_pppoe[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
437
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk;	/* HW block the profile is built for */
	u16 entry_length; /* # of bytes formatted entry will require */
	u8 es_cnt;	/* number of extraction-sequence words used in es[] */
	struct ice_flow_prof *prof;	/* profile being constructed */

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];
	/* attributes can be used to add attributes to a particular PTYPE */
	const struct ice_ptype_attributes *attr;
	u16 attr_cnt;	/* number of entries in attr[] */

	u16 mask[ICE_MAX_FV_WORDS];	/* per-word match masks for es[] */
	ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);	/* applicable PTYPEs */
};
456
/* Tunnel/session headers that may appear on an inner RSS segment */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU)

/* All L2 header flags */
#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* All L3 header flags; at most one may be set per segment */
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
/* All L4 header flags; at most one may be set per segment */
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
469
470 /**
471  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
472  * @segs: array of one or more packet segments that describe the flow
473  * @segs_cnt: number of packet segments provided
474  */
475 static enum ice_status
476 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
477 {
478         u8 i;
479
480         for (i = 0; i < segs_cnt; i++) {
481                 /* Multiple L3 headers */
482                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
483                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
484                         return ICE_ERR_PARAM;
485
486                 /* Multiple L4 headers */
487                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
488                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
489                         return ICE_ERR_PARAM;
490         }
491
492         return ICE_SUCCESS;
493 }
494
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
505
506 /**
507  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
508  * @params: information about the flow to be processed
509  * @seg: index of packet segment whose header size is to be determined
510  */
511 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
512 {
513         u16 sz;
514
515         /* L2 headers */
516         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
517                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
518
519         /* L3 headers */
520         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
521                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
522         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
523                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
524         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
525                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
526         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
527                 /* A L3 header is required if L4 is specified */
528                 return 0;
529
530         /* L4 headers */
531         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
532                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
533         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
534                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
535         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
536                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
537         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
538                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
539
540         return sz;
541 }
542
543 /**
544  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
545  * @params: information about the flow to be processed
546  *
547  * This function identifies the packet types associated with the protocol
548  * headers being present in packet segments of the specified flow profile.
549  */
550 static enum ice_status
551 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
552 {
553         struct ice_flow_prof *prof;
554         u8 i;
555
556         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
557                    ICE_NONDMA_MEM);
558
559         prof = params->prof;
560
561         for (i = 0; i < params->prof->segs_cnt; i++) {
562                 const ice_bitmap_t *src;
563                 u32 hdrs;
564
565                 hdrs = prof->segs[i].hdrs;
566
567                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
568                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
569                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
570                         ice_and_bitmap(params->ptypes, params->ptypes, src,
571                                        ICE_FLOW_PTYPE_MAX);
572                 }
573
574                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
575                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
576                         ice_and_bitmap(params->ptypes, params->ptypes, src,
577                                        ICE_FLOW_PTYPE_MAX);
578                 }
579
580                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
581                         ice_and_bitmap(params->ptypes, params->ptypes,
582                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
583                                        ICE_FLOW_PTYPE_MAX);
584                 }
585
586                 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
587                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
588                         ice_and_bitmap(params->ptypes, params->ptypes, src,
589                                        ICE_FLOW_PTYPE_MAX);
590                 }
591
592                 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
593                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
594                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
595                         ice_and_bitmap(params->ptypes, params->ptypes, src,
596                                        ICE_FLOW_PTYPE_MAX);
597                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
598                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
599                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
600                         ice_and_bitmap(params->ptypes, params->ptypes, src,
601                                        ICE_FLOW_PTYPE_MAX);
602                 }
603
604                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
605                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
606                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
607                         ice_and_bitmap(params->ptypes, params->ptypes, src,
608                                        ICE_FLOW_PTYPE_MAX);
609                 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
610                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
611                         ice_and_bitmap(params->ptypes, params->ptypes, src,
612                                        ICE_FLOW_PTYPE_MAX);
613                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
614                         ice_and_bitmap(params->ptypes, params->ptypes,
615                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
616                                        ICE_FLOW_PTYPE_MAX);
617                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
618                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
619                         ice_and_bitmap(params->ptypes, params->ptypes, src,
620                                        ICE_FLOW_PTYPE_MAX);
621                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
622                         if (!i) {
623                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
624                                 ice_and_bitmap(params->ptypes, params->ptypes,
625                                                src, ICE_FLOW_PTYPE_MAX);
626                         }
627                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
628                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
629                         ice_and_bitmap(params->ptypes, params->ptypes,
630                                        src, ICE_FLOW_PTYPE_MAX);
631                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
632                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
633                         ice_and_bitmap(params->ptypes, params->ptypes,
634                                        src, ICE_FLOW_PTYPE_MAX);
635                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
636                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
637                         ice_and_bitmap(params->ptypes, params->ptypes,
638                                        src, ICE_FLOW_PTYPE_MAX);
639
640                         /* Attributes for GTP packet with downlink */
641                         params->attr = ice_attr_gtpu_down;
642                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
643                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
644                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
645                         ice_and_bitmap(params->ptypes, params->ptypes,
646                                        src, ICE_FLOW_PTYPE_MAX);
647
648                         /* Attributes for GTP packet with uplink */
649                         params->attr = ice_attr_gtpu_up;
650                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
651                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
652                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
653                         ice_and_bitmap(params->ptypes, params->ptypes,
654                                        src, ICE_FLOW_PTYPE_MAX);
655
656                         /* Attributes for GTP packet with Extension Header */
657                         params->attr = ice_attr_gtpu_eh;
658                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
659                 } else if ((hdrs & ICE_FLOW_SEG_HDR_GTPU) ==
660                            ICE_FLOW_SEG_HDR_GTPU) {
661                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
662                         ice_and_bitmap(params->ptypes, params->ptypes,
663                                        src, ICE_FLOW_PTYPE_MAX);
664                 }
665         }
666
667         return ICE_SUCCESS;
668 }
669
670 /**
671  * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
672  * @hw: pointer to the HW struct
673  * @params: information about the flow to be processed
674  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
675  *
676  * This function will allocate an extraction sequence entries for a DWORD size
677  * chunk of the packet flags.
678  */
679 static enum ice_status
680 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
681                           struct ice_flow_prof_params *params,
682                           enum ice_flex_mdid_pkt_flags flags)
683 {
684         u8 fv_words = hw->blk[params->blk].es.fvw;
685         u8 idx;
686
687         /* Make sure the number of extraction sequence entries required does not
688          * exceed the block's capacity.
689          */
690         if (params->es_cnt >= fv_words)
691                 return ICE_ERR_MAX_LIMIT;
692
693         /* some blocks require a reversed field vector layout */
694         if (hw->blk[params->blk].es.reverse)
695                 idx = fv_words - params->es_cnt - 1;
696         else
697                 idx = params->es_cnt;
698
699         params->es[idx].prot_id = ICE_PROT_META_ID;
700         params->es[idx].off = flags;
701         params->es_cnt++;
702
703         return ICE_SUCCESS;
704 }
705
/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 * @match: bitfield of all fields
 *
 * This function determines the protocol ID, offset, and size of the given
 * field. It then allocates one or more extraction sequence entries for the
 * given field, and fills the entries with protocol ID and offset information.
 */
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
		    u8 seg, enum ice_flow_field fld, u64 match)
{
	enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
	u8 fv_words = hw->blk[params->blk].es.fvw;
	struct ice_flow_fld_info *flds;
	u16 cnt, ese_bits, i;
	u16 sib_mask = 0;
	s16 adj = 0;	/* bit-offset adjustment; currently always zero */
	u16 mask;
	u16 off;

	flds = params->prof->segs[seg].fields;

	/* Map the field ID to the protocol it is extracted from. Outer
	 * (seg == 0) and inner segments may use different protocol IDs.
	 */
	switch (fld) {
	case ICE_FLOW_FIELD_IDX_ETH_DA:
	case ICE_FLOW_FIELD_IDX_ETH_SA:
	case ICE_FLOW_FIELD_IDX_S_VLAN:
	case ICE_FLOW_FIELD_IDX_C_VLAN:
		prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
		break;
	case ICE_FLOW_FIELD_IDX_ETH_TYPE:
		prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_TTL:
	case ICE_FLOW_FIELD_IDX_IPV4_PROT:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;

		/* TTL and PROT share the same extraction seq. entry.
		 * Each is considered a sibling to the other in terms of sharing
		 * the same extraction sequence entry.
		 */
		if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
			sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
		else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
			sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;

		/* If the sibling field is also included, that field's
		 * mask needs to be included.
		 */
		if (match & BIT(sib))
			sib_mask = ice_flds_info[sib].mask;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_TTL:
	case ICE_FLOW_FIELD_IDX_IPV6_PROT:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;

		/* TTL and PROT share the same extraction seq. entry.
		 * Each is considered a sibling to the other in terms of sharing
		 * the same extraction sequence entry.
		 */
		if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
			sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
		else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
			sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;

		/* If the sibling field is also included, that field's
		 * mask needs to be included.
		 */
		if (match & BIT(sib))
			sib_mask = ice_flds_info[sib].mask;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_SA:
	case ICE_FLOW_FIELD_IDX_IPV4_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
		prot_id = ICE_PROT_TCP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
		prot_id = ICE_PROT_SCTP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_GTPC_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
		/* GTP is accessed through UDP OF protocol */
		prot_id = ICE_PROT_UDP_OF;
		break;
	case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
		prot_id = ICE_PROT_PPPOE;
		break;
	case ICE_FLOW_FIELD_IDX_ARP_SIP:
	case ICE_FLOW_FIELD_IDX_ARP_DIP:
	case ICE_FLOW_FIELD_IDX_ARP_SHA:
	case ICE_FLOW_FIELD_IDX_ARP_DHA:
	case ICE_FLOW_FIELD_IDX_ARP_OP:
		prot_id = ICE_PROT_ARP_OF;
		break;
	case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
	case ICE_FLOW_FIELD_IDX_ICMP_CODE:
		/* ICMP type and code share the same extraction seq. entry */
		prot_id = (params->prof->segs[seg].hdrs &
			   ICE_FLOW_SEG_HDR_IPV4) ?
			ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
		sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
			ICE_FLOW_FIELD_IDX_ICMP_CODE :
			ICE_FLOW_FIELD_IDX_ICMP_TYPE;
		break;
	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
		prot_id = ICE_PROT_GRE_OF;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	/* Each extraction sequence entry is a word in size, and extracts a
	 * word-aligned offset from a protocol header.
	 */
	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;

	/* Record where and how the field is extracted: protocol, the
	 * word-aligned byte offset, the bit displacement inside that word,
	 * the starting extraction-sequence index, and the field mask.
	 */
	flds[fld].xtrct.prot_id = prot_id;
	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
		ICE_FLOW_FV_EXTRACT_SZ;
	flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
	flds[fld].xtrct.idx = params->es_cnt;
	flds[fld].xtrct.mask = ice_flds_info[fld].mask;

	/* Adjust the next field-entry index after accommodating the number of
	 * entries this field consumes
	 */
	cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
				  ice_flds_info[fld].size, ese_bits);

	/* Fill in the extraction sequence entries needed for this field */
	off = flds[fld].xtrct.off;
	mask = flds[fld].xtrct.mask;
	for (i = 0; i < cnt; i++) {
		/* Only consume an extraction sequence entry if there is no
		 * sibling field associated with this field or the sibling entry
		 * already extracts the word shared with this field.
		 */
		if (sib == ICE_FLOW_FIELD_IDX_MAX ||
		    flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
		    flds[sib].xtrct.off != off) {
			u8 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= fv_words)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			params->es[idx].prot_id = prot_id;
			params->es[idx].off = off;
			/* A shared entry also carries the sibling's mask */
			params->mask[idx] = mask | sib_mask;
			params->es_cnt++;
		}

		off += ICE_FLOW_FV_EXTRACT_SZ;
	}

	return ICE_SUCCESS;
}
899
/**
 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose raw fields are to be extracted
 */
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
		     u8 seg)
{
	u16 fv_words;
	u16 hdrs_sz;
	u8 i;

	/* Nothing to do if this segment carries no raw-byte matches */
	if (!params->prof->segs[seg].raws_cnt)
		return ICE_SUCCESS;

	if (params->prof->segs[seg].raws_cnt >
	    ARRAY_SIZE(params->prof->segs[seg].raws))
		return ICE_ERR_MAX_LIMIT;

	/* Offsets within the segment headers are not supported */
	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
	if (!hdrs_sz)
		return ICE_ERR_PARAM;

	fv_words = hw->blk[params->blk].es.fvw;

	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
		struct ice_flow_seg_fld_raw *raw;
		u16 off, cnt, j;

		raw = &params->prof->segs[seg].raws[i];

		/* Storing extraction information */
		raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
		/* Word-aligned starting byte offset of the extraction */
		raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
			ICE_FLOW_FV_EXTRACT_SZ;
		/* Bit displacement of the raw data within the aligned word */
		raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
			BITS_PER_BYTE;
		raw->info.xtrct.idx = params->es_cnt;

		/* Determine the number of field vector entries this raw field
		 * consumes.
		 */
		cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
					  (raw->info.src.last * BITS_PER_BYTE),
					  (ICE_FLOW_FV_EXTRACT_SZ *
					   BITS_PER_BYTE));
		off = raw->info.xtrct.off;
		for (j = 0; j < cnt; j++) {
			u16 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= hw->blk[params->blk].es.count ||
			    params->es_cnt >= ICE_MAX_FV_WORDS)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			params->es[idx].prot_id = raw->info.xtrct.prot_id;
			params->es[idx].off = off;
			params->es_cnt++;
			off += ICE_FLOW_FV_EXTRACT_SZ;
		}
	}

	return ICE_SUCCESS;
}
975
976 /**
977  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
978  * @hw: pointer to the HW struct
979  * @params: information about the flow to be processed
980  *
981  * This function iterates through all matched fields in the given segments, and
982  * creates an extraction sequence for the fields.
983  */
984 static enum ice_status
985 ice_flow_create_xtrct_seq(struct ice_hw *hw,
986                           struct ice_flow_prof_params *params)
987 {
988         enum ice_status status = ICE_SUCCESS;
989         u8 i;
990
991         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
992          * packet flags
993          */
994         if (params->blk == ICE_BLK_ACL) {
995                 status = ice_flow_xtract_pkt_flags(hw, params,
996                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
997                 if (status)
998                         return status;
999         }
1000
1001         for (i = 0; i < params->prof->segs_cnt; i++) {
1002                 u64 match = params->prof->segs[i].match;
1003                 enum ice_flow_field j;
1004
1005                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1006                         const u64 bit = BIT_ULL(j);
1007
1008                         if (match & bit) {
1009                                 status = ice_flow_xtract_fld(hw, params, i, j,
1010                                                              match);
1011                                 if (status)
1012                                         return status;
1013                                 match &= ~bit;
1014                         }
1015                 }
1016
1017                 /* Process raw matching bytes */
1018                 status = ice_flow_xtract_raws(hw, params, i);
1019                 if (status)
1020                         return status;
1021         }
1022
1023         return status;
1024 }
1025
1026 /**
1027  * ice_flow_sel_acl_scen - returns the specific scenario
1028  * @hw: pointer to the hardware structure
1029  * @params: information about the flow to be processed
1030  *
1031  * This function will return the specific scenario based on the
1032  * params passed to it
1033  */
1034 static enum ice_status
1035 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1036 {
1037         /* Find the best-fit scenario for the provided match width */
1038         struct ice_acl_scen *cand_scen = NULL, *scen;
1039
1040         if (!hw->acl_tbl)
1041                 return ICE_ERR_DOES_NOT_EXIST;
1042
1043         /* Loop through each scenario and match against the scenario width
1044          * to select the specific scenario
1045          */
1046         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1047                 if (scen->eff_width >= params->entry_length &&
1048                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1049                         cand_scen = scen;
1050         if (!cand_scen)
1051                 return ICE_ERR_DOES_NOT_EXIST;
1052
1053         params->prof->cfg.scen = cand_scen;
1054
1055         return ICE_SUCCESS;
1056 }
1057
/**
 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
 * @params: information about the flow to be processed
 *
 * Walks every matched field and raw-byte match of every segment, assigning
 * each either an entry byte position or a range-checker slot, and records
 * the resulting entry length for later scenario selection.
 */
static enum ice_status
ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
{
	u16 index, i, range_idx = 0;

	/* Byte-selection positions start after the fixed base index */
	index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		struct ice_flow_seg_info *seg = &params->prof->segs[i];
		u64 match = seg->match;
		u8 j;

		for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
			struct ice_flow_fld_info *fld;
			const u64 bit = BIT_ULL(j);

			if (!(match & bit))
				continue;

			fld = &seg->fields[j];
			fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;

			if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
				fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;

				/* Range checking only supported for single
				 * words
				 */
				if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
							fld->xtrct.disp,
							BITS_PER_BYTE * 2) > 1)
					return ICE_ERR_PARAM;

				/* Ranges must define low and high values */
				if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
				    fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
					return ICE_ERR_PARAM;

				/* Range fields consume a range-checker slot
				 * rather than entry bytes
				 */
				fld->entry.val = range_idx++;
			} else {
				/* Store adjusted byte-length of field for later
				 * use, taking into account potential
				 * non-byte-aligned displacement
				 */
				fld->entry.last = DIVIDE_AND_ROUND_UP
					(ice_flds_info[j].size +
					 (fld->xtrct.disp % BITS_PER_BYTE),
					 BITS_PER_BYTE);
				fld->entry.val = index;
				index += fld->entry.last;
			}

			match &= ~bit;
		}

		/* Raw-byte matches are laid out after the named fields */
		for (j = 0; j < seg->raws_cnt; j++) {
			struct ice_flow_seg_fld_raw *raw = &seg->raws[j];

			raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
			raw->info.entry.val = index;
			raw->info.entry.last = raw->info.src.last;
			index += raw->info.entry.last;
		}
	}

	/* Currently only support using the byte selection base, which only
	 * allows for an effective entry size of 30 bytes. Reject anything
	 * larger.
	 */
	if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
		return ICE_ERR_PARAM;

	/* Only 8 range checkers per profile, reject anything trying to use
	 * more
	 */
	if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
		return ICE_ERR_PARAM;

	/* Store # bytes required for entry for later use */
	params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	return ICE_SUCCESS;
}
1145
1146 /**
1147  * ice_flow_proc_segs - process all packet segments associated with a profile
1148  * @hw: pointer to the HW struct
1149  * @params: information about the flow to be processed
1150  */
1151 static enum ice_status
1152 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1153 {
1154         enum ice_status status;
1155
1156         status = ice_flow_proc_seg_hdrs(params);
1157         if (status)
1158                 return status;
1159
1160         status = ice_flow_create_xtrct_seq(hw, params);
1161         if (status)
1162                 return status;
1163
1164         switch (params->blk) {
1165         case ICE_BLK_RSS:
1166                 /* Only header information is provided for RSS configuration.
1167                  * No further processing is needed.
1168                  */
1169                 status = ICE_SUCCESS;
1170                 break;
1171         case ICE_BLK_ACL:
1172                 status = ice_flow_acl_def_entry_frmt(params);
1173                 if (status)
1174                         return status;
1175                 status = ice_flow_sel_acl_scen(hw, params);
1176                 if (status)
1177                         return status;
1178                 break;
1179         case ICE_BLK_FD:
1180                 status = ICE_SUCCESS;
1181                 break;
1182         case ICE_BLK_SW:
1183         default:
1184                 return ICE_ERR_NOT_IMPL;
1185         }
1186
1187         return status;
1188 }
1189
1190 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1191 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1192 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1193
1194 /**
1195  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1196  * @hw: pointer to the HW struct
1197  * @blk: classification stage
1198  * @dir: flow direction
1199  * @segs: array of one or more packet segments that describe the flow
1200  * @segs_cnt: number of packet segments provided
1201  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1202  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1203  */
1204 static struct ice_flow_prof *
1205 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1206                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1207                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1208 {
1209         struct ice_flow_prof *p, *prof = NULL;
1210
1211         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1212         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1213                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1214                     segs_cnt && segs_cnt == p->segs_cnt) {
1215                         u8 i;
1216
1217                         /* Check for profile-VSI association if specified */
1218                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1219                             ice_is_vsi_valid(hw, vsi_handle) &&
1220                             !ice_is_bit_set(p->vsis, vsi_handle))
1221                                 continue;
1222
1223                         /* Protocol headers must be checked. Matched fields are
1224                          * checked if specified.
1225                          */
1226                         for (i = 0; i < segs_cnt; i++)
1227                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1228                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1229                                      segs[i].match != p->segs[i].match))
1230                                         break;
1231
1232                         /* A match is found if all segments are matched */
1233                         if (i == segs_cnt) {
1234                                 prof = p;
1235                                 break;
1236                         }
1237                 }
1238         }
1239         ice_release_lock(&hw->fl_profs_locks[blk]);
1240
1241         return prof;
1242 }
1243
1244 /**
1245  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1246  * @hw: pointer to the HW struct
1247  * @blk: classification stage
1248  * @dir: flow direction
1249  * @segs: array of one or more packet segments that describe the flow
1250  * @segs_cnt: number of packet segments provided
1251  */
1252 u64
1253 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1254                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1255 {
1256         struct ice_flow_prof *p;
1257
1258         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1259                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1260
1261         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1262 }
1263
1264 /**
1265  * ice_flow_find_prof_id - Look up a profile with given profile ID
1266  * @hw: pointer to the HW struct
1267  * @blk: classification stage
1268  * @prof_id: unique ID to identify this flow profile
1269  */
1270 static struct ice_flow_prof *
1271 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1272 {
1273         struct ice_flow_prof *p;
1274
1275         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1276                 if (p->id == prof_id)
1277                         return p;
1278         }
1279
1280         return NULL;
1281 }
1282
/**
 * ice_dealloc_flow_entry - Deallocate flow entry memory
 * @hw: pointer to the HW struct
 * @entry: flow entry to be removed
 *
 * Frees the entry's owned buffers (entry data, range-checker buffer, and
 * action list) and then the entry structure itself. Safe to call with a
 * NULL entry pointer.
 */
static void
ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
{
	if (!entry)
		return;

	if (entry->entry)
		ice_free(hw, entry->entry);

	if (entry->range_buf) {
		ice_free(hw, entry->range_buf);
		entry->range_buf = NULL;
	}

	if (entry->acts) {
		ice_free(hw, entry->acts);
		entry->acts = NULL;
		entry->acts_cnt = 0;
	}

	/* Finally release the entry container itself */
	ice_free(hw, entry);
}
1310
1311 #define ICE_ACL_INVALID_SCEN    0x3f
1312
1313 /**
1314  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any pf
1315  * @hw: pointer to the hardware structure
1316  * @prof: pointer to flow profile
1317  * @buf: destination buffer function writes partial xtrct sequence to
1318  *
1319  * returns ICE_SUCCESS if no pf is associated to the given profile
1320  * returns ICE_ERR_IN_USE if at least one pf is associated to the given profile
1321  * returns other error code for real error
1322  */
1323 static enum ice_status
1324 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1325                             struct ice_aqc_acl_prof_generic_frmt *buf)
1326 {
1327         enum ice_status status;
1328         u8 prof_id = 0;
1329
1330         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1331         if (status)
1332                 return status;
1333
1334         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1335         if (status)
1336                 return status;
1337
1338         /* If all pf's associated scenarios are all 0 or all
1339          * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1340          * not been configured yet.
1341          */
1342         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1343             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1344             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1345             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1346                 return ICE_SUCCESS;
1347
1348         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1349             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1350             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1351             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1352             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1353             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1354             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1355             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1356                 return ICE_SUCCESS;
1357         else
1358                 return ICE_ERR_IN_USE;
1359 }
1360
1361 /**
1362  * ice_flow_acl_free_act_cntr - Free the acl rule's actions
1363  * @hw: pointer to the hardware structure
1364  * @acts: array of actions to be performed on a match
1365  * @acts_cnt: number of actions
1366  */
1367 static enum ice_status
1368 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1369                            u8 acts_cnt)
1370 {
1371         int i;
1372
1373         for (i = 0; i < acts_cnt; i++) {
1374                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1375                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1376                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1377                         struct ice_acl_cntrs cntrs;
1378                         enum ice_status status;
1379
1380                         cntrs.bank = 0; /* Only bank0 for the moment */
1381                         cntrs.first_cntr =
1382                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1383                         cntrs.last_cntr =
1384                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1385
1386                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1387                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1388                         else
1389                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1390
1391                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1392                         if (status)
1393                                 return status;
1394                 }
1395         }
1396         return ICE_SUCCESS;
1397 }
1398
1399 /**
1400  * ice_flow_acl_disassoc_scen - Disassociate the scenario to the Profile
1401  * @hw: pointer to the hardware structure
1402  * @prof: pointer to flow profile
1403  *
1404  * Disassociate the scenario to the Profile for the PF of the VSI.
1405  */
1406 static enum ice_status
1407 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1408 {
1409         struct ice_aqc_acl_prof_generic_frmt buf;
1410         enum ice_status status = ICE_SUCCESS;
1411         u8 prof_id = 0;
1412
1413         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1414
1415         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1416         if (status)
1417                 return status;
1418
1419         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1420         if (status)
1421                 return status;
1422
1423         /* Clear scenario for this pf */
1424         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1425         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1426
1427         return status;
1428 }
1429
/**
 * ice_flow_rem_entry_sync - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry: flow entry to be removed
 *
 * For ACL entries the scenario entry is removed from hardware first; the
 * entry is then unlinked from its profile's entry list and freed.
 * NOTE(review): callers appear to hold the profile's entries_lock when
 * calling this (see ice_flow_rem_prof_sync) - confirm for all call sites.
 */
static enum ice_status
ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
			struct ice_flow_entry *entry)
{
	if (!entry)
		return ICE_ERR_BAD_PTR;

	if (blk == ICE_BLK_ACL) {
		enum ice_status status;

		/* ACL teardown needs the owning profile's scenario */
		if (!entry->prof)
			return ICE_ERR_BAD_PTR;

		status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
					   entry->scen_entry_idx);
		if (status)
			return status;

		/* Checks if we need to release an ACL counter. */
		if (entry->acts_cnt && entry->acts)
			/* NOTE(review): the return value is discarded, so a
			 * counter-deallocation failure is not propagated.
			 */
			ice_flow_acl_free_act_cntr(hw, entry->acts,
						   entry->acts_cnt);
	}

	/* Unlink from the profile's entry list, then free the entry */
	LIST_DEL(&entry->l_entry);

	ice_dealloc_flow_entry(hw, entry);

	return ICE_SUCCESS;
}
1466
/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @acts: array of default actions
 * @acts_cnt: number of default actions
 * @prof: stores the returned flow profile added
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_action *acts, u8 acts_cnt,
		       struct ice_flow_prof **prof)
{
	struct ice_flow_prof_params params;
	enum ice_status status;
	u8 i;

	/* Actions are optional, but a non-zero count requires the array */
	if (!prof || (acts_cnt && !acts))
		return ICE_ERR_BAD_PTR;

	ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
	params.prof = (struct ice_flow_prof *)
		ice_malloc(hw, sizeof(*params.prof));
	if (!params.prof)
		return ICE_ERR_NO_MEMORY;

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params.es[i].prot_id = ICE_PROT_INVALID;
		params.es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params.blk = blk;
	params.prof->id = prof_id;
	params.prof->dir = dir;
	params.prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
			   ICE_NONDMA_TO_NONDMA);

	/* Make a copy of the actions that need to be persistent in the flow
	 * profile instance.
	 */
	if (acts_cnt) {
		params.prof->acts = (struct ice_flow_action *)
			ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
				   ICE_NONDMA_TO_NONDMA);

		if (!params.prof->acts) {
			status = ICE_ERR_NO_MEMORY;
			goto out;
		}
	}

	/* Derive ptypes, attributes and extraction sequence from the
	 * segment descriptions.
	 */
	status = ice_flow_proc_segs(hw, &params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW,
			  "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
			      params.attr, params.attr_cnt, params.es,
			      params.mask);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params.prof->entries);
	ice_init_lock(&params.prof->entries_lock);
	*prof = params.prof;

out:
	/* On any failure release the partially built profile (ownership
	 * transfers to the caller only on success).
	 */
	if (status) {
		if (params.prof->acts)
			ice_free(hw, params.prof->acts);
		ice_free(hw, params.prof);
	}

	return status;
}
1562
/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the flow profile */
	if (!LIST_EMPTY(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		ice_acquire_lock(&prof->entries_lock);

		LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
					 l_entry) {
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		/* NOTE(review): a failure above only breaks the loop; status
		 * is subsequently overwritten below, so an entry-removal
		 * error is not reported to the caller - confirm intent.
		 */
		ice_release_lock(&prof->entries_lock);
	}

	if (blk == ICE_BLK_ACL) {
		struct ice_aqc_acl_profile_ranges query_rng_buf;
		struct ice_aqc_acl_prof_generic_frmt buf;
		u8 prof_id = 0;

		/* Deassociate the scenario to the Profile for the PF */
		status = ice_flow_acl_disassoc_scen(hw, prof);
		if (status)
			return status;

		/* Clear the range-checker if the profile ID is no longer
		 * used by any PF
		 */
		status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
		if (status && status != ICE_ERR_IN_USE) {
			return status;
		} else if (!status) {
			/* Clear the range-checker value for profile ID */
			ice_memset(&query_rng_buf, 0,
				   sizeof(struct ice_aqc_acl_profile_ranges),
				   ICE_NONDMA_MEM);

			status = ice_flow_get_hw_prof(hw, blk, prof->id,
						      &prof_id);
			if (status)
				return status;

			/* Program an all-zero (disabled) range config */
			status = ice_prog_acl_prof_ranges(hw, prof_id,
							  &query_rng_buf, NULL);
			if (status)
				return status;
		}
	}

	/* Remove all hardware profiles associated with this flow profile */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		/* Only unlink and free the SW profile once HW removal
		 * succeeded, so state stays consistent on failure.
		 */
		LIST_DEL(&prof->l_entry);
		ice_destroy_lock(&prof->entries_lock);
		if (prof->acts)
			ice_free(hw, prof->acts);
		ice_free(hw, prof);
	}

	return status;
}
1639
1640 /**
1641  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1642  * @buf: Destination buffer function writes partial xtrct sequence to
1643  * @info: Info about field
1644  */
1645 static void
1646 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1647                                struct ice_flow_fld_info *info)
1648 {
1649         u16 dst, i;
1650         u8 src;
1651
1652         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1653                 info->xtrct.disp / BITS_PER_BYTE;
1654         dst = info->entry.val;
1655         for (i = 0; i < info->entry.last; i++)
1656                 /* HW stores field vector words in LE, convert words back to BE
1657                  * so constructed entries will end up in network order
1658                  */
1659                 buf->byte_selection[dst++] = src++ ^ 1;
1660 }
1661
/**
 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
 * @hw: pointer to the hardware structure
 * @prof: pointer to flow profile
 *
 * Programs the profile's extraction sequence into hardware (once, for the
 * first PF to use the profile) and records this PF's scenario ID.
 */
static enum ice_status
ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
{
	struct ice_aqc_acl_prof_generic_frmt buf;
	struct ice_flow_fld_info *info;
	enum ice_status status;
	u8 prof_id = 0;
	u16 i;

	ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

	status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
	if (status)
		return status;

	/* ICE_ERR_IN_USE means another PF already programmed the profile
	 * dependent part; buf then holds the queried configuration.
	 */
	status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
	if (status && status != ICE_ERR_IN_USE)
		return status;

	if (!status) {
		/* Program the profile dependent configuration. This is done
		 * only once regardless of the number of PFs using that profile
		 */
		ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

		for (i = 0; i < prof->segs_cnt; i++) {
			struct ice_flow_seg_info *seg = &prof->segs[i];
			u64 match = seg->match;
			u16 j;

			/* Visit each matched field; clear bits as handled so
			 * the loop can stop early once match reaches zero.
			 */
			for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
				const u64 bit = BIT_ULL(j);

				if (!(match & bit))
					continue;

				info = &seg->fields[j];

				/* Range-checked fields select whole FV words;
				 * all other fields select individual bytes.
				 */
				if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
					buf.word_selection[info->entry.val] =
								info->xtrct.idx;
				else
					ice_flow_acl_set_xtrct_seq_fld(&buf,
								       info);

				match &= ~bit;
			}

			/* Raw (user-defined) extractions are byte-selected */
			for (j = 0; j < seg->raws_cnt; j++) {
				info = &seg->raws[j].info;
				ice_flow_acl_set_xtrct_seq_fld(&buf, info);
			}
		}

		/* Start with every PF slot invalid; only the current PF's
		 * slot is filled in below.
		 */
		ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
			   ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
			   ICE_NONDMA_MEM);
	}

	/* Update the current PF */
	buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
	status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);

	return status;
}
1732
1733 /**
1734  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1735  * @hw: pointer to the hardware structure
1736  * @blk: classification stage
1737  * @vsi_handle: software VSI handle
1738  * @vsig: target VSI group
1739  *
1740  * Assumption: the caller has already verified that the VSI to
1741  * be added has the same characteristics as the VSIG and will
1742  * thereby have access to all resources added to that VSIG.
1743  */
1744 enum ice_status
1745 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1746                         u16 vsig)
1747 {
1748         enum ice_status status;
1749
1750         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1751                 return ICE_ERR_PARAM;
1752
1753         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1754         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1755                                   vsig);
1756         ice_release_lock(&hw->fl_profs_locks[blk]);
1757
1758         return status;
1759 }
1760
1761 /**
1762  * ice_flow_assoc_prof - associate a VSI with a flow profile
1763  * @hw: pointer to the hardware structure
1764  * @blk: classification stage
1765  * @prof: pointer to flow profile
1766  * @vsi_handle: software VSI handle
1767  *
1768  * Assumption: the caller has acquired the lock to the profile list
1769  * and the software VSI handle has been validated
1770  */
1771 static enum ice_status
1772 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1773                     struct ice_flow_prof *prof, u16 vsi_handle)
1774 {
1775         enum ice_status status = ICE_SUCCESS;
1776
1777         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1778                 if (blk == ICE_BLK_ACL) {
1779                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
1780                         if (status)
1781                                 return status;
1782                 }
1783                 status = ice_add_prof_id_flow(hw, blk,
1784                                               ice_get_hw_vsi_num(hw,
1785                                                                  vsi_handle),
1786                                               prof->id);
1787                 if (!status)
1788                         ice_set_bit(vsi_handle, prof->vsis);
1789                 else
1790                         ice_debug(hw, ICE_DBG_FLOW,
1791                                   "HW profile add failed, %d\n",
1792                                   status);
1793         }
1794
1795         return status;
1796 }
1797
1798 /**
1799  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1800  * @hw: pointer to the hardware structure
1801  * @blk: classification stage
1802  * @prof: pointer to flow profile
1803  * @vsi_handle: software VSI handle
1804  *
1805  * Assumption: the caller has acquired the lock to the profile list
1806  * and the software VSI handle has been validated
1807  */
1808 static enum ice_status
1809 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1810                        struct ice_flow_prof *prof, u16 vsi_handle)
1811 {
1812         enum ice_status status = ICE_SUCCESS;
1813
1814         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
1815                 status = ice_rem_prof_id_flow(hw, blk,
1816                                               ice_get_hw_vsi_num(hw,
1817                                                                  vsi_handle),
1818                                               prof->id);
1819                 if (!status)
1820                         ice_clear_bit(vsi_handle, prof->vsis);
1821                 else
1822                         ice_debug(hw, ICE_DBG_FLOW,
1823                                   "HW profile remove failed, %d\n",
1824                                   status);
1825         }
1826
1827         return status;
1828 }
1829
/**
 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @acts: array of default actions
 * @acts_cnt: number of default actions
 * @prof: stores the returned flow profile added
 *
 * Validates the segment descriptions, then builds the profile under the
 * per-block profile list lock and links it into hw->fl_profs[blk].
 */
enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
		  struct ice_flow_action *acts, u8 acts_cnt,
		  struct ice_flow_prof **prof)
{
	enum ice_status status;

	if (segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_MAX_LIMIT;

	/* At least one segment is required to describe a flow */
	if (!segs_cnt)
		return ICE_ERR_PARAM;

	if (!segs)
		return ICE_ERR_BAD_PTR;

	/* Verify the segment headers form a supported combination */
	status = ice_flow_val_hdrs(segs, segs_cnt);
	if (status)
		return status;

	ice_acquire_lock(&hw->fl_profs_locks[blk]);

	status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
					acts, acts_cnt, prof);
	if (!status)
		/* Publish the new profile on the block's profile list */
		LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);

	ice_release_lock(&hw->fl_profs_locks[blk]);

	return status;
}
1874
1875 /**
1876  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
1877  * @hw: pointer to the HW struct
1878  * @blk: the block for which the flow profile is to be removed
1879  * @prof_id: unique ID of the flow profile to be removed
1880  */
1881 enum ice_status
1882 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1883 {
1884         struct ice_flow_prof *prof;
1885         enum ice_status status;
1886
1887         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1888
1889         prof = ice_flow_find_prof_id(hw, blk, prof_id);
1890         if (!prof) {
1891                 status = ICE_ERR_DOES_NOT_EXIST;
1892                 goto out;
1893         }
1894
1895         /* prof becomes invalid after the call */
1896         status = ice_flow_rem_prof_sync(hw, blk, prof);
1897
1898 out:
1899         ice_release_lock(&hw->fl_profs_locks[blk]);
1900
1901         return status;
1902 }
1903
1904 /**
1905  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1906  * @hw: pointer to the HW struct
1907  * @blk: classification stage
1908  * @prof_id: the profile ID handle
1909  * @hw_prof_id: pointer to variable to receive the HW profile ID
1910  */
1911 enum ice_status
1912 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1913                      u8 *hw_prof_id)
1914 {
1915         struct ice_prof_map *map;
1916
1917         map = ice_search_prof_id(hw, blk, prof_id);
1918         if (map) {
1919                 *hw_prof_id = map->prof_id;
1920                 return ICE_SUCCESS;
1921         }
1922
1923         return ICE_ERR_DOES_NOT_EXIST;
1924 }
1925
/**
 * ice_flow_find_entry - look for a flow entry using its unique ID
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry_id: unique ID to identify this flow entry
 *
 * This function looks for the flow entry with the specified unique ID in all
 * flow profiles of the specified classification stage. If the entry is found,
 * it returns the handle to the flow entry. Otherwise, it returns
 * ICE_FLOW_ENTRY_HANDLE_INVAL.
 */
u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
{
	struct ice_flow_entry *found = NULL;
	struct ice_flow_prof *p;

	ice_acquire_lock(&hw->fl_profs_locks[blk]);

	/* Scan every profile's entry list under its own lock */
	LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
		struct ice_flow_entry *e;

		ice_acquire_lock(&p->entries_lock);
		LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
			if (e->id == entry_id) {
				found = e;
				break;
			}
		ice_release_lock(&p->entries_lock);

		if (found)
			break;
	}

	ice_release_lock(&hw->fl_profs_locks[blk]);

	return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
}
1963
/**
 * ice_flow_acl_check_actions - Checks the acl rule's actions
 * @hw: pointer to the hardware structure
 * @acts: array of actions to be performed on a match
 * @acts_cnt: number of actions
 * @cnt_alloc: indicates if a ACL counter has been allocated.
 *
 * Validates the action list (supported types, no duplicates) and allocates
 * a HW counter for counter-type actions, storing the counter index into the
 * action's value field.
 */
static enum ice_status
ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
			   u8 acts_cnt, bool *cnt_alloc)
{
	ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	int i;

	ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	*cnt_alloc = false;

	if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
		return ICE_ERR_OUT_OF_RANGE;

	for (i = 0; i < acts_cnt; i++) {
		/* NOTE(review): CNTR_BYTES and CNTR_PKT_BYTES are rejected
		 * here, which makes their branches in the allocation loop
		 * below unreachable - confirm whether they should be allowed.
		 */
		if (acts[i].type != ICE_FLOW_ACT_NOP &&
		    acts[i].type != ICE_FLOW_ACT_DROP &&
		    acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
		    acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
			return ICE_ERR_CFG;

		/* If the caller want to add two actions of the same type, then
		 * it is considered invalid configuration.
		 */
		if (ice_test_and_set_bit(acts[i].type, dup_check))
			return ICE_ERR_PARAM;
	}

	/* Checks if ACL counters are needed. */
	for (i = 0; i < acts_cnt; i++) {
		if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
		    acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
		    acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
			struct ice_acl_cntrs cntrs;
			enum ice_status status;

			cntrs.amount = 1;
			cntrs.bank = 0; /* Only bank0 for the moment */

			/* Packet+byte counting needs a dual-type counter */
			if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
				cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
			else
				cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;

			status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
			if (status)
				return status;
			/* Counter index within the bank */
			acts[i].data.acl_act.value =
						CPU_TO_LE16(cntrs.first_cntr);
			*cnt_alloc = true;
		}
	}

	return ICE_SUCCESS;
}
2026
/**
 * ice_flow_acl_frmt_entry_range - Format an acl range checker for a given field
 * @fld: number of the given field
 * @info: info about field
 * @range_buf: range checker configuration buffer
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @range: Input/output param indicating which range checkers are being used
 *
 * Writes the low/high boundaries and mask (big-endian) for one range-checked
 * field and marks the used checker index in @range.
 * NOTE(review): the u16 loads below read from arbitrary byte offsets in
 * @data; this assumes the platform tolerates unaligned access - confirm.
 */
static void
ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
			      struct ice_aqc_acl_profile_ranges *range_buf,
			      u8 *data, u8 *range)
{
	u16 new_mask;

	/* If not specified, default mask is all bits in field */
	new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
		    BIT(ice_flds_info[fld].size) - 1 :
		    (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;

	/* If the mask is 0, then we don't need to worry about this input
	 * range checker value.
	 */
	if (new_mask) {
		/* Shift values align with the field's bit displacement */
		u16 new_high =
			(*(u16 *)(data + info->src.last)) << info->xtrct.disp;
		u16 new_low =
			(*(u16 *)(data + info->src.val)) << info->xtrct.disp;
		u8 range_idx = info->entry.val;

		range_buf->checker_cfg[range_idx].low_boundary =
			CPU_TO_BE16(new_low);
		range_buf->checker_cfg[range_idx].high_boundary =
			CPU_TO_BE16(new_high);
		range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);

		/* Indicate which range checker is being used */
		*range |= BIT(range_idx);
	}
}
2067
/**
 * ice_flow_acl_frmt_entry_fld - Partially format acl entry for a given field
 * @fld: number of the given field
 * @info: info about the field
 * @buf: buffer containing the entry
 * @dontcare: buffer containing don't care mask for entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 *
 * Copies the field's match value (and inverted mask, if given) from @data
 * into the entry buffers, shifting by the field's bit displacement within
 * its first byte. Bits outside the field are marked don't-care.
 * NOTE(review): assumes info->entry.val >= ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX
 * so the dst subtraction cannot underflow - confirm against the profile
 * formatting code.
 */
static void
ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
			    u8 *dontcare, u8 *data)
{
	u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
	bool use_mask = false;
	u8 disp;

	src = info->src.val;
	mask = info->src.mask;
	dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
	disp = info->xtrct.disp % BITS_PER_BYTE;

	/* ICE_FLOW_FLD_OFF_INVAL means the caller supplied no mask */
	if (mask != ICE_FLOW_FLD_OFF_INVAL)
		use_mask = true;

	for (k = 0; k < info->entry.last; k++, dst++) {
		/* Add overflow bits from previous byte */
		buf[dst] = (tmp_s & 0xff00) >> 8;

		/* If mask is not valid, tmp_m is always zero, so just setting
		 * dontcare to 0 (no masked bits). If mask is valid, pulls in
		 * overflow bits of mask from prev byte
		 */
		dontcare[dst] = (tmp_m & 0xff00) >> 8;

		/* If there is displacement, last byte will only contain
		 * displaced data, but there is no more data to read from user
		 * buffer, so skip so as not to potentially read beyond end of
		 * user buffer
		 */
		if (!disp || k < info->entry.last - 1) {
			/* Store shifted data to use in next byte */
			tmp_s = data[src++] << disp;

			/* Add current (shifted) byte */
			buf[dst] |= tmp_s & 0xff;

			/* Handle mask if valid */
			if (use_mask) {
				/* Mask is inverted: don't-care = NOT match */
				tmp_m = (~data[mask++] & 0xff) << disp;
				dontcare[dst] |= tmp_m & 0xff;
			}
		}
	}

	/* Fill in don't care bits at beginning of field */
	if (disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
		for (k = 0; k < disp; k++)
			dontcare[dst] |= BIT(k);
	}

	end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;

	/* Fill in don't care bits at end of field */
	if (end_disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
		      info->entry.last - 1;
		for (k = end_disp; k < BITS_PER_BYTE; k++)
			dontcare[dst] |= BIT(k);
	}
}
2139
2140 /**
2141  * ice_flow_acl_frmt_entry - Format acl entry
2142  * @hw: pointer to the hardware structure
2143  * @prof: pointer to flow profile
2144  * @e: pointer to the flow entry
2145  * @data: pointer to a data buffer containing flow entry's match values/masks
2146  * @acts: array of actions to be performed on a match
2147  * @acts_cnt: number of actions
2148  *
2149  * Formats the key (and key_inverse) to be matched from the data passed in,
2150  * along with data from the flow profile. This key/key_inverse pair makes up
2151  * the 'entry' for an acl flow entry.
2152  */
2153 static enum ice_status
2154 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2155                         struct ice_flow_entry *e, u8 *data,
2156                         struct ice_flow_action *acts, u8 acts_cnt)
2157 {
2158         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2159         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2160         enum ice_status status;
2161         bool cnt_alloc;
2162         u8 prof_id = 0;
2163         u16 i, buf_sz;
2164
2165         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2166         if (status)
2167                 return status;
2168
2169         /* Format the result action */
2170
2171         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2172         if (status)
2173                 return status;
2174
2175         status = ICE_ERR_NO_MEMORY;
2176
2177         e->acts = (struct ice_flow_action *)
2178                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2179                            ICE_NONDMA_TO_NONDMA);
2180
2181         if (!e->acts)
2182                 goto out;
2183
2184         e->acts_cnt = acts_cnt;
2185
2186         /* Format the matching data */
2187         buf_sz = prof->cfg.scen->width;
2188         buf = (u8 *)ice_malloc(hw, buf_sz);
2189         if (!buf)
2190                 goto out;
2191
2192         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2193         if (!dontcare)
2194                 goto out;
2195
2196         /* 'key' buffer will store both key and key_inverse, so must be twice
2197          * size of buf
2198          */
2199         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2200         if (!key)
2201                 goto out;
2202
2203         range_buf = (struct ice_aqc_acl_profile_ranges *)
2204                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2205         if (!range_buf)
2206                 goto out;
2207
2208         /* Set don't care mask to all 1's to start, will zero out used bytes */
2209         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2210
2211         for (i = 0; i < prof->segs_cnt; i++) {
2212                 struct ice_flow_seg_info *seg = &prof->segs[i];
2213                 u64 match = seg->match;
2214                 u16 j;
2215
2216                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2217                         struct ice_flow_fld_info *info;
2218                         const u64 bit = BIT_ULL(j);
2219
2220                         if (!(match & bit))
2221                                 continue;
2222
2223                         info = &seg->fields[j];
2224
2225                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2226                                 ice_flow_acl_frmt_entry_range(j, info,
2227                                                               range_buf, data,
2228                                                               &range);
2229                         else
2230                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2231                                                             dontcare, data);
2232
2233                         match &= ~bit;
2234                 }
2235
2236                 for (j = 0; j < seg->raws_cnt; j++) {
2237                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2238                         u16 dst, src, mask, k;
2239                         bool use_mask = false;
2240
2241                         src = info->src.val;
2242                         dst = info->entry.val -
2243                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2244                         mask = info->src.mask;
2245
2246                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2247                                 use_mask = true;
2248
2249                         for (k = 0; k < info->entry.last; k++, dst++) {
2250                                 buf[dst] = data[src++];
2251                                 if (use_mask)
2252                                         dontcare[dst] = ~data[mask++];
2253                                 else
2254                                         dontcare[dst] = 0;
2255                         }
2256                 }
2257         }
2258
2259         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2260         dontcare[prof->cfg.scen->pid_idx] = 0;
2261
2262         /* Format the buffer for direction flags */
2263         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2264
2265         if (prof->dir == ICE_FLOW_RX)
2266                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2267
2268         if (range) {
2269                 buf[prof->cfg.scen->rng_chk_idx] = range;
2270                 /* Mark any unused range checkers as don't care */
2271                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2272                 e->range_buf = range_buf;
2273         } else {
2274                 ice_free(hw, range_buf);
2275         }
2276
2277         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2278                              buf_sz);
2279         if (status)
2280                 goto out;
2281
2282         e->entry = key;
2283         e->entry_sz = buf_sz * 2;
2284
2285 out:
2286         if (buf)
2287                 ice_free(hw, buf);
2288
2289         if (dontcare)
2290                 ice_free(hw, dontcare);
2291
2292         if (status && key)
2293                 ice_free(hw, key);
2294
2295         if (status && range_buf) {
2296                 ice_free(hw, range_buf);
2297                 e->range_buf = NULL;
2298         }
2299
2300         if (status && e->acts) {
2301                 ice_free(hw, e->acts);
2302                 e->acts = NULL;
2303                 e->acts_cnt = 0;
2304         }
2305
2306         if (status && cnt_alloc)
2307                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2308
2309         return status;
2310 }
2311
2312 /**
2313  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2314  *                                     the compared data.
2315  * @prof: pointer to flow profile
2316  * @e: pointer to the comparing flow entry
2317  * @do_chg_action: decide if we want to change the ACL action
2318  * @do_add_entry: decide if we want to add the new ACL entry
2319  * @do_rem_entry: decide if we want to remove the current ACL entry
2320  *
2321  * Find an ACL scenario entry that matches the compared data. In the same time,
2322  * this function also figure out:
2323  * a/ If we want to change the ACL action
2324  * b/ If we want to add the new ACL entry
2325  * c/ If we want to remove the current ACL entry
2326  */
2327 static struct ice_flow_entry *
2328 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2329                                   struct ice_flow_entry *e, bool *do_chg_action,
2330                                   bool *do_add_entry, bool *do_rem_entry)
2331 {
2332         struct ice_flow_entry *p, *return_entry = NULL;
2333         u8 i, j;
2334
2335         /* Check if:
2336          * a/ There exists an entry with same matching data, but different
2337          *    priority, then we remove this existing ACL entry. Then, we
2338          *    will add the new entry to the ACL scenario.
2339          * b/ There exists an entry with same matching data, priority, and
2340          *    result action, then we do nothing
2341          * c/ There exists an entry with same matching data, priority, but
2342          *    different, action, then do only change the action's entry.
2343          * d/ Else, we add this new entry to the ACL scenario.
2344          */
2345         *do_chg_action = false;
2346         *do_add_entry = true;
2347         *do_rem_entry = false;
2348         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2349                 if (memcmp(p->entry, e->entry, p->entry_sz))
2350                         continue;
2351
2352                 /* From this point, we have the same matching_data. */
2353                 *do_add_entry = false;
2354                 return_entry = p;
2355
2356                 if (p->priority != e->priority) {
2357                         /* matching data && !priority */
2358                         *do_add_entry = true;
2359                         *do_rem_entry = true;
2360                         break;
2361                 }
2362
2363                 /* From this point, we will have matching_data && priority */
2364                 if (p->acts_cnt != e->acts_cnt)
2365                         *do_chg_action = true;
2366                 for (i = 0; i < p->acts_cnt; i++) {
2367                         bool found_not_match = false;
2368
2369                         for (j = 0; j < e->acts_cnt; j++)
2370                                 if (memcmp(&p->acts[i], &e->acts[j],
2371                                            sizeof(struct ice_flow_action))) {
2372                                         found_not_match = true;
2373                                         break;
2374                                 }
2375
2376                         if (found_not_match) {
2377                                 *do_chg_action = true;
2378                                 break;
2379                         }
2380                 }
2381
2382                 /* (do_chg_action = true) means :
2383                  *    matching_data && priority && !result_action
2384                  * (do_chg_action = false) means :
2385                  *    matching_data && priority && result_action
2386                  */
2387                 break;
2388         }
2389
2390         return return_entry;
2391 }
2392
2393 /**
2394  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2395  * @p: flow priority
2396  */
2397 static enum ice_acl_entry_prior
2398 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2399 {
2400         enum ice_acl_entry_prior acl_prior;
2401
2402         switch (p) {
2403         case ICE_FLOW_PRIO_LOW:
2404                 acl_prior = ICE_LOW;
2405                 break;
2406         case ICE_FLOW_PRIO_NORMAL:
2407                 acl_prior = ICE_NORMAL;
2408                 break;
2409         case ICE_FLOW_PRIO_HIGH:
2410                 acl_prior = ICE_HIGH;
2411                 break;
2412         default:
2413                 acl_prior = ICE_NORMAL;
2414                 break;
2415         }
2416
2417         return acl_prior;
2418 }
2419
2420 /**
2421  * ice_flow_acl_union_rng_chk - Perform union operation between two
2422  *                              range-range checker buffers
2423  * @dst_buf: pointer to destination range checker buffer
2424  * @src_buf: pointer to source range checker buffer
2425  *
2426  * For this function, we do the union between dst_buf and src_buf
2427  * range checker buffer, and we will save the result back to dst_buf
2428  */
2429 static enum ice_status
2430 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2431                            struct ice_aqc_acl_profile_ranges *src_buf)
2432 {
2433         u8 i, j;
2434
2435         if (!dst_buf || !src_buf)
2436                 return ICE_ERR_BAD_PTR;
2437
2438         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2439                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2440                 bool will_populate = false;
2441
2442                 in_data = &src_buf->checker_cfg[i];
2443
2444                 if (!in_data->mask)
2445                         break;
2446
2447                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2448                         cfg_data = &dst_buf->checker_cfg[j];
2449
2450                         if (!cfg_data->mask ||
2451                             !memcmp(cfg_data, in_data,
2452                                     sizeof(struct ice_acl_rng_data))) {
2453                                 will_populate = true;
2454                                 break;
2455                         }
2456                 }
2457
2458                 if (will_populate) {
2459                         ice_memcpy(cfg_data, in_data,
2460                                    sizeof(struct ice_acl_rng_data),
2461                                    ICE_NONDMA_TO_NONDMA);
2462                 } else {
2463                         /* No available slot left to program range checker */
2464                         return ICE_ERR_MAX_LIMIT;
2465                 }
2466         }
2467
2468         return ICE_SUCCESS;
2469 }
2470
2471 /**
2472  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2473  * @hw: pointer to the hardware structure
2474  * @prof: pointer to flow profile
2475  * @entry: double pointer to the flow entry
2476  *
2477  * For this function, we will look at the current added entries in the
2478  * corresponding ACL scenario. Then, we will perform matching logic to
2479  * see if we want to add/modify/do nothing with this new entry.
2480  */
2481 static enum ice_status
2482 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2483                                  struct ice_flow_entry **entry)
2484 {
2485         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2486         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2487         struct ice_acl_act_entry *acts = NULL;
2488         struct ice_flow_entry *exist;
2489         enum ice_status status = ICE_SUCCESS;
2490         struct ice_flow_entry *e;
2491         u8 i;
2492
2493         if (!entry || !(*entry) || !prof)
2494                 return ICE_ERR_BAD_PTR;
2495
2496         e = *(entry);
2497
2498         do_chg_rng_chk = false;
2499         if (e->range_buf) {
2500                 u8 prof_id = 0;
2501
2502                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2503                                               &prof_id);
2504                 if (status)
2505                         return status;
2506
2507                 /* Query the current range-checker value in FW */
2508                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2509                                                    NULL);
2510                 if (status)
2511                         return status;
2512                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2513                            sizeof(struct ice_aqc_acl_profile_ranges),
2514                            ICE_NONDMA_TO_NONDMA);
2515
2516                 /* Generate the new range-checker value */
2517                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2518                 if (status)
2519                         return status;
2520
2521                 /* Reconfigure the range check if the buffer is changed. */
2522                 do_chg_rng_chk = false;
2523                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2524                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2525                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2526                                                           &cfg_rng_buf, NULL);
2527                         if (status)
2528                                 return status;
2529
2530                         do_chg_rng_chk = true;
2531                 }
2532         }
2533
2534         /* Figure out if we want to (change the ACL action) and/or
2535          * (Add the new ACL entry) and/or (Remove the current ACL entry)
2536          */
2537         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2538                                                   &do_add_entry, &do_rem_entry);
2539
2540         if (do_rem_entry) {
2541                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2542                 if (status)
2543                         return status;
2544         }
2545
2546         /* Prepare the result action buffer */
2547         acts = (struct ice_acl_act_entry *)ice_calloc
2548                 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2549         for (i = 0; i < e->acts_cnt; i++)
2550                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2551                            sizeof(struct ice_acl_act_entry),
2552                            ICE_NONDMA_TO_NONDMA);
2553
2554         if (do_add_entry) {
2555                 enum ice_acl_entry_prior prior;
2556                 u8 *keys, *inverts;
2557                 u16 entry_idx;
2558
2559                 keys = (u8 *)e->entry;
2560                 inverts = keys + (e->entry_sz / 2);
2561                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2562
2563                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2564                                            inverts, acts, e->acts_cnt,
2565                                            &entry_idx);
2566                 if (status)
2567                         goto out;
2568
2569                 e->scen_entry_idx = entry_idx;
2570                 LIST_ADD(&e->l_entry, &prof->entries);
2571         } else {
2572                 if (do_chg_action) {
2573                         /* For the action memory info, update the SW's copy of
2574                          * exist entry with e's action memory info
2575                          */
2576                         ice_free(hw, exist->acts);
2577                         exist->acts_cnt = e->acts_cnt;
2578                         exist->acts = (struct ice_flow_action *)
2579                                 ice_calloc(hw, exist->acts_cnt,
2580                                            sizeof(struct ice_flow_action));
2581
2582                         if (!exist->acts) {
2583                                 status = ICE_ERR_NO_MEMORY;
2584                                 goto out;
2585                         }
2586
2587                         ice_memcpy(exist->acts, e->acts,
2588                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2589                                    ICE_NONDMA_TO_NONDMA);
2590
2591                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2592                                                   e->acts_cnt,
2593                                                   exist->scen_entry_idx);
2594                         if (status)
2595                                 goto out;
2596                 }
2597
2598                 if (do_chg_rng_chk) {
2599                         /* In this case, we want to update the range checker
2600                          * information of the exist entry
2601                          */
2602                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2603                                                             e->range_buf);
2604                         if (status)
2605                                 goto out;
2606                 }
2607
2608                 /* As we don't add the new entry to our SW DB, deallocate its
2609                  * memories, and return the exist entry to the caller
2610                  */
2611                 ice_dealloc_flow_entry(hw, e);
2612                 *(entry) = exist;
2613         }
2614 out:
2615         if (acts)
2616                 ice_free(hw, acts);
2617
2618         return status;
2619 }
2620
2621 /**
2622  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2623  * @hw: pointer to the hardware structure
2624  * @prof: pointer to flow profile
2625  * @e: double pointer to the flow entry
2626  */
2627 static enum ice_status
2628 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2629                             struct ice_flow_entry **e)
2630 {
2631         enum ice_status status;
2632
2633         ice_acquire_lock(&prof->entries_lock);
2634         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2635         ice_release_lock(&prof->entries_lock);
2636
2637         return status;
2638 }
2639
2640 /**
2641  * ice_flow_add_entry - Add a flow entry
2642  * @hw: pointer to the HW struct
2643  * @blk: classification stage
2644  * @prof_id: ID of the profile to add a new flow entry to
2645  * @entry_id: unique ID to identify this flow entry
2646  * @vsi_handle: software VSI handle for the flow entry
2647  * @prio: priority of the flow entry
2648  * @data: pointer to a data buffer containing flow entry's match values/masks
2649  * @acts: arrays of actions to be performed on a match
2650  * @acts_cnt: number of actions
2651  * @entry_h: pointer to buffer that receives the new flow entry's handle
2652  */
2653 enum ice_status
2654 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2655                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2656                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
2657                    u64 *entry_h)
2658 {
2659         struct ice_flow_prof *prof = NULL;
2660         struct ice_flow_entry *e = NULL;
2661         enum ice_status status = ICE_SUCCESS;
2662
2663         /* ACL entries must indicate an action */
2664         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2665                 return ICE_ERR_PARAM;
2666
2667         /* No flow entry data is expected for RSS */
2668         if (!entry_h || (!data && blk != ICE_BLK_RSS))
2669                 return ICE_ERR_BAD_PTR;
2670
2671         if (!ice_is_vsi_valid(hw, vsi_handle))
2672                 return ICE_ERR_PARAM;
2673
2674         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2675
2676         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2677         if (!prof) {
2678                 status = ICE_ERR_DOES_NOT_EXIST;
2679         } else {
2680                 /* Allocate memory for the entry being added and associate
2681                  * the VSI to the found flow profile
2682                  */
2683                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2684                 if (!e)
2685                         status = ICE_ERR_NO_MEMORY;
2686                 else
2687                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2688         }
2689
2690         ice_release_lock(&hw->fl_profs_locks[blk]);
2691         if (status)
2692                 goto out;
2693
2694         e->id = entry_id;
2695         e->vsi_handle = vsi_handle;
2696         e->prof = prof;
2697         e->priority = prio;
2698
2699         switch (blk) {
2700         case ICE_BLK_RSS:
2701                 /* RSS will add only one entry per VSI per profile */
2702                 break;
2703         case ICE_BLK_ACL:
2704                 /* ACL will handle the entry management */
2705                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
2706                                                  acts_cnt);
2707                 if (status)
2708                         goto out;
2709
2710                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
2711                 if (status)
2712                         goto out;
2713
2714                 break;
2715         case ICE_BLK_FD:
2716                 break;
2717         case ICE_BLK_SW:
2718         case ICE_BLK_PE:
2719         default:
2720                 status = ICE_ERR_NOT_IMPL;
2721                 goto out;
2722         }
2723
2724         if (blk != ICE_BLK_ACL) {
2725                 /* ACL will handle the entry management */
2726                 ice_acquire_lock(&prof->entries_lock);
2727                 LIST_ADD(&e->l_entry, &prof->entries);
2728                 ice_release_lock(&prof->entries_lock);
2729         }
2730
2731         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
2732
2733 out:
2734         if (status && e) {
2735                 if (e->entry)
2736                         ice_free(hw, e->entry);
2737                 ice_free(hw, e);
2738         }
2739
2740         return status;
2741 }
2742
2743 /**
2744  * ice_flow_rem_entry - Remove a flow entry
2745  * @hw: pointer to the HW struct
2746  * @blk: classification stage
2747  * @entry_h: handle to the flow entry to be removed
2748  */
2749 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2750                                    u64 entry_h)
2751 {
2752         struct ice_flow_entry *entry;
2753         struct ice_flow_prof *prof;
2754         enum ice_status status = ICE_SUCCESS;
2755
2756         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2757                 return ICE_ERR_PARAM;
2758
2759         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2760
2761         /* Retain the pointer to the flow profile as the entry will be freed */
2762         prof = entry->prof;
2763
2764         if (prof) {
2765                 ice_acquire_lock(&prof->entries_lock);
2766                 status = ice_flow_rem_entry_sync(hw, blk, entry);
2767                 ice_release_lock(&prof->entries_lock);
2768         }
2769
2770         return status;
2771 }
2772
2773 /**
2774  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2775  * @seg: packet segment the field being set belongs to
2776  * @fld: field to be set
2777  * @field_type: type of the field
2778  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2779  *           entry's input buffer
2780  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2781  *            input buffer
2782  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2783  *            entry's input buffer
2784  *
2785  * This helper function stores information of a field being matched, including
2786  * the type of the field and the locations of the value to match, the mask, and
2787  * and the upper-bound value in the start of the input buffer for a flow entry.
2788  * This function should only be used for fixed-size data structures.
2789  *
2790  * This function also opportunistically determines the protocol headers to be
2791  * present based on the fields being set. Some fields cannot be used alone to
2792  * determine the protocol headers present. Sometimes, fields for particular
2793  * protocol headers are not matched. In those cases, the protocol headers
2794  * must be explicitly set.
2795  */
2796 static void
2797 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2798                      enum ice_flow_fld_match_type field_type, u16 val_loc,
2799                      u16 mask_loc, u16 last_loc)
2800 {
2801         u64 bit = BIT_ULL(fld);
2802
2803         seg->match |= bit;
2804         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
2805                 seg->range |= bit;
2806
2807         seg->fields[fld].type = field_type;
2808         seg->fields[fld].src.val = val_loc;
2809         seg->fields[fld].src.mask = mask_loc;
2810         seg->fields[fld].src.last = last_loc;
2811
2812         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
2813 }
2814
2815 /**
2816  * ice_flow_set_fld - specifies locations of field from entry's input buffer
2817  * @seg: packet segment the field being set belongs to
2818  * @fld: field to be set
2819  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2820  *           entry's input buffer
2821  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2822  *            input buffer
2823  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2824  *            entry's input buffer
2825  * @range: indicate if field being matched is to be in a range
2826  *
2827  * This function specifies the locations, in the form of byte offsets from the
2828  * start of the input buffer for a flow entry, from where the value to match,
2829  * the mask value, and upper value can be extracted. These locations are then
2830  * stored in the flow profile. When adding a flow entry associated with the
2831  * flow profile, these locations will be used to quickly extract the values and
2832  * create the content of a match entry. This function should only be used for
2833  * fixed-size data structures.
2834  */
2835 void
2836 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2837                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
2838 {
2839         enum ice_flow_fld_match_type t = range ?
2840                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
2841
2842         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
2843 }
2844
2845 /**
2846  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
2847  * @seg: packet segment the field being set belongs to
2848  * @fld: field to be set
2849  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2850  *           entry's input buffer
2851  * @pref_loc: location of prefix value from entry's input buffer
2852  * @pref_sz: size of the location holding the prefix value
2853  *
2854  * This function specifies the locations, in the form of byte offsets from the
2855  * start of the input buffer for a flow entry, from where the value to match
2856  * and the IPv4 prefix value can be extracted. These locations are then stored
2857  * in the flow profile. When adding flow entries to the associated flow profile,
2858  * these locations can be used to quickly extract the values to create the
2859  * content of a match entry. This function should only be used for fixed-size
2860  * data structures.
2861  */
2862 void
2863 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2864                         u16 val_loc, u16 pref_loc, u8 pref_sz)
2865 {
2866         /* For this type of field, the "mask" location is for the prefix value's
2867          * location and the "last" location is for the size of the location of
2868          * the prefix value.
2869          */
2870         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
2871                              pref_loc, (u16)pref_sz);
2872 }
2873
2874 /**
2875  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
2876  * @seg: packet segment the field being set belongs to
2877  * @off: offset of the raw field from the beginning of the segment in bytes
2878  * @len: length of the raw pattern to be matched
2879  * @val_loc: location of the value to match from entry's input buffer
2880  * @mask_loc: location of mask value from entry's input buffer
2881  *
2882  * This function specifies the offset of the raw field to be match from the
2883  * beginning of the specified packet segment, and the locations, in the form of
2884  * byte offsets from the start of the input buffer for a flow entry, from where
2885  * the value to match and the mask value to be extracted. These locations are
2886  * then stored in the flow profile. When adding flow entries to the associated
2887  * flow profile, these locations can be used to quickly extract the values to
2888  * create the content of a match entry. This function should only be used for
2889  * fixed-size data structures.
2890  */
2891 void
2892 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
2893                      u16 val_loc, u16 mask_loc)
2894 {
2895         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
2896                 seg->raws[seg->raws_cnt].off = off;
2897                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
2898                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
2899                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
2900                 /* The "last" field is used to store the length of the field */
2901                 seg->raws[seg->raws_cnt].info.src.last = len;
2902         }
2903
2904         /* Overflows of "raws" will be handled as an error condition later in
2905          * the flow when this information is processed.
2906          */
2907         seg->raws_cnt++;
2908 }
2909
/* Header-type bitmasks used to validate RSS packet segment layouts */

/* L2 header types that may appear in an RSS segment */
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)

/* L3 header types; at most one may be selected per segment */
#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

/* L4 header types; at most one may be selected per segment */
#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)

/* Union of all header types accepted for RSS configuration */
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
2924
2925 /**
2926  * ice_flow_set_rss_seg_info - setup packet segments for RSS
2927  * @segs: pointer to the flow field segment(s)
2928  * @hash_fields: fields to be hashed on for the segment(s)
2929  * @flow_hdr: protocol header fields within a packet segment
2930  *
2931  * Helper function to extract fields from hash bitmap and use flow
2932  * header value to set flow field segment for further use in flow
2933  * profile entry or removal.
2934  */
2935 static enum ice_status
2936 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
2937                           u32 flow_hdr)
2938 {
2939         u64 val = hash_fields;
2940         u8 i;
2941
2942         for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
2943                 u64 bit = BIT_ULL(i);
2944
2945                 if (val & bit) {
2946                         ice_flow_set_fld(segs, (enum ice_flow_field)i,
2947                                          ICE_FLOW_FLD_OFF_INVAL,
2948                                          ICE_FLOW_FLD_OFF_INVAL,
2949                                          ICE_FLOW_FLD_OFF_INVAL, false);
2950                         val &= ~bit;
2951                 }
2952         }
2953         ICE_FLOW_SET_HDRS(segs, flow_hdr);
2954
2955         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
2956             ~ICE_FLOW_RSS_HDRS_INNER_MASK)
2957                 return ICE_ERR_PARAM;
2958
2959         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
2960         if (val && !ice_is_pow2(val))
2961                 return ICE_ERR_CFG;
2962
2963         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
2964         if (val && !ice_is_pow2(val))
2965                 return ICE_ERR_CFG;
2966
2967         return ICE_SUCCESS;
2968 }
2969
2970 /**
2971  * ice_rem_vsi_rss_list - remove VSI from RSS list
2972  * @hw: pointer to the hardware structure
2973  * @vsi_handle: software VSI handle
2974  *
2975  * Remove the VSI from all RSS configurations in the list.
2976  */
2977 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
2978 {
2979         struct ice_rss_cfg *r, *tmp;
2980
2981         if (LIST_EMPTY(&hw->rss_list_head))
2982                 return;
2983
2984         ice_acquire_lock(&hw->rss_locks);
2985         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
2986                                  ice_rss_cfg, l_entry) {
2987                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
2988                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
2989                                 LIST_DEL(&r->l_entry);
2990                                 ice_free(hw, r);
2991                         }
2992         }
2993         ice_release_lock(&hw->rss_locks);
2994 }
2995
2996 /**
2997  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
2998  * @hw: pointer to the hardware structure
2999  * @vsi_handle: software VSI handle
3000  *
3001  * This function will iterate through all flow profiles and disassociate
3002  * the VSI from that profile. If the flow profile has no VSIs it will
3003  * be removed.
3004  */
3005 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3006 {
3007         const enum ice_block blk = ICE_BLK_RSS;
3008         struct ice_flow_prof *p, *t;
3009         enum ice_status status = ICE_SUCCESS;
3010
3011         if (!ice_is_vsi_valid(hw, vsi_handle))
3012                 return ICE_ERR_PARAM;
3013
3014         if (LIST_EMPTY(&hw->fl_profs[blk]))
3015                 return ICE_SUCCESS;
3016
3017         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3018         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3019                                  l_entry) {
3020                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3021                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3022                         if (status)
3023                                 break;
3024
3025                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3026                                 status = ice_flow_rem_prof_sync(hw, blk, p);
3027                                 if (status)
3028                                         break;
3029                         }
3030                 }
3031         }
3032         ice_release_lock(&hw->fl_profs_locks[blk]);
3033
3034         return status;
3035 }
3036
3037 /**
3038  * ice_rem_rss_list - remove RSS configuration from list
3039  * @hw: pointer to the hardware structure
3040  * @vsi_handle: software VSI handle
3041  * @prof: pointer to flow profile
3042  *
3043  * Assumption: lock has already been acquired for RSS list
3044  */
3045 static void
3046 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3047 {
3048         struct ice_rss_cfg *r, *tmp;
3049
3050         /* Search for RSS hash fields associated to the VSI that match the
3051          * hash configurations associated to the flow profile. If found
3052          * remove from the RSS entry list of the VSI context and delete entry.
3053          */
3054         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3055                                  ice_rss_cfg, l_entry) {
3056                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3057                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3058                         ice_clear_bit(vsi_handle, r->vsis);
3059                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3060                                 LIST_DEL(&r->l_entry);
3061                                 ice_free(hw, r);
3062                         }
3063                         return;
3064                 }
3065         }
3066 }
3067
3068 /**
3069  * ice_add_rss_list - add RSS configuration to list
3070  * @hw: pointer to the hardware structure
3071  * @vsi_handle: software VSI handle
3072  * @prof: pointer to flow profile
3073  *
3074  * Assumption: lock has already been acquired for RSS list
3075  */
3076 static enum ice_status
3077 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3078 {
3079         struct ice_rss_cfg *r, *rss_cfg;
3080
3081         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3082                             ice_rss_cfg, l_entry)
3083                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3084                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3085                         ice_set_bit(vsi_handle, r->vsis);
3086                         return ICE_SUCCESS;
3087                 }
3088
3089         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3090         if (!rss_cfg)
3091                 return ICE_ERR_NO_MEMORY;
3092
3093         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3094         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3095         rss_cfg->symm = prof->cfg.symm;
3096         ice_set_bit(vsi_handle, rss_cfg->vsis);
3097
3098         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3099
3100         return ICE_SUCCESS;
3101 }
3102
/* Bit layout of the 64-bit RSS flow profile ID (format described below) */
#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

/* Segment counts passed to ice_add_rss_cfg_sync()/ice_rem_rss_cfg_sync():
 * one segment targets the outer headers, two segments target the inner
 * (tunneled) headers.
 */
#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3122
3123 static void
3124 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3125 {
3126         u32 s = ((src % 4) << 3); /* byte shift */
3127         u32 v = dst | 0x80; /* value to program */
3128         u8 i = src / 4; /* register index */
3129         u32 reg;
3130
3131         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3132         reg = (reg & ~(0xff << s)) | (v << s);
3133         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3134 }
3135
3136 static void
3137 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3138 {
3139         int fv_last_word =
3140                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3141         int i;
3142
3143         for (i = 0; i < len; i++) {
3144                 ice_rss_config_xor_word(hw, prof_id,
3145                                         /* Yes, field vector in GLQF_HSYMM and
3146                                          * GLQF_HINSET is inversed!
3147                                          */
3148                                         fv_last_word - (src + i),
3149                                         fv_last_word - (dst + i));
3150                 ice_rss_config_xor_word(hw, prof_id,
3151                                         fv_last_word - (dst + i),
3152                                         fv_last_word - (src + i));
3153         }
3154 }
3155
3156 static void
3157 ice_rss_update_symm(struct ice_hw *hw,
3158                     struct ice_flow_prof *prof)
3159 {
3160         struct ice_prof_map *map;
3161         u8 prof_id, m;
3162
3163         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3164         prof_id = map->prof_id;
3165
3166         /* clear to default */
3167         for (m = 0; m < 6; m++)
3168                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3169         if (prof->cfg.symm) {
3170                 struct ice_flow_seg_info *seg =
3171                         &prof->segs[prof->segs_cnt - 1];
3172
3173                 struct ice_flow_seg_xtrct *ipv4_src =
3174                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3175                 struct ice_flow_seg_xtrct *ipv4_dst =
3176                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3177                 struct ice_flow_seg_xtrct *ipv6_src =
3178                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3179                 struct ice_flow_seg_xtrct *ipv6_dst =
3180                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3181
3182                 struct ice_flow_seg_xtrct *tcp_src =
3183                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3184                 struct ice_flow_seg_xtrct *tcp_dst =
3185                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3186
3187                 struct ice_flow_seg_xtrct *udp_src =
3188                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3189                 struct ice_flow_seg_xtrct *udp_dst =
3190                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3191
3192                 struct ice_flow_seg_xtrct *sctp_src =
3193                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3194                 struct ice_flow_seg_xtrct *sctp_dst =
3195                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3196
3197                 /* xor IPv4 */
3198                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3199                         ice_rss_config_xor(hw, prof_id,
3200                                            ipv4_src->idx, ipv4_dst->idx, 2);
3201
3202                 /* xor IPv6 */
3203                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3204                         ice_rss_config_xor(hw, prof_id,
3205                                            ipv6_src->idx, ipv6_dst->idx, 8);
3206
3207                 /* xor TCP */
3208                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3209                         ice_rss_config_xor(hw, prof_id,
3210                                            tcp_src->idx, tcp_dst->idx, 1);
3211
3212                 /* xor UDP */
3213                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3214                         ice_rss_config_xor(hw, prof_id,
3215                                            udp_src->idx, udp_dst->idx, 1);
3216
3217                 /* xor SCTP */
3218                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3219                         ice_rss_config_xor(hw, prof_id,
3220                                            sctp_src->idx, sctp_dst->idx, 1);
3221         }
3222 }
3223
3224 /**
3225  * ice_add_rss_cfg_sync - add an RSS configuration
3226  * @hw: pointer to the hardware structure
3227  * @vsi_handle: software VSI handle
3228  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3229  * @addl_hdrs: protocol header fields
3230  * @segs_cnt: packet segment count
3231  * @symm: symmetric hash enable/disable
3232  *
3233  * Assumption: lock has already been acquired for RSS list
3234  */
3235 static enum ice_status
3236 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3237                      u32 addl_hdrs, u8 segs_cnt, bool symm)
3238 {
3239         const enum ice_block blk = ICE_BLK_RSS;
3240         struct ice_flow_prof *prof = NULL;
3241         struct ice_flow_seg_info *segs;
3242         enum ice_status status;
3243
3244         if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3245                 return ICE_ERR_PARAM;
3246
3247         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3248                                                       sizeof(*segs));
3249         if (!segs)
3250                 return ICE_ERR_NO_MEMORY;
3251
3252         /* Construct the packet segment info from the hashed fields */
3253         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3254                                            addl_hdrs);
3255         if (status)
3256                 goto exit;
3257
3258         /* Search for a flow profile that has matching headers, hash fields
3259          * and has the input VSI associated to it. If found, no further
3260          * operations required and exit.
3261          */
3262         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3263                                         vsi_handle,
3264                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
3265                                         ICE_FLOW_FIND_PROF_CHK_VSI);
3266         if (prof) {
3267                 if (prof->cfg.symm == symm)
3268                         goto exit;
3269                 prof->cfg.symm = symm;
3270                 goto update_symm;
3271         }
3272
3273         /* Check if a flow profile exists with the same protocol headers and
3274          * associated with the input VSI. If so disasscociate the VSI from
3275          * this profile. The VSI will be added to a new profile created with
3276          * the protocol header and new hash field configuration.
3277          */
3278         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3279                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3280         if (prof) {
3281                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3282                 if (!status)
3283                         ice_rem_rss_list(hw, vsi_handle, prof);
3284                 else
3285                         goto exit;
3286
3287                 /* Remove profile if it has no VSIs associated */
3288                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3289                         status = ice_flow_rem_prof(hw, blk, prof->id);
3290                         if (status)
3291                                 goto exit;
3292                 }
3293         }
3294
3295         /* Search for a profile that has same match fields only. If this
3296          * exists then associate the VSI to this profile.
3297          */
3298         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3299                                         vsi_handle,
3300                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3301         if (prof) {
3302                 if (prof->cfg.symm == symm) {
3303                         status = ice_flow_assoc_prof(hw, blk, prof,
3304                                                      vsi_handle);
3305                         if (!status)
3306                                 status = ice_add_rss_list(hw, vsi_handle,
3307                                                           prof);
3308                 } else {
3309                         /* if a profile exist but with different symmetric
3310                          * requirement, just return error.
3311                          */
3312                         status = ICE_ERR_NOT_SUPPORTED;
3313                 }
3314                 goto exit;
3315         }
3316
3317         /* Create a new flow profile with generated profile and packet
3318          * segment information.
3319          */
3320         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3321                                    ICE_FLOW_GEN_PROFID(hashed_flds,
3322                                                        segs[segs_cnt - 1].hdrs,
3323                                                        segs_cnt),
3324                                    segs, segs_cnt, NULL, 0, &prof);
3325         if (status)
3326                 goto exit;
3327
3328         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3329         /* If association to a new flow profile failed then this profile can
3330          * be removed.
3331          */
3332         if (status) {
3333                 ice_flow_rem_prof(hw, blk, prof->id);
3334                 goto exit;
3335         }
3336
3337         status = ice_add_rss_list(hw, vsi_handle, prof);
3338
3339         prof->cfg.symm = symm;
3340
3341 update_symm:
3342         ice_rss_update_symm(hw, prof);
3343
3344 exit:
3345         ice_free(hw, segs);
3346         return status;
3347 }
3348
3349 /**
3350  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3351  * @hw: pointer to the hardware structure
3352  * @vsi_handle: software VSI handle
3353  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3354  * @addl_hdrs: protocol header fields
3355  * @symm: symmetric hash enable/disable
3356  *
3357  * This function will generate a flow profile based on fields associated with
3358  * the input fields to hash on, the flow type and use the VSI number to add
3359  * a flow entry to the profile.
3360  */
3361 enum ice_status
3362 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3363                 u32 addl_hdrs, bool symm)
3364 {
3365         enum ice_status status;
3366
3367         if (hashed_flds == ICE_HASH_INVALID ||
3368             !ice_is_vsi_valid(hw, vsi_handle))
3369                 return ICE_ERR_PARAM;
3370
3371         ice_acquire_lock(&hw->rss_locks);
3372         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3373                                       ICE_RSS_OUTER_HEADERS, symm);
3374         if (!status)
3375                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3376                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3377                                               symm);
3378         ice_release_lock(&hw->rss_locks);
3379
3380         return status;
3381 }
3382
3383 /**
3384  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3385  * @hw: pointer to the hardware structure
3386  * @vsi_handle: software VSI handle
3387  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3388  * @addl_hdrs: Protocol header fields within a packet segment
3389  * @segs_cnt: packet segment count
3390  *
3391  * Assumption: lock has already been acquired for RSS list
3392  */
3393 static enum ice_status
3394 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3395                      u32 addl_hdrs, u8 segs_cnt)
3396 {
3397         const enum ice_block blk = ICE_BLK_RSS;
3398         struct ice_flow_seg_info *segs;
3399         struct ice_flow_prof *prof;
3400         enum ice_status status;
3401
3402         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3403                                                       sizeof(*segs));
3404         if (!segs)
3405                 return ICE_ERR_NO_MEMORY;
3406
3407         /* Construct the packet segment info from the hashed fields */
3408         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3409                                            addl_hdrs);
3410         if (status)
3411                 goto out;
3412
3413         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3414                                         vsi_handle,
3415                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3416         if (!prof) {
3417                 status = ICE_ERR_DOES_NOT_EXIST;
3418                 goto out;
3419         }
3420
3421         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3422         if (status)
3423                 goto out;
3424
3425         /* Remove RSS configuration from VSI context before deleting
3426          * the flow profile.
3427          */
3428         ice_rem_rss_list(hw, vsi_handle, prof);
3429
3430         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3431                 status = ice_flow_rem_prof(hw, blk, prof->id);
3432
3433 out:
3434         ice_free(hw, segs);
3435         return status;
3436 }
3437
3438 /**
3439  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3440  * @hw: pointer to the hardware structure
3441  * @vsi_handle: software VSI handle
3442  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3443  * @addl_hdrs: Protocol header fields within a packet segment
3444  *
3445  * This function will lookup the flow profile based on the input
3446  * hash field bitmap, iterate through the profile entry list of
3447  * that profile and find entry associated with input VSI to be
3448  * removed. Calls are made to underlying flow apis which will in
3449  * turn build or update buffers for RSS XLT1 section.
3450  */
3451 enum ice_status
3452 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3453                 u32 addl_hdrs)
3454 {
3455         enum ice_status status;
3456
3457         if (hashed_flds == ICE_HASH_INVALID ||
3458             !ice_is_vsi_valid(hw, vsi_handle))
3459                 return ICE_ERR_PARAM;
3460
3461         ice_acquire_lock(&hw->rss_locks);
3462         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3463                                       ICE_RSS_OUTER_HEADERS);
3464         if (!status)
3465                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3466                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3467         ice_release_lock(&hw->rss_locks);
3468
3469         return status;
3470 }
3471
3472 /**
3473  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3474  * @hw: pointer to the hardware structure
3475  * @vsi_handle: software VSI handle
3476  */
3477 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3478 {
3479         enum ice_status status = ICE_SUCCESS;
3480         struct ice_rss_cfg *r;
3481
3482         if (!ice_is_vsi_valid(hw, vsi_handle))
3483                 return ICE_ERR_PARAM;
3484
3485         ice_acquire_lock(&hw->rss_locks);
3486         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3487                             ice_rss_cfg, l_entry) {
3488                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3489                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3490                                                       r->hashed_flds,
3491                                                       r->packet_hdr,
3492                                                       ICE_RSS_OUTER_HEADERS,
3493                                                       r->symm);
3494                         if (status)
3495                                 break;
3496                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3497                                                       r->hashed_flds,
3498                                                       r->packet_hdr,
3499                                                       ICE_RSS_INNER_HEADERS,
3500                                                       r->symm);
3501                         if (status)
3502                                 break;
3503                 }
3504         }
3505         ice_release_lock(&hw->rss_locks);
3506
3507         return status;
3508 }
3509
3510 /**
3511  * ice_get_rss_cfg - returns hashed fields for the given header types
3512  * @hw: pointer to the hardware structure
3513  * @vsi_handle: software VSI handle
3514  * @hdrs: protocol header type
3515  *
3516  * This function will return the match fields of the first instance of flow
3517  * profile having the given header types and containing input VSI
3518  */
3519 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3520 {
3521         struct ice_rss_cfg *r, *rss_cfg = NULL;
3522
3523         /* verify if the protocol header is non zero and VSI is valid */
3524         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3525                 return ICE_HASH_INVALID;
3526
3527         ice_acquire_lock(&hw->rss_locks);
3528         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3529                             ice_rss_cfg, l_entry)
3530                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3531                     r->packet_hdr == hdrs) {
3532                         rss_cfg = r;
3533                         break;
3534                 }
3535         ice_release_lock(&hw->rss_locks);
3536
3537         return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
3538 }