net/ice/base: improve GTPU extension header handling
[dpdk.git] / drivers / net / ice / base / ice_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Sizes (in bytes) of known protocol header fields; the ICE_FLOW_FLD_INFO()
 * macros below convert these byte counts to bits.
 */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
14 #define ICE_FLOW_FLD_SZ_IP_TTL          1
15 #define ICE_FLOW_FLD_SZ_IP_PROT         1
16 #define ICE_FLOW_FLD_SZ_PORT            2
17 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
18 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
19 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
20 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
21 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
22 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
23 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
24 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
25
26 /* Describe properties of a protocol header field */
27 struct ice_flow_field_info {
28         enum ice_flow_seg_hdr hdr;
29         s16 off;        /* Offset from start of a protocol header, in bits */
30         u16 size;       /* Size of fields in bits */
31         u16 mask;       /* 16-bit mask for field (0 = no mask applied) */
32 };
33
/* Initialize an ice_flow_field_info entry; offset and size are supplied in
 * bytes and stored in bits, with no mask applied.
 */
34 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
35         .hdr = _hdr, \
36         .off = (_offset_bytes) * BITS_PER_BYTE, \
37         .size = (_size_bytes) * BITS_PER_BYTE, \
38         .mask = 0, \
39 }
40
/* Same as ICE_FLOW_FLD_INFO() but additionally records a 16-bit mask for
 * fields narrower than, or not aligned to, their containing bytes.
 */
41 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
42         .hdr = _hdr, \
43         .off = (_offset_bytes) * BITS_PER_BYTE, \
44         .size = (_size_bytes) * BITS_PER_BYTE, \
45         .mask = _mask, \
46 }
47
48 /* Table containing properties of supported protocol header fields.
 * The array is indexed by the ICE_FLOW_FIELD_IDX_* value noted above each
 * entry, so entry order must match enum ice_flow_field_idx exactly.
 */
49 static const
50 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
51         /* Ether */
52         /* ICE_FLOW_FIELD_IDX_ETH_DA */
53         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
54         /* ICE_FLOW_FIELD_IDX_ETH_SA */
55         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
56         /* ICE_FLOW_FIELD_IDX_S_VLAN */
57         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
58         /* ICE_FLOW_FIELD_IDX_C_VLAN */
59         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
60         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
61         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE),
62         /* IPv4 / IPv6 */
63         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
64         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
65                               0x00fc),
66         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
67         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
68                               0x0ff0),
69         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
70         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
71                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
72         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
73         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
74                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
75         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
76         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
77                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
78         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
79         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
80                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
81         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
82         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
83         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
84         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
85         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
86         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
87         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
88         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
89         /* Transport */
90         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
91         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
92         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
93         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
94         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
95         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
96         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
97         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
98         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
99         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
100         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
101         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
102         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
103         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
104         /* ARP */
105         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
106         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
107         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
108         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
109         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
110         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
111         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
112         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
113         /* ICE_FLOW_FIELD_IDX_ARP_OP */
114         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
115         /* ICMP */
116         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
117         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
118         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
119         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
120         /* GRE */
121         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
122         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
123         /* GTP */
124         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
125         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
126                           ICE_FLOW_FLD_SZ_GTP_TEID),
127         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
128         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
129                           ICE_FLOW_FLD_SZ_GTP_TEID),
130         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
131         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
132                           ICE_FLOW_FLD_SZ_GTP_TEID),
133         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
134         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
135                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
136         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
137         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
138                           ICE_FLOW_FLD_SZ_GTP_TEID),
139         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
140         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
141                           ICE_FLOW_FLD_SZ_GTP_TEID),
142         /* PPPOE */
143         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
144         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
145                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
146 };
147
148 /* Bitmaps indicating relevant packet types for a particular protocol header
149  *
150  * Packet types for packets with an Outer/First/Single MAC header
 *
 * Each array below is cast to ice_bitmap_t and ANDed into the profile's
 * ptype bitmap by ice_flow_proc_seg_hdrs(); this one is selected for
 * segment 0 when ICE_FLOW_SEG_HDR_ETH is requested.
151  */
152 static const u32 ice_ptypes_mac_ofos[] = {
153         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
154         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
155         0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
156         0x00000000, 0x00000000, 0x00000000, 0x00000000,
157         0x00000000, 0x00000000, 0x00000000, 0x00000000,
158         0x00000000, 0x00000000, 0x00000000, 0x00000000,
159         0x00000000, 0x00000000, 0x00000000, 0x00000000,
160         0x00000000, 0x00000000, 0x00000000, 0x00000000,
161 };
162
163 /* Packet types for packets with an Innermost/Last MAC VLAN header.
 * ANDed into the profile ptype bitmap for non-first segments that carry
 * ICE_FLOW_SEG_HDR_VLAN (see ice_flow_proc_seg_hdrs()).
 */
164 static const u32 ice_ptypes_macvlan_il[] = {
165         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
166         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
167         0x00000000, 0x00000000, 0x00000000, 0x00000000,
168         0x00000000, 0x00000000, 0x00000000, 0x00000000,
169         0x00000000, 0x00000000, 0x00000000, 0x00000000,
170         0x00000000, 0x00000000, 0x00000000, 0x00000000,
171         0x00000000, 0x00000000, 0x00000000, 0x00000000,
172         0x00000000, 0x00000000, 0x00000000, 0x00000000,
173 };
174
175 /* Packet types for packets with an Outer/First/Single IPv4 header.
 * Selected for segment 0 when ICE_FLOW_SEG_HDR_IPV4 is requested.
 */
176 static const u32 ice_ptypes_ipv4_ofos[] = {
177         0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
178         0x00000000, 0x00000000, 0x00000000, 0x00000000,
179         0x0003000F, 0x000FC000, 0x03E0F800, 0x00000000,
180         0x00000000, 0x00000000, 0x00000000, 0x00000000,
181         0x00000000, 0x00000000, 0x00000000, 0x00000000,
182         0x00000000, 0x00000000, 0x00000000, 0x00000000,
183         0x00000000, 0x00000000, 0x00000000, 0x00000000,
184         0x00000000, 0x00000000, 0x00000000, 0x00000000,
185 };
186
187 /* Packet types for packets with an Innermost/Last IPv4 header.
 * Selected for non-first segments when ICE_FLOW_SEG_HDR_IPV4 is requested.
 */
188 static const u32 ice_ptypes_ipv4_il[] = {
189         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
190         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
191         0x00000000, 0x00000000, 0x001FF800, 0x00000000,
192         0x00000000, 0x00000000, 0x00000000, 0x00000000,
193         0x00000000, 0x00000000, 0x00000000, 0x00000000,
194         0x00000000, 0x00000000, 0x00000000, 0x00000000,
195         0x00000000, 0x00000000, 0x00000000, 0x00000000,
196         0x00000000, 0x00000000, 0x00000000, 0x00000000,
197 };
198
199 /* Packet types for packets with an Outer/First/Single IPv6 header.
 * Selected for segment 0 when ICE_FLOW_SEG_HDR_IPV6 is requested.
 */
200 static const u32 ice_ptypes_ipv6_ofos[] = {
201         0x00000000, 0x00000000, 0x77000000, 0x10002000,
202         0x00000000, 0x00000000, 0x00000000, 0x00000000,
203         0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000000,
204         0x00000000, 0x00000000, 0x00000000, 0x00000000,
205         0x00000000, 0x00000000, 0x00000000, 0x00000000,
206         0x00000000, 0x00000000, 0x00000000, 0x00000000,
207         0x00000000, 0x00000000, 0x00000000, 0x00000000,
208         0x00000000, 0x00000000, 0x00000000, 0x00000000,
209 };
210
211 /* Packet types for packets with an Innermost/Last IPv6 header.
 * Selected for non-first segments when ICE_FLOW_SEG_HDR_IPV6 is requested.
 */
212 static const u32 ice_ptypes_ipv6_il[] = {
213         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
214         0x00000770, 0x00000000, 0x00000000, 0x00000000,
215         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
216         0x00000000, 0x00000000, 0x00000000, 0x00000000,
217         0x00000000, 0x00000000, 0x00000000, 0x00000000,
218         0x00000000, 0x00000000, 0x00000000, 0x00000000,
219         0x00000000, 0x00000000, 0x00000000, 0x00000000,
220         0x00000000, 0x00000000, 0x00000000, 0x00000000,
221 };
222
223 /* Packet types for packets with an Outermost/First ARP header.
 * Only applied when ARP is requested in the first segment
 * (see ice_flow_proc_seg_hdrs()).
 */
224 static const u32 ice_ptypes_arp_of[] = {
225         0x00000800, 0x00000000, 0x00000000, 0x00000000,
226         0x00000000, 0x00000000, 0x00000000, 0x00000000,
227         0x00000000, 0x00000000, 0x00000000, 0x00000000,
228         0x00000000, 0x00000000, 0x00000000, 0x00000000,
229         0x00000000, 0x00000000, 0x00000000, 0x00000000,
230         0x00000000, 0x00000000, 0x00000000, 0x00000000,
231         0x00000000, 0x00000000, 0x00000000, 0x00000000,
232         0x00000000, 0x00000000, 0x00000000, 0x00000000,
233 };
234
235 /* UDP Packet types for non-tunneled packets or tunneled
236  * packets with inner UDP.
 * Applied for any segment requesting ICE_FLOW_SEG_HDR_UDP.
237  */
238 static const u32 ice_ptypes_udp_il[] = {
239         0x81000000, 0x20204040, 0x04000010, 0x80810102,
240         0x00000040, 0x00000000, 0x00000000, 0x00000000,
241         0x00000000, 0x00410000, 0x10842000, 0x00000000,
242         0x00000000, 0x00000000, 0x00000000, 0x00000000,
243         0x00000000, 0x00000000, 0x00000000, 0x00000000,
244         0x00000000, 0x00000000, 0x00000000, 0x00000000,
245         0x00000000, 0x00000000, 0x00000000, 0x00000000,
246         0x00000000, 0x00000000, 0x00000000, 0x00000000,
247 };
248
249 /* Packet types for packets with an Innermost/Last TCP header.
 * Applied for any segment requesting ICE_FLOW_SEG_HDR_TCP.
 */
250 static const u32 ice_ptypes_tcp_il[] = {
251         0x04000000, 0x80810102, 0x10000040, 0x02040408,
252         0x00000102, 0x00000000, 0x00000000, 0x00000000,
253         0x00000000, 0x00820000, 0x21084000, 0x00000000,
254         0x00000000, 0x00000000, 0x00000000, 0x00000000,
255         0x00000000, 0x00000000, 0x00000000, 0x00000000,
256         0x00000000, 0x00000000, 0x00000000, 0x00000000,
257         0x00000000, 0x00000000, 0x00000000, 0x00000000,
258         0x00000000, 0x00000000, 0x00000000, 0x00000000,
259 };
260
261 /* Packet types for packets with an Innermost/Last SCTP header.
 * Applied for any segment requesting ICE_FLOW_SEG_HDR_SCTP.
 */
262 static const u32 ice_ptypes_sctp_il[] = {
263         0x08000000, 0x01020204, 0x20000081, 0x04080810,
264         0x00000204, 0x00000000, 0x00000000, 0x00000000,
265         0x00000000, 0x01040000, 0x00000000, 0x00000000,
266         0x00000000, 0x00000000, 0x00000000, 0x00000000,
267         0x00000000, 0x00000000, 0x00000000, 0x00000000,
268         0x00000000, 0x00000000, 0x00000000, 0x00000000,
269         0x00000000, 0x00000000, 0x00000000, 0x00000000,
270         0x00000000, 0x00000000, 0x00000000, 0x00000000,
271 };
272
273 /* Packet types for packets with an Outermost/First ICMP header.
 * Selected for segment 0 when ICE_FLOW_SEG_HDR_ICMP is requested.
 */
274 static const u32 ice_ptypes_icmp_of[] = {
275         0x10000000, 0x00000000, 0x00000000, 0x00000000,
276         0x00000000, 0x00000000, 0x00000000, 0x00000000,
277         0x00000000, 0x00000000, 0x00000000, 0x00000000,
278         0x00000000, 0x00000000, 0x00000000, 0x00000000,
279         0x00000000, 0x00000000, 0x00000000, 0x00000000,
280         0x00000000, 0x00000000, 0x00000000, 0x00000000,
281         0x00000000, 0x00000000, 0x00000000, 0x00000000,
282         0x00000000, 0x00000000, 0x00000000, 0x00000000,
283 };
284
285 /* Packet types for packets with an Innermost/Last ICMP header.
 * Selected for non-first segments when ICE_FLOW_SEG_HDR_ICMP is requested.
 */
286 static const u32 ice_ptypes_icmp_il[] = {
287         0x00000000, 0x02040408, 0x40000102, 0x08101020,
288         0x00000408, 0x00000000, 0x00000000, 0x00000000,
289         0x00000000, 0x00000000, 0x42108000, 0x00000000,
290         0x00000000, 0x00000000, 0x00000000, 0x00000000,
291         0x00000000, 0x00000000, 0x00000000, 0x00000000,
292         0x00000000, 0x00000000, 0x00000000, 0x00000000,
293         0x00000000, 0x00000000, 0x00000000, 0x00000000,
294         0x00000000, 0x00000000, 0x00000000, 0x00000000,
295 };
296
297 /* Packet types for packets with an Outermost/First GRE header.
 * Only applied when GRE is requested in the first segment
 * (see ice_flow_proc_seg_hdrs()).
 */
298 static const u32 ice_ptypes_gre_of[] = {
299         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
300         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
301         0x00000000, 0x00000000, 0x00000000, 0x00000000,
302         0x00000000, 0x00000000, 0x00000000, 0x00000000,
303         0x00000000, 0x00000000, 0x00000000, 0x00000000,
304         0x00000000, 0x00000000, 0x00000000, 0x00000000,
305         0x00000000, 0x00000000, 0x00000000, 0x00000000,
306         0x00000000, 0x00000000, 0x00000000, 0x00000000,
307 };
308
309 /* Packet types for packets with an Innermost/Last MAC header.
 * All-zero: ANDing this bitmap clears every ptype, i.e. no ptype currently
 * supports an inner MAC match.
 */
310 static const u32 ice_ptypes_mac_il[] = {
311         0x00000000, 0x00000000, 0x00000000, 0x00000000,
312         0x00000000, 0x00000000, 0x00000000, 0x00000000,
313         0x00000000, 0x00000000, 0x00000000, 0x00000000,
314         0x00000000, 0x00000000, 0x00000000, 0x00000000,
315         0x00000000, 0x00000000, 0x00000000, 0x00000000,
316         0x00000000, 0x00000000, 0x00000000, 0x00000000,
317         0x00000000, 0x00000000, 0x00000000, 0x00000000,
318         0x00000000, 0x00000000, 0x00000000, 0x00000000,
319 };
320
321 /* Packet types for GTPC (applied when ICE_FLOW_SEG_HDR_GTPC is requested) */
322 static const u32 ice_ptypes_gtpc[] = {
323         0x00000000, 0x00000000, 0x00000000, 0x00000000,
324         0x00000000, 0x00000000, 0x00000000, 0x00000000,
325         0x00000000, 0x00000000, 0x00000180, 0x00000000,
326         0x00000000, 0x00000000, 0x00000000, 0x00000000,
327         0x00000000, 0x00000000, 0x00000000, 0x00000000,
328         0x00000000, 0x00000000, 0x00000000, 0x00000000,
329         0x00000000, 0x00000000, 0x00000000, 0x00000000,
330         0x00000000, 0x00000000, 0x00000000, 0x00000000,
331 };
332
333 /* Packet types for GTPC with TEID
 * (applied when ICE_FLOW_SEG_HDR_GTPC_TEID is requested)
 */
334 static const u32 ice_ptypes_gtpc_tid[] = {
335         0x00000000, 0x00000000, 0x00000000, 0x00000000,
336         0x00000000, 0x00000000, 0x00000000, 0x00000000,
337         0x00000000, 0x00000000, 0x00000060, 0x00000000,
338         0x00000000, 0x00000000, 0x00000000, 0x00000000,
339         0x00000000, 0x00000000, 0x00000000, 0x00000000,
340         0x00000000, 0x00000000, 0x00000000, 0x00000000,
341         0x00000000, 0x00000000, 0x00000000, 0x00000000,
342         0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 };
344
345 /* Packet types for GTPU.
 * Pairs each GTPU ptype with the GTP PDU extension-header attribute;
 * installed as params->attr when ICE_FLOW_SEG_HDR_GTPU_EH is requested.
 */
346 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
347         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
348         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
349         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
350         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
351         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
352         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
353         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
354         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
355         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
356         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
357         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
358         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
359         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
360         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
361         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
362         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
363         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
364         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
365         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
366         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
367 };
368
/* Pairs each GTPU ptype with the GTP downlink attribute; installed as
 * params->attr when ICE_FLOW_SEG_HDR_GTPU_DWN is requested.
 */
369 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
370         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
371         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
372         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
373         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
374         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
375         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
376         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
377         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
378         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
379         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
380         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
381         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
382         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
383         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
384         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
385         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
386         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
387         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
388         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
389         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
390 };
391
/* Pairs each GTPU ptype with the GTP uplink attribute; installed as
 * params->attr when ICE_FLOW_SEG_HDR_GTPU_UP is requested.
 */
392 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
393         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
394         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
395         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
396         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
397         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
398         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
399         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
400         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
401         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
402         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
403         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
404         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
405         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
406         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
407         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
408         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
409         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
410         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
411         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
412         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
413 };
414
/* Packet types for GTPU; shared by the GTPU_DWN/UP/EH cases, which then
 * differentiate via the ice_attr_gtpu_* attribute tables above.
 */
415 static const u32 ice_ptypes_gtpu[] = {
416         0x00000000, 0x00000000, 0x00000000, 0x00000000,
417         0x00000000, 0x00000000, 0x00000000, 0x00000000,
418         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
419         0x00000000, 0x00000000, 0x00000000, 0x00000000,
420         0x00000000, 0x00000000, 0x00000000, 0x00000000,
421         0x00000000, 0x00000000, 0x00000000, 0x00000000,
422         0x00000000, 0x00000000, 0x00000000, 0x00000000,
423         0x00000000, 0x00000000, 0x00000000, 0x00000000,
424 };
425
426 /* Packet types for pppoe
 * (applied for any segment requesting ICE_FLOW_SEG_HDR_PPPOE)
 */
427 static const u32 ice_ptypes_pppoe[] = {
428         0x00000000, 0x00000000, 0x00000000, 0x00000000,
429         0x00000000, 0x00000000, 0x00000000, 0x00000000,
430         0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
431         0x00000000, 0x00000000, 0x00000000, 0x00000000,
432         0x00000000, 0x00000000, 0x00000000, 0x00000000,
433         0x00000000, 0x00000000, 0x00000000, 0x00000000,
434         0x00000000, 0x00000000, 0x00000000, 0x00000000,
435         0x00000000, 0x00000000, 0x00000000, 0x00000000,
437
438 /* Manage parameters and info. used during the creation of a flow profile */
439 struct ice_flow_prof_params {
440         enum ice_block blk; /* NOTE(review): presumably the HW block the
                             * profile targets — confirm against callers
                             */
441         u16 entry_length; /* # of bytes formatted entry will require */
442         u8 es_cnt; /* # of valid entries in es[] (inferred from name) */
443         struct ice_flow_prof *prof; /* profile being built */
444
445         /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
446          * This will give us the direction flags.
447          */
448         struct ice_fv_word es[ICE_MAX_FV_WORDS];
449         /* attributes can be used to add attributes to a particular PTYPE */
450         const struct ice_ptype_attributes *attr;
451         u16 attr_cnt; /* number of entries in attr[] */
452
453         u16 mask[ICE_MAX_FV_WORDS];
454         /* relevant packet types, built up by ice_flow_proc_seg_hdrs() */
455         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
456 };
456
/* Tunnel/encap header flags treated as belonging to the inner segment for
 * RSS. NOTE(review): consumers are outside this chunk — confirm semantics.
 */
457 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
458         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
459          ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU)
460
/* Per-layer groupings of header flags; ice_flow_val_hdrs() allows at most
 * one L3 and one L4 flag per segment.
 */
461 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
462         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
463 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
464         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
465          ICE_FLOW_SEG_HDR_ARP)
466 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
467         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
468          ICE_FLOW_SEG_HDR_SCTP)
469
470 /**
471  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
472  * @segs: array of one or more packet segments that describe the flow
473  * @segs_cnt: number of packet segments provided
474  */
475 static enum ice_status
476 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
477 {
478         u8 i;
479
480         for (i = 0; i < segs_cnt; i++) {
481                 /* Multiple L3 headers */
482                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
483                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
484                         return ICE_ERR_PARAM;
485
486                 /* Multiple L4 headers */
487                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
488                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
489                         return ICE_ERR_PARAM;
490         }
491
492         return ICE_SUCCESS;
493 }
494
495 /* Sizes (in bytes) of fixed known protocol headers without header options */
496 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
497 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
498 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
499 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
500 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
501 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
502 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
503 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
504 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
505
506 /**
507  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
508  * @params: information about the flow to be processed
509  * @seg: index of packet segment whose header size is to be determined
510  */
511 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
512 {
513         u16 sz;
514
515         /* L2 headers */
516         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
517                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
518
519         /* L3 headers */
520         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
521                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
522         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
523                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
524         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
525                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
526         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
527                 /* A L3 header is required if L4 is specified */
528                 return 0;
529
530         /* L4 headers */
531         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
532                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
533         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
534                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
535         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
536                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
537         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
538                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
539
540         return sz;
541 }
542
543 /**
544  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
545  * @params: information about the flow to be processed
546  *
547  * This function identifies the packet types associated with the protocol
548  * headers being present in packet segments of the specified flow profile.
549  */
550 static enum ice_status
551 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
552 {
553         struct ice_flow_prof *prof;
554         u8 i;
555
556         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
557                    ICE_NONDMA_MEM);
558
559         prof = params->prof;
560
561         for (i = 0; i < params->prof->segs_cnt; i++) {
562                 const ice_bitmap_t *src;
563                 u32 hdrs;
564
565                 hdrs = prof->segs[i].hdrs;
566
567                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
568                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
569                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
570                         ice_and_bitmap(params->ptypes, params->ptypes, src,
571                                        ICE_FLOW_PTYPE_MAX);
572                 }
573
574                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
575                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
576                         ice_and_bitmap(params->ptypes, params->ptypes, src,
577                                        ICE_FLOW_PTYPE_MAX);
578                 }
579
580                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
581                         ice_and_bitmap(params->ptypes, params->ptypes,
582                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
583                                        ICE_FLOW_PTYPE_MAX);
584                 }
585
586                 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
587                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
588                         ice_and_bitmap(params->ptypes, params->ptypes, src,
589                                        ICE_FLOW_PTYPE_MAX);
590                 }
591
592                 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
593                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
594                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
595                         ice_and_bitmap(params->ptypes, params->ptypes, src,
596                                        ICE_FLOW_PTYPE_MAX);
597                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
598                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
599                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
600                         ice_and_bitmap(params->ptypes, params->ptypes, src,
601                                        ICE_FLOW_PTYPE_MAX);
602                 }
603
604                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
605                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
606                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
607                         ice_and_bitmap(params->ptypes, params->ptypes, src,
608                                        ICE_FLOW_PTYPE_MAX);
609                 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
610                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
611                         ice_and_bitmap(params->ptypes, params->ptypes, src,
612                                        ICE_FLOW_PTYPE_MAX);
613                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
614                         ice_and_bitmap(params->ptypes, params->ptypes,
615                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
616                                        ICE_FLOW_PTYPE_MAX);
617                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
618                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
619                         ice_and_bitmap(params->ptypes, params->ptypes, src,
620                                        ICE_FLOW_PTYPE_MAX);
621                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
622                         if (!i) {
623                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
624                                 ice_and_bitmap(params->ptypes, params->ptypes,
625                                                src, ICE_FLOW_PTYPE_MAX);
626                         }
627                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
628                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
629                         ice_and_bitmap(params->ptypes, params->ptypes,
630                                        src, ICE_FLOW_PTYPE_MAX);
631                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
632                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
633                         ice_and_bitmap(params->ptypes, params->ptypes,
634                                        src, ICE_FLOW_PTYPE_MAX);
635                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
636                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
637                         ice_and_bitmap(params->ptypes, params->ptypes,
638                                        src, ICE_FLOW_PTYPE_MAX);
639
640                         /* Attributes for GTP packet with downlink */
641                         params->attr = ice_attr_gtpu_down;
642                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
643                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
644                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
645                         ice_and_bitmap(params->ptypes, params->ptypes,
646                                        src, ICE_FLOW_PTYPE_MAX);
647
648                         /* Attributes for GTP packet with uplink */
649                         params->attr = ice_attr_gtpu_up;
650                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
651                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
652                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
653                         ice_and_bitmap(params->ptypes, params->ptypes,
654                                        src, ICE_FLOW_PTYPE_MAX);
655
656                         /* Attributes for GTP packet with Extension Header */
657                         params->attr = ice_attr_gtpu_eh;
658                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
659                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
660                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
661                         ice_and_bitmap(params->ptypes, params->ptypes,
662                                        src, ICE_FLOW_PTYPE_MAX);
663                 }
664         }
665
666         return ICE_SUCCESS;
667 }
668
669 /**
670  * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
671  * @hw: pointer to the HW struct
672  * @params: information about the flow to be processed
673  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
674  *
675  * This function will allocate an extraction sequence entry for a DWORD-sized
676  * chunk of the packet flags.
677  */
678 static enum ice_status
679 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
680                           struct ice_flow_prof_params *params,
681                           enum ice_flex_mdid_pkt_flags flags)
682 {
683         u8 fv_words = hw->blk[params->blk].es.fvw;
684         u8 idx;
685
686         /* Make sure the number of extraction sequence entries required does not
687          * exceed the block's capacity.
688          */
689         if (params->es_cnt >= fv_words)
690                 return ICE_ERR_MAX_LIMIT;
691
692         /* some blocks require a reversed field vector layout */
693         if (hw->blk[params->blk].es.reverse)
694                 idx = fv_words - params->es_cnt - 1;
695         else
696                 idx = params->es_cnt;
697
698         params->es[idx].prot_id = ICE_PROT_META_ID;
699         params->es[idx].off = flags;
700         params->es_cnt++;
701
702         return ICE_SUCCESS;
703 }
704
705 /**
706  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
707  * @hw: pointer to the HW struct
708  * @params: information about the flow to be processed
709  * @seg: packet segment index of the field to be extracted
710  * @fld: ID of field to be extracted
711  * @match: bitfield of all fields
712  *
713  * This function determines the protocol ID, offset, and size of the given
714  * field. It then allocates one or more extraction sequence entries for the
715  * given field, and fill the entries with protocol ID and offset information.
716  */
717 static enum ice_status
718 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
719                     u8 seg, enum ice_flow_field fld, u64 match)
720 {
721         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
722         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
723         u8 fv_words = hw->blk[params->blk].es.fvw;
724         struct ice_flow_fld_info *flds;
725         u16 cnt, ese_bits, i;
726         u16 sib_mask = 0;
727         s16 adj = 0;
728         u16 mask;
729         u16 off;
730
731         flds = params->prof->segs[seg].fields;
732
733         switch (fld) {
734         case ICE_FLOW_FIELD_IDX_ETH_DA:
735         case ICE_FLOW_FIELD_IDX_ETH_SA:
736         case ICE_FLOW_FIELD_IDX_S_VLAN:
737         case ICE_FLOW_FIELD_IDX_C_VLAN:
738                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
739                 break;
740         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
741                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
742                 break;
743         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
744                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
745                 break;
746         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
747                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
748                 break;
749         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
750         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
751                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
752
753                 /* TTL and PROT share the same extraction seq. entry.
754                  * Each is considered a sibling to the other in terms of sharing
755                  * the same extraction sequence entry.
756                  */
757                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
758                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
759                 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
760                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
761
762                 /* If the sibling field is also included, that field's
763                  * mask needs to be included.
764                  */
765                 if (match & BIT(sib))
766                         sib_mask = ice_flds_info[sib].mask;
767                 break;
768         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
769         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
770                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
771
772                 /* TTL and PROT share the same extraction seq. entry.
773                  * Each is considered a sibling to the other in terms of sharing
774                  * the same extraction sequence entry.
775                  */
776                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
777                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
778                 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
779                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
780
781                 /* If the sibling field is also included, that field's
782                  * mask needs to be included.
783                  */
784                 if (match & BIT(sib))
785                         sib_mask = ice_flds_info[sib].mask;
786                 break;
787         case ICE_FLOW_FIELD_IDX_IPV4_SA:
788         case ICE_FLOW_FIELD_IDX_IPV4_DA:
789                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
790                 break;
791         case ICE_FLOW_FIELD_IDX_IPV6_SA:
792         case ICE_FLOW_FIELD_IDX_IPV6_DA:
793                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
794                 break;
795         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
796         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
797         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
798                 prot_id = ICE_PROT_TCP_IL;
799                 break;
800         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
801         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
802                 prot_id = ICE_PROT_UDP_IL_OR_S;
803                 break;
804         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
805         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
806                 prot_id = ICE_PROT_SCTP_IL;
807                 break;
808         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
809         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
810         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
811         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
812         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
813         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
814                 /* GTP is accessed through UDP OF protocol */
815                 prot_id = ICE_PROT_UDP_OF;
816                 break;
817         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
818                 prot_id = ICE_PROT_PPPOE;
819                 break;
820         case ICE_FLOW_FIELD_IDX_ARP_SIP:
821         case ICE_FLOW_FIELD_IDX_ARP_DIP:
822         case ICE_FLOW_FIELD_IDX_ARP_SHA:
823         case ICE_FLOW_FIELD_IDX_ARP_DHA:
824         case ICE_FLOW_FIELD_IDX_ARP_OP:
825                 prot_id = ICE_PROT_ARP_OF;
826                 break;
827         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
828         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
829                 /* ICMP type and code share the same extraction seq. entry */
830                 prot_id = (params->prof->segs[seg].hdrs &
831                            ICE_FLOW_SEG_HDR_IPV4) ?
832                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
833                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
834                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
835                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
836                 break;
837         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
838                 prot_id = ICE_PROT_GRE_OF;
839                 break;
840         default:
841                 return ICE_ERR_NOT_IMPL;
842         }
843
844         /* Each extraction sequence entry is a word in size, and extracts a
845          * word-aligned offset from a protocol header.
846          */
847         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
848
849         flds[fld].xtrct.prot_id = prot_id;
850         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
851                 ICE_FLOW_FV_EXTRACT_SZ;
852         flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
853         flds[fld].xtrct.idx = params->es_cnt;
854         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
855
856         /* Adjust the next field-entry index after accommodating the number of
857          * entries this field consumes
858          */
859         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
860                                   ice_flds_info[fld].size, ese_bits);
861
862         /* Fill in the extraction sequence entries needed for this field */
863         off = flds[fld].xtrct.off;
864         mask = flds[fld].xtrct.mask;
865         for (i = 0; i < cnt; i++) {
866                 /* Only consume an extraction sequence entry if there is no
867                  * sibling field associated with this field or the sibling entry
868                  * already extracts the word shared with this field.
869                  */
870                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
871                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
872                     flds[sib].xtrct.off != off) {
873                         u8 idx;
874
875                         /* Make sure the number of extraction sequence required
876                          * does not exceed the block's capability
877                          */
878                         if (params->es_cnt >= fv_words)
879                                 return ICE_ERR_MAX_LIMIT;
880
881                         /* some blocks require a reversed field vector layout */
882                         if (hw->blk[params->blk].es.reverse)
883                                 idx = fv_words - params->es_cnt - 1;
884                         else
885                                 idx = params->es_cnt;
886
887                         params->es[idx].prot_id = prot_id;
888                         params->es[idx].off = off;
889                         params->mask[idx] = mask | sib_mask;
890                         params->es_cnt++;
891                 }
892
893                 off += ICE_FLOW_FV_EXTRACT_SZ;
894         }
895
896         return ICE_SUCCESS;
897 }
898
899 /**
900  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
901  * @hw: pointer to the HW struct
902  * @params: information about the flow to be processed
903  * @seg: index of packet segment whose raw fields are to be extracted
904  */
905 static enum ice_status
906 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
907                      u8 seg)
908 {
909         u16 fv_words;
910         u16 hdrs_sz;
911         u8 i;
912
913         if (!params->prof->segs[seg].raws_cnt)
914                 return ICE_SUCCESS;
915
916         if (params->prof->segs[seg].raws_cnt >
917             ARRAY_SIZE(params->prof->segs[seg].raws))
918                 return ICE_ERR_MAX_LIMIT;
919
920         /* Offsets within the segment headers are not supported */
921         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
922         if (!hdrs_sz)
923                 return ICE_ERR_PARAM;
924
925         fv_words = hw->blk[params->blk].es.fvw;
926
927         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
928                 struct ice_flow_seg_fld_raw *raw;
929                 u16 off, cnt, j;
930
931                 raw = &params->prof->segs[seg].raws[i];
932
933                 /* Storing extraction information */
934                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
935                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
936                         ICE_FLOW_FV_EXTRACT_SZ;
937                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
938                         BITS_PER_BYTE;
939                 raw->info.xtrct.idx = params->es_cnt;
940
941                 /* Determine the number of field vector entries this raw field
942                  * consumes.
943                  */
944                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
945                                           (raw->info.src.last * BITS_PER_BYTE),
946                                           (ICE_FLOW_FV_EXTRACT_SZ *
947                                            BITS_PER_BYTE));
948                 off = raw->info.xtrct.off;
949                 for (j = 0; j < cnt; j++) {
950                         u16 idx;
951
952                         /* Make sure the number of extraction sequence required
953                          * does not exceed the block's capability
954                          */
955                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
956                             params->es_cnt >= ICE_MAX_FV_WORDS)
957                                 return ICE_ERR_MAX_LIMIT;
958
959                         /* some blocks require a reversed field vector layout */
960                         if (hw->blk[params->blk].es.reverse)
961                                 idx = fv_words - params->es_cnt - 1;
962                         else
963                                 idx = params->es_cnt;
964
965                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
966                         params->es[idx].off = off;
967                         params->es_cnt++;
968                         off += ICE_FLOW_FV_EXTRACT_SZ;
969                 }
970         }
971
972         return ICE_SUCCESS;
973 }
974
975 /**
976  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
977  * @hw: pointer to the HW struct
978  * @params: information about the flow to be processed
979  *
980  * This function iterates through all matched fields in the given segments, and
981  * creates an extraction sequence for the fields.
982  */
983 static enum ice_status
984 ice_flow_create_xtrct_seq(struct ice_hw *hw,
985                           struct ice_flow_prof_params *params)
986 {
987         enum ice_status status = ICE_SUCCESS;
988         u8 i;
989
990         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
991          * packet flags
992          */
993         if (params->blk == ICE_BLK_ACL) {
994                 status = ice_flow_xtract_pkt_flags(hw, params,
995                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
996                 if (status)
997                         return status;
998         }
999
1000         for (i = 0; i < params->prof->segs_cnt; i++) {
1001                 u64 match = params->prof->segs[i].match;
1002                 enum ice_flow_field j;
1003
1004                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1005                         const u64 bit = BIT_ULL(j);
1006
1007                         if (match & bit) {
1008                                 status = ice_flow_xtract_fld(hw, params, i, j,
1009                                                              match);
1010                                 if (status)
1011                                         return status;
1012                                 match &= ~bit;
1013                         }
1014                 }
1015
1016                 /* Process raw matching bytes */
1017                 status = ice_flow_xtract_raws(hw, params, i);
1018                 if (status)
1019                         return status;
1020         }
1021
1022         return status;
1023 }
1024
1025 /**
1026  * ice_flow_sel_acl_scen - returns the specific scenario
1027  * @hw: pointer to the hardware structure
1028  * @params: information about the flow to be processed
1029  *
1030  * This function will return the specific scenario based on the
1031  * params passed to it
1032  */
1033 static enum ice_status
1034 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1035 {
1036         /* Find the best-fit scenario for the provided match width */
1037         struct ice_acl_scen *cand_scen = NULL, *scen;
1038
1039         if (!hw->acl_tbl)
1040                 return ICE_ERR_DOES_NOT_EXIST;
1041
1042         /* Loop through each scenario and match against the scenario width
1043          * to select the specific scenario
1044          */
1045         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1046                 if (scen->eff_width >= params->entry_length &&
1047                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1048                         cand_scen = scen;
1049         if (!cand_scen)
1050                 return ICE_ERR_DOES_NOT_EXIST;
1051
1052         params->prof->cfg.scen = cand_scen;
1053
1054         return ICE_SUCCESS;
1055 }
1056
1057 /**
1058  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1059  * @params: information about the flow to be processed
1060  */
static enum ice_status
ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
{
	u16 index, i, range_idx = 0;

	/* "index" is the running byte position within the ACL entry,
	 * starting at the byte-selection base index.
	 */
	index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		struct ice_flow_seg_info *seg = &params->prof->segs[i];
		u64 match = seg->match;
		u8 j;

		/* Assign an entry location to each matched field; bits are
		 * cleared from the local copy of "match" as fields are
		 * consumed, allowing an early exit when none remain.
		 */
		for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
			struct ice_flow_fld_info *fld;
			const u64 bit = BIT_ULL(j);

			if (!(match & bit))
				continue;

			fld = &seg->fields[j];
			fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;

			if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
				/* Range-checked fields consume a range checker
				 * rather than byte-selection space.
				 */
				fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;

				/* Range checking only supported for single
				 * words
				 */
				if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
							fld->xtrct.disp,
							BITS_PER_BYTE * 2) > 1)
					return ICE_ERR_PARAM;

				/* Ranges must define low and high values */
				if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
				    fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
					return ICE_ERR_PARAM;

				/* Hand the field the next free range checker */
				fld->entry.val = range_idx++;
			} else {
				/* Store adjusted byte-length of field for later
				 * use, taking into account potential
				 * non-byte-aligned displacement
				 */
				fld->entry.last = DIVIDE_AND_ROUND_UP
					(ice_flds_info[j].size +
					 (fld->xtrct.disp % BITS_PER_BYTE),
					 BITS_PER_BYTE);
				fld->entry.val = index;
				index += fld->entry.last;
			}

			match &= ~bit;
		}

		/* Raw matches are laid out after the named fields */
		for (j = 0; j < seg->raws_cnt; j++) {
			struct ice_flow_seg_fld_raw *raw = &seg->raws[j];

			raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
			raw->info.entry.val = index;
			raw->info.entry.last = raw->info.src.last;
			index += raw->info.entry.last;
		}
	}

	/* Currently only support using the byte selection base, which only
	 * allows for an effective entry size of 30 bytes. Reject anything
	 * larger.
	 */
	if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
		return ICE_ERR_PARAM;

	/* Only 8 range checkers per profile, reject anything trying to use
	 * more
	 */
	if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
		return ICE_ERR_PARAM;

	/* Store # bytes required for entry for later use */
	params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	return ICE_SUCCESS;
}
1144
1145 /**
1146  * ice_flow_proc_segs - process all packet segments associated with a profile
1147  * @hw: pointer to the HW struct
1148  * @params: information about the flow to be processed
1149  */
1150 static enum ice_status
1151 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1152 {
1153         enum ice_status status;
1154
1155         status = ice_flow_proc_seg_hdrs(params);
1156         if (status)
1157                 return status;
1158
1159         status = ice_flow_create_xtrct_seq(hw, params);
1160         if (status)
1161                 return status;
1162
1163         switch (params->blk) {
1164         case ICE_BLK_RSS:
1165                 /* Only header information is provided for RSS configuration.
1166                  * No further processing is needed.
1167                  */
1168                 status = ICE_SUCCESS;
1169                 break;
1170         case ICE_BLK_ACL:
1171                 status = ice_flow_acl_def_entry_frmt(params);
1172                 if (status)
1173                         return status;
1174                 status = ice_flow_sel_acl_scen(hw, params);
1175                 if (status)
1176                         return status;
1177                 break;
1178         case ICE_BLK_FD:
1179                 status = ICE_SUCCESS;
1180                 break;
1181         case ICE_BLK_SW:
1182         default:
1183                 return ICE_ERR_NOT_IMPL;
1184         }
1185
1186         return status;
1187 }
1188
1189 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1190 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1191 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1192
1193 /**
1194  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1195  * @hw: pointer to the HW struct
1196  * @blk: classification stage
1197  * @dir: flow direction
1198  * @segs: array of one or more packet segments that describe the flow
1199  * @segs_cnt: number of packet segments provided
1200  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1201  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1202  */
1203 static struct ice_flow_prof *
1204 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1205                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1206                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1207 {
1208         struct ice_flow_prof *p, *prof = NULL;
1209
1210         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1211         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1212                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1213                     segs_cnt && segs_cnt == p->segs_cnt) {
1214                         u8 i;
1215
1216                         /* Check for profile-VSI association if specified */
1217                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1218                             ice_is_vsi_valid(hw, vsi_handle) &&
1219                             !ice_is_bit_set(p->vsis, vsi_handle))
1220                                 continue;
1221
1222                         /* Protocol headers must be checked. Matched fields are
1223                          * checked if specified.
1224                          */
1225                         for (i = 0; i < segs_cnt; i++)
1226                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1227                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1228                                      segs[i].match != p->segs[i].match))
1229                                         break;
1230
1231                         /* A match is found if all segments are matched */
1232                         if (i == segs_cnt) {
1233                                 prof = p;
1234                                 break;
1235                         }
1236                 }
1237         }
1238         ice_release_lock(&hw->fl_profs_locks[blk]);
1239
1240         return prof;
1241 }
1242
1243 /**
1244  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1245  * @hw: pointer to the HW struct
1246  * @blk: classification stage
1247  * @dir: flow direction
1248  * @segs: array of one or more packet segments that describe the flow
1249  * @segs_cnt: number of packet segments provided
1250  */
1251 u64
1252 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1253                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1254 {
1255         struct ice_flow_prof *p;
1256
1257         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1258                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1259
1260         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1261 }
1262
1263 /**
1264  * ice_flow_find_prof_id - Look up a profile with given profile ID
1265  * @hw: pointer to the HW struct
1266  * @blk: classification stage
1267  * @prof_id: unique ID to identify this flow profile
1268  */
1269 static struct ice_flow_prof *
1270 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1271 {
1272         struct ice_flow_prof *p;
1273
1274         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1275                 if (p->id == prof_id)
1276                         return p;
1277         }
1278
1279         return NULL;
1280 }
1281
1282 /**
1283  * ice_dealloc_flow_entry - Deallocate flow entry memory
1284  * @hw: pointer to the HW struct
1285  * @entry: flow entry to be removed
1286  */
1287 static void
1288 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1289 {
1290         if (!entry)
1291                 return;
1292
1293         if (entry->entry)
1294                 ice_free(hw, entry->entry);
1295
1296         if (entry->range_buf) {
1297                 ice_free(hw, entry->range_buf);
1298                 entry->range_buf = NULL;
1299         }
1300
1301         if (entry->acts) {
1302                 ice_free(hw, entry->acts);
1303                 entry->acts = NULL;
1304                 entry->acts_cnt = 0;
1305         }
1306
1307         ice_free(hw, entry);
1308 }
1309
1310 #define ICE_ACL_INVALID_SCEN    0x3f
1311
1312 /**
1313  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any pf
1314  * @hw: pointer to the hardware structure
1315  * @prof: pointer to flow profile
1316  * @buf: destination buffer function writes partial xtrct sequence to
1317  *
1318  * returns ICE_SUCCESS if no pf is associated to the given profile
1319  * returns ICE_ERR_IN_USE if at least one pf is associated to the given profile
1320  * returns other error code for real error
1321  */
1322 static enum ice_status
1323 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1324                             struct ice_aqc_acl_prof_generic_frmt *buf)
1325 {
1326         enum ice_status status;
1327         u8 prof_id = 0;
1328
1329         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1330         if (status)
1331                 return status;
1332
1333         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1334         if (status)
1335                 return status;
1336
1337         /* If all pf's associated scenarios are all 0 or all
1338          * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1339          * not been configured yet.
1340          */
1341         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1342             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1343             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1344             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1345                 return ICE_SUCCESS;
1346
1347         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1348             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1349             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1350             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1351             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1352             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1353             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1354             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1355                 return ICE_SUCCESS;
1356         else
1357                 return ICE_ERR_IN_USE;
1358 }
1359
1360 /**
1361  * ice_flow_acl_free_act_cntr - Free the acl rule's actions
1362  * @hw: pointer to the hardware structure
1363  * @acts: array of actions to be performed on a match
1364  * @acts_cnt: number of actions
1365  */
1366 static enum ice_status
1367 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1368                            u8 acts_cnt)
1369 {
1370         int i;
1371
1372         for (i = 0; i < acts_cnt; i++) {
1373                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1374                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1375                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1376                         struct ice_acl_cntrs cntrs;
1377                         enum ice_status status;
1378
1379                         cntrs.bank = 0; /* Only bank0 for the moment */
1380                         cntrs.first_cntr =
1381                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1382                         cntrs.last_cntr =
1383                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1384
1385                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1386                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1387                         else
1388                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1389
1390                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1391                         if (status)
1392                                 return status;
1393                 }
1394         }
1395         return ICE_SUCCESS;
1396 }
1397
1398 /**
1399  * ice_flow_acl_disassoc_scen - Disassociate the scenario to the Profile
1400  * @hw: pointer to the hardware structure
1401  * @prof: pointer to flow profile
1402  *
1403  * Disassociate the scenario to the Profile for the PF of the VSI.
1404  */
1405 static enum ice_status
1406 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1407 {
1408         struct ice_aqc_acl_prof_generic_frmt buf;
1409         enum ice_status status = ICE_SUCCESS;
1410         u8 prof_id = 0;
1411
1412         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1413
1414         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1415         if (status)
1416                 return status;
1417
1418         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1419         if (status)
1420                 return status;
1421
1422         /* Clear scenario for this pf */
1423         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1424         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1425
1426         return status;
1427 }
1428
1429 /**
1430  * ice_flow_rem_entry_sync - Remove a flow entry
1431  * @hw: pointer to the HW struct
1432  * @blk: classification stage
1433  * @entry: flow entry to be removed
1434  */
1435 static enum ice_status
1436 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1437                         struct ice_flow_entry *entry)
1438 {
1439         if (!entry)
1440                 return ICE_ERR_BAD_PTR;
1441
1442         if (blk == ICE_BLK_ACL) {
1443                 enum ice_status status;
1444
1445                 if (!entry->prof)
1446                         return ICE_ERR_BAD_PTR;
1447
1448                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1449                                            entry->scen_entry_idx);
1450                 if (status)
1451                         return status;
1452
1453                 /* Checks if we need to release an ACL counter. */
1454                 if (entry->acts_cnt && entry->acts)
1455                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1456                                                    entry->acts_cnt);
1457         }
1458
1459         LIST_DEL(&entry->l_entry);
1460
1461         ice_dealloc_flow_entry(hw, entry);
1462
1463         return ICE_SUCCESS;
1464 }
1465
/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @acts: array of default actions
 * @acts_cnt: number of default actions
 * @prof: stores the returned flow profile added
 *
 * Allocates a flow profile, copies the caller's segments and actions into it,
 * derives ptypes/attributes/extraction sequence from the segments, and adds a
 * matching HW profile. On any failure everything allocated here is freed.
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_action *acts, u8 acts_cnt,
		       struct ice_flow_prof *prof)
{
	struct ice_flow_prof_params params;
	enum ice_status status;
	u8 i;

	/* Actions are optional, but a non-zero count requires a valid array */
	if (!prof || (acts_cnt && !acts))
		return ICE_ERR_BAD_PTR;

	ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
	params.prof = (struct ice_flow_prof *)
		ice_malloc(hw, sizeof(*params.prof));
	if (!params.prof)
		return ICE_ERR_NO_MEMORY;

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params.es[i].prot_id = ICE_PROT_INVALID;
		params.es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params.blk = blk;
	params.prof->id = prof_id;
	params.prof->dir = dir;
	params.prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
			   ICE_NONDMA_TO_NONDMA);

	/* Make a copy of the actions that need to be persistent in the flow
	 * profile instance.
	 */
	if (acts_cnt) {
		params.prof->acts = (struct ice_flow_action *)
			ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
				   ICE_NONDMA_TO_NONDMA);

		if (!params.prof->acts) {
			status = ICE_ERR_NO_MEMORY;
			goto out;
		}
	}

	/* Derive ptypes, attributes and extraction sequence from segments */
	status = ice_flow_proc_segs(hw, &params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW,
			  "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
			      params.attr, params.attr_cnt, params.es,
			      params.mask);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params.prof->entries);
	ice_init_lock(&params.prof->entries_lock);
	*prof = params.prof;

out:
	/* On failure release the profile copy and its action copy; ownership
	 * only transfers to *prof on success
	 */
	if (status) {
		if (params.prof->acts)
			ice_free(hw, params.prof->acts);
		ice_free(hw, params.prof);
	}

	return status;
}
1561
/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Removes all flow entries from the profile, clears ACL scenario and
 * range-checker state for the ACL block, then removes the HW profile(s) and
 * frees the flow profile on success.
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the flow profile */
	if (!LIST_EMPTY(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		ice_acquire_lock(&prof->entries_lock);

		/* NOTE(review): an entry-removal failure stops this loop but
		 * is not propagated; removal continues best-effort below and
		 * status is overwritten. Confirm this is intentional.
		 */
		LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
					 l_entry) {
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		ice_release_lock(&prof->entries_lock);
	}

	if (blk == ICE_BLK_ACL) {
		struct ice_aqc_acl_profile_ranges query_rng_buf;
		struct ice_aqc_acl_prof_generic_frmt buf;
		u8 prof_id = 0;

		/* Deassociate the scenario to the Profile for the PF */
		status = ice_flow_acl_disassoc_scen(hw, prof);
		if (status)
			return status;

		/* Clear the range-checker if the profile ID is no longer
		 * used by any PF
		 */
		status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
		if (status && status != ICE_ERR_IN_USE) {
			return status;
		} else if (!status) {
			/* Clear the range-checker value for profile ID */
			ice_memset(&query_rng_buf, 0,
				   sizeof(struct ice_aqc_acl_profile_ranges),
				   ICE_NONDMA_MEM);

			status = ice_flow_get_hw_prof(hw, blk, prof->id,
						      &prof_id);
			if (status)
				return status;

			/* Programming an all-zero range buffer disables all
			 * range checkers for this profile ID
			 */
			status = ice_prog_acl_prof_ranges(hw, prof_id,
							  &query_rng_buf, NULL);
			if (status)
				return status;
		}
	}

	/* Remove all hardware profiles associated with this flow profile */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		/* Unlink and free only on success; on failure the profile
		 * stays on the caller's list untouched
		 */
		LIST_DEL(&prof->l_entry);
		ice_destroy_lock(&prof->entries_lock);
		if (prof->acts)
			ice_free(hw, prof->acts);
		ice_free(hw, prof);
	}

	return status;
}
1638
1639 /**
1640  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1641  * @buf: Destination buffer function writes partial xtrct sequence to
1642  * @info: Info about field
1643  */
1644 static void
1645 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1646                                struct ice_flow_fld_info *info)
1647 {
1648         u16 dst, i;
1649         u8 src;
1650
1651         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1652                 info->xtrct.disp / BITS_PER_BYTE;
1653         dst = info->entry.val;
1654         for (i = 0; i < info->entry.last; i++)
1655                 /* HW stores field vector words in LE, convert words back to BE
1656                  * so constructed entries will end up in network order
1657                  */
1658                 buf->byte_selection[dst++] = src++ ^ 1;
1659 }
1660
/**
 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
 * @hw: pointer to the hardware structure
 * @prof: pointer to flow profile
 *
 * Programs the profile's byte/word selection (extraction sequence) the first
 * time the profile is used, then records the current PF's scenario number in
 * the profile's generic format and writes it to HW.
 */
static enum ice_status
ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
{
	struct ice_aqc_acl_prof_generic_frmt buf;
	struct ice_flow_fld_info *info;
	enum ice_status status;
	u8 prof_id = 0;
	u16 i;

	ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

	status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
	if (status)
		return status;

	/* ICE_SUCCESS means no PF uses the profile yet (program it below);
	 * ICE_ERR_IN_USE means another PF already configured it and buf now
	 * holds that configuration; anything else is a real error
	 */
	status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
	if (status && status != ICE_ERR_IN_USE)
		return status;

	if (!status) {
		/* Program the profile dependent configuration. This is done
		 * only once regardless of the number of PFs using that profile
		 */
		ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

		for (i = 0; i < prof->segs_cnt; i++) {
			struct ice_flow_seg_info *seg = &prof->segs[i];
			u64 match = seg->match;
			u16 j;

			/* Visit each matched field; clear bits as handled so
			 * the loop can stop early once match is empty
			 */
			for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
				const u64 bit = BIT_ULL(j);

				if (!(match & bit))
					continue;

				info = &seg->fields[j];

				/* Range-checked fields use word selection;
				 * all other fields use byte selection
				 */
				if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
					buf.word_selection[info->entry.val] =
								info->xtrct.idx;
				else
					ice_flow_acl_set_xtrct_seq_fld(&buf,
								       info);

				match &= ~bit;
			}

			/* Raw (offset-based) matches use byte selection too */
			for (j = 0; j < seg->raws_cnt; j++) {
				info = &seg->raws[j].info;
				ice_flow_acl_set_xtrct_seq_fld(&buf, info);
			}
		}

		/* Mark every PF slot unused until each PF claims a scenario */
		ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
			   ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
			   ICE_NONDMA_MEM);
	}

	/* Update the current PF */
	buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
	status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);

	return status;
}
1731
1732 /**
1733  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1734  * @hw: pointer to the hardware structure
1735  * @blk: classification stage
1736  * @vsi_handle: software VSI handle
1737  * @vsig: target VSI group
1738  *
1739  * Assumption: the caller has already verified that the VSI to
1740  * be added has the same characteristics as the VSIG and will
1741  * thereby have access to all resources added to that VSIG.
1742  */
1743 enum ice_status
1744 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1745                         u16 vsig)
1746 {
1747         enum ice_status status;
1748
1749         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1750                 return ICE_ERR_PARAM;
1751
1752         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1753         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1754                                   vsig);
1755         ice_release_lock(&hw->fl_profs_locks[blk]);
1756
1757         return status;
1758 }
1759
1760 /**
1761  * ice_flow_assoc_prof - associate a VSI with a flow profile
1762  * @hw: pointer to the hardware structure
1763  * @blk: classification stage
1764  * @prof: pointer to flow profile
1765  * @vsi_handle: software VSI handle
1766  *
1767  * Assumption: the caller has acquired the lock to the profile list
1768  * and the software VSI handle has been validated
1769  */
1770 static enum ice_status
1771 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1772                     struct ice_flow_prof *prof, u16 vsi_handle)
1773 {
1774         enum ice_status status = ICE_SUCCESS;
1775
1776         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1777                 if (blk == ICE_BLK_ACL) {
1778                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
1779                         if (status)
1780                                 return status;
1781                 }
1782                 status = ice_add_prof_id_flow(hw, blk,
1783                                               ice_get_hw_vsi_num(hw,
1784                                                                  vsi_handle),
1785                                               prof->id);
1786                 if (!status)
1787                         ice_set_bit(vsi_handle, prof->vsis);
1788                 else
1789                         ice_debug(hw, ICE_DBG_FLOW,
1790                                   "HW profile add failed, %d\n",
1791                                   status);
1792         }
1793
1794         return status;
1795 }
1796
1797 /**
1798  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1799  * @hw: pointer to the hardware structure
1800  * @blk: classification stage
1801  * @prof: pointer to flow profile
1802  * @vsi_handle: software VSI handle
1803  *
1804  * Assumption: the caller has acquired the lock to the profile list
1805  * and the software VSI handle has been validated
1806  */
1807 static enum ice_status
1808 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1809                        struct ice_flow_prof *prof, u16 vsi_handle)
1810 {
1811         enum ice_status status = ICE_SUCCESS;
1812
1813         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
1814                 status = ice_rem_prof_id_flow(hw, blk,
1815                                               ice_get_hw_vsi_num(hw,
1816                                                                  vsi_handle),
1817                                               prof->id);
1818                 if (!status)
1819                         ice_clear_bit(vsi_handle, prof->vsis);
1820                 else
1821                         ice_debug(hw, ICE_DBG_FLOW,
1822                                   "HW profile remove failed, %d\n",
1823                                   status);
1824         }
1825
1826         return status;
1827 }
1828
1829 /**
1830  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
1831  * @hw: pointer to the HW struct
1832  * @blk: classification stage
1833  * @dir: flow direction
1834  * @prof_id: unique ID to identify this flow profile
1835  * @segs: array of one or more packet segments that describe the flow
1836  * @segs_cnt: number of packet segments provided
1837  * @acts: array of default actions
1838  * @acts_cnt: number of default actions
1839  * @prof: stores the returned flow profile added
1840  */
1841 enum ice_status
1842 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1843                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
1844                   struct ice_flow_action *acts, u8 acts_cnt,
1845                   struct ice_flow_prof **prof)
1846 {
1847         enum ice_status status;
1848
1849         if (segs_cnt > ICE_FLOW_SEG_MAX)
1850                 return ICE_ERR_MAX_LIMIT;
1851
1852         if (!segs_cnt)
1853                 return ICE_ERR_PARAM;
1854
1855         if (!segs)
1856                 return ICE_ERR_BAD_PTR;
1857
1858         status = ice_flow_val_hdrs(segs, segs_cnt);
1859         if (status)
1860                 return status;
1861
1862         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1863
1864         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
1865                                         acts, acts_cnt, prof);
1866         if (!status)
1867                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
1868
1869         ice_release_lock(&hw->fl_profs_locks[blk]);
1870
1871         return status;
1872 }
1873
1874 /**
1875  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
1876  * @hw: pointer to the HW struct
1877  * @blk: the block for which the flow profile is to be removed
1878  * @prof_id: unique ID of the flow profile to be removed
1879  */
1880 enum ice_status
1881 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1882 {
1883         struct ice_flow_prof *prof;
1884         enum ice_status status;
1885
1886         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1887
1888         prof = ice_flow_find_prof_id(hw, blk, prof_id);
1889         if (!prof) {
1890                 status = ICE_ERR_DOES_NOT_EXIST;
1891                 goto out;
1892         }
1893
1894         /* prof becomes invalid after the call */
1895         status = ice_flow_rem_prof_sync(hw, blk, prof);
1896
1897 out:
1898         ice_release_lock(&hw->fl_profs_locks[blk]);
1899
1900         return status;
1901 }
1902
1903 /**
1904  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1905  * @hw: pointer to the HW struct
1906  * @blk: classification stage
1907  * @prof_id: the profile ID handle
1908  * @hw_prof_id: pointer to variable to receive the HW profile ID
1909  */
1910 enum ice_status
1911 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1912                      u8 *hw_prof_id)
1913 {
1914         struct ice_prof_map *map;
1915
1916         map = ice_search_prof_id(hw, blk, prof_id);
1917         if (map) {
1918                 *hw_prof_id = map->prof_id;
1919                 return ICE_SUCCESS;
1920         }
1921
1922         return ICE_ERR_DOES_NOT_EXIST;
1923 }
1924
1925 /**
1926  * ice_flow_find_entry - look for a flow entry using its unique ID
1927  * @hw: pointer to the HW struct
1928  * @blk: classification stage
1929  * @entry_id: unique ID to identify this flow entry
1930  *
1931  * This function looks for the flow entry with the specified unique ID in all
1932  * flow profiles of the specified classification stage. If the entry is found,
1933  * and it returns the handle to the flow entry. Otherwise, it returns
1934  * ICE_FLOW_ENTRY_ID_INVAL.
1935  */
1936 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
1937 {
1938         struct ice_flow_entry *found = NULL;
1939         struct ice_flow_prof *p;
1940
1941         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1942
1943         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1944                 struct ice_flow_entry *e;
1945
1946                 ice_acquire_lock(&p->entries_lock);
1947                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
1948                         if (e->id == entry_id) {
1949                                 found = e;
1950                                 break;
1951                         }
1952                 ice_release_lock(&p->entries_lock);
1953
1954                 if (found)
1955                         break;
1956         }
1957
1958         ice_release_lock(&hw->fl_profs_locks[blk]);
1959
1960         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
1961 }
1962
1963 /**
1964  * ice_flow_acl_check_actions - Checks the acl rule's actions
1965  * @hw: pointer to the hardware structure
1966  * @acts: array of actions to be performed on a match
1967  * @acts_cnt: number of actions
1968  * @cnt_alloc: indicates if a ACL counter has been allocated.
1969  */
1970 static enum ice_status
1971 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
1972                            u8 acts_cnt, bool *cnt_alloc)
1973 {
1974         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
1975         int i;
1976
1977         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
1978         *cnt_alloc = false;
1979
1980         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
1981                 return ICE_ERR_OUT_OF_RANGE;
1982
1983         for (i = 0; i < acts_cnt; i++) {
1984                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
1985                     acts[i].type != ICE_FLOW_ACT_DROP &&
1986                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
1987                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
1988                         return ICE_ERR_CFG;
1989
1990                 /* If the caller want to add two actions of the same type, then
1991                  * it is considered invalid configuration.
1992                  */
1993                 if (ice_test_and_set_bit(acts[i].type, dup_check))
1994                         return ICE_ERR_PARAM;
1995         }
1996
1997         /* Checks if ACL counters are needed. */
1998         for (i = 0; i < acts_cnt; i++) {
1999                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2000                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2001                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2002                         struct ice_acl_cntrs cntrs;
2003                         enum ice_status status;
2004
2005                         cntrs.amount = 1;
2006                         cntrs.bank = 0; /* Only bank0 for the moment */
2007
2008                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2009                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2010                         else
2011                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2012
2013                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2014                         if (status)
2015                                 return status;
2016                         /* Counter index within the bank */
2017                         acts[i].data.acl_act.value =
2018                                                 CPU_TO_LE16(cntrs.first_cntr);
2019                         *cnt_alloc = true;
2020                 }
2021         }
2022
2023         return ICE_SUCCESS;
2024 }
2025
/**
 * ice_flow_acl_frmt_entry_range - Format an acl range checker for a given field
 * @fld: number of the given field
 * @info: info about field
 * @range_buf: range checker configuration buffer
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @range: Input/output param indicating which range checkers are being used
 *
 * Writes the field's low/high boundaries and mask (big-endian) into the
 * range-checker slot selected by info->entry.val and flags that slot in
 * *range. A zero effective mask leaves the slot untouched.
 */
static void
ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
			      struct ice_aqc_acl_profile_ranges *range_buf,
			      u8 *data, u8 *range)
{
	u16 new_mask;

	/* If not specified, default mask is all bits in field */
	/* NOTE(review): reads a u16 directly from the byte buffer; assumes
	 * data + offset is suitably aligned for a u16 access — confirm.
	 */
	new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
		    BIT(ice_flds_info[fld].size) - 1 :
		    (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;

	/* If the mask is 0, then we don't need to worry about this input
	 * range checker value.
	 */
	if (new_mask) {
		/* src.last holds the high boundary, src.val the low one */
		u16 new_high =
			(*(u16 *)(data + info->src.last)) << info->xtrct.disp;
		u16 new_low =
			(*(u16 *)(data + info->src.val)) << info->xtrct.disp;
		u8 range_idx = info->entry.val;

		range_buf->checker_cfg[range_idx].low_boundary =
			CPU_TO_BE16(new_low);
		range_buf->checker_cfg[range_idx].high_boundary =
			CPU_TO_BE16(new_high);
		range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);

		/* Indicate which range checker is being used */
		*range |= BIT(range_idx);
	}
}
2066
/**
 * ice_flow_acl_frmt_entry_fld - Partially format acl entry for a given field
 * @fld: number of the given field
 * @info: info about the field
 * @buf: buffer containing the entry
 * @dontcare: buffer containing don't care mask for entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 *
 * Copies the field's match value (and the complement of its mask) from the
 * user data buffer into the entry key and don't-care buffers, shifting each
 * byte left by the field's bit displacement and carrying overflow bits into
 * the next byte. Bits before the field's start and after its end are marked
 * as don't-care.
 */
static void
ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
			    u8 *dontcare, u8 *data)
{
	u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
	bool use_mask = false;
	u8 disp;

	src = info->src.val;
	mask = info->src.mask;
	/* Entry buffers start at the profile's byte-select base index */
	dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
	/* Bit displacement of the field within its starting byte */
	disp = info->xtrct.disp % BITS_PER_BYTE;

	if (mask != ICE_FLOW_FLD_OFF_INVAL)
		use_mask = true;

	for (k = 0; k < info->entry.last; k++, dst++) {
		/* Add overflow bits from previous byte */
		buf[dst] = (tmp_s & 0xff00) >> 8;

		/* If mask is not valid, tmp_m is always zero, so just setting
		 * dontcare to 0 (no masked bits). If mask is valid, pulls in
		 * overflow bits of mask from prev byte
		 */
		dontcare[dst] = (tmp_m & 0xff00) >> 8;

		/* If there is displacement, last byte will only contain
		 * displaced data, but there is no more data to read from user
		 * buffer, so skip so as not to potentially read beyond end of
		 * user buffer
		 */
		if (!disp || k < info->entry.last - 1) {
			/* Store shifted data to use in next byte */
			tmp_s = data[src++] << disp;

			/* Add current (shifted) byte */
			buf[dst] |= tmp_s & 0xff;

			/* Handle mask if valid; the mask is inverted because
			 * don't-care bits are the complement of the care mask
			 */
			if (use_mask) {
				tmp_m = (~data[mask++] & 0xff) << disp;
				dontcare[dst] |= tmp_m & 0xff;
			}
		}
	}

	/* Fill in don't care bits at beginning of field */
	if (disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
		for (k = 0; k < disp; k++)
			dontcare[dst] |= BIT(k);
	}

	/* Bit offset of the field's end within its final byte */
	end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;

	/* Fill in don't care bits at end of field */
	if (end_disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
		      info->entry.last - 1;
		for (k = end_disp; k < BITS_PER_BYTE; k++)
			dontcare[dst] |= BIT(k);
	}
}
2138
2139 /**
2140  * ice_flow_acl_frmt_entry - Format acl entry
2141  * @hw: pointer to the hardware structure
2142  * @prof: pointer to flow profile
2143  * @e: pointer to the flow entry
2144  * @data: pointer to a data buffer containing flow entry's match values/masks
2145  * @acts: array of actions to be performed on a match
2146  * @acts_cnt: number of actions
2147  *
2148  * Formats the key (and key_inverse) to be matched from the data passed in,
2149  * along with data from the flow profile. This key/key_inverse pair makes up
2150  * the 'entry' for an acl flow entry.
2151  */
2152 static enum ice_status
2153 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2154                         struct ice_flow_entry *e, u8 *data,
2155                         struct ice_flow_action *acts, u8 acts_cnt)
2156 {
2157         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2158         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2159         enum ice_status status;
2160         bool cnt_alloc;
2161         u8 prof_id = 0;
2162         u16 i, buf_sz;
2163
2164         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2165         if (status)
2166                 return status;
2167
2168         /* Format the result action */
2169
2170         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2171         if (status)
2172                 return status;
2173
2174         status = ICE_ERR_NO_MEMORY;
2175
2176         e->acts = (struct ice_flow_action *)
2177                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2178                            ICE_NONDMA_TO_NONDMA);
2179
2180         if (!e->acts)
2181                 goto out;
2182
2183         e->acts_cnt = acts_cnt;
2184
2185         /* Format the matching data */
2186         buf_sz = prof->cfg.scen->width;
2187         buf = (u8 *)ice_malloc(hw, buf_sz);
2188         if (!buf)
2189                 goto out;
2190
2191         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2192         if (!dontcare)
2193                 goto out;
2194
2195         /* 'key' buffer will store both key and key_inverse, so must be twice
2196          * size of buf
2197          */
2198         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2199         if (!key)
2200                 goto out;
2201
2202         range_buf = (struct ice_aqc_acl_profile_ranges *)
2203                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2204         if (!range_buf)
2205                 goto out;
2206
2207         /* Set don't care mask to all 1's to start, will zero out used bytes */
2208         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2209
2210         for (i = 0; i < prof->segs_cnt; i++) {
2211                 struct ice_flow_seg_info *seg = &prof->segs[i];
2212                 u64 match = seg->match;
2213                 u16 j;
2214
2215                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2216                         struct ice_flow_fld_info *info;
2217                         const u64 bit = BIT_ULL(j);
2218
2219                         if (!(match & bit))
2220                                 continue;
2221
2222                         info = &seg->fields[j];
2223
2224                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2225                                 ice_flow_acl_frmt_entry_range(j, info,
2226                                                               range_buf, data,
2227                                                               &range);
2228                         else
2229                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2230                                                             dontcare, data);
2231
2232                         match &= ~bit;
2233                 }
2234
2235                 for (j = 0; j < seg->raws_cnt; j++) {
2236                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2237                         u16 dst, src, mask, k;
2238                         bool use_mask = false;
2239
2240                         src = info->src.val;
2241                         dst = info->entry.val -
2242                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2243                         mask = info->src.mask;
2244
2245                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2246                                 use_mask = true;
2247
2248                         for (k = 0; k < info->entry.last; k++, dst++) {
2249                                 buf[dst] = data[src++];
2250                                 if (use_mask)
2251                                         dontcare[dst] = ~data[mask++];
2252                                 else
2253                                         dontcare[dst] = 0;
2254                         }
2255                 }
2256         }
2257
2258         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2259         dontcare[prof->cfg.scen->pid_idx] = 0;
2260
2261         /* Format the buffer for direction flags */
2262         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2263
2264         if (prof->dir == ICE_FLOW_RX)
2265                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2266
2267         if (range) {
2268                 buf[prof->cfg.scen->rng_chk_idx] = range;
2269                 /* Mark any unused range checkers as don't care */
2270                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2271                 e->range_buf = range_buf;
2272         } else {
2273                 ice_free(hw, range_buf);
2274         }
2275
2276         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2277                              buf_sz);
2278         if (status)
2279                 goto out;
2280
2281         e->entry = key;
2282         e->entry_sz = buf_sz * 2;
2283
2284 out:
2285         if (buf)
2286                 ice_free(hw, buf);
2287
2288         if (dontcare)
2289                 ice_free(hw, dontcare);
2290
2291         if (status && key)
2292                 ice_free(hw, key);
2293
2294         if (status && range_buf) {
2295                 ice_free(hw, range_buf);
2296                 e->range_buf = NULL;
2297         }
2298
2299         if (status && e->acts) {
2300                 ice_free(hw, e->acts);
2301                 e->acts = NULL;
2302                 e->acts_cnt = 0;
2303         }
2304
2305         if (status && cnt_alloc)
2306                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2307
2308         return status;
2309 }
2310
2311 /**
2312  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2313  *                                     the compared data.
2314  * @prof: pointer to flow profile
2315  * @e: pointer to the comparing flow entry
2316  * @do_chg_action: decide if we want to change the ACL action
2317  * @do_add_entry: decide if we want to add the new ACL entry
2318  * @do_rem_entry: decide if we want to remove the current ACL entry
2319  *
2320  * Find an ACL scenario entry that matches the compared data. In the same time,
2321  * this function also figure out:
2322  * a/ If we want to change the ACL action
2323  * b/ If we want to add the new ACL entry
2324  * c/ If we want to remove the current ACL entry
2325  */
2326 static struct ice_flow_entry *
2327 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2328                                   struct ice_flow_entry *e, bool *do_chg_action,
2329                                   bool *do_add_entry, bool *do_rem_entry)
2330 {
2331         struct ice_flow_entry *p, *return_entry = NULL;
2332         u8 i, j;
2333
2334         /* Check if:
2335          * a/ There exists an entry with same matching data, but different
2336          *    priority, then we remove this existing ACL entry. Then, we
2337          *    will add the new entry to the ACL scenario.
2338          * b/ There exists an entry with same matching data, priority, and
2339          *    result action, then we do nothing
2340          * c/ There exists an entry with same matching data, priority, but
2341          *    different, action, then do only change the action's entry.
2342          * d/ Else, we add this new entry to the ACL scenario.
2343          */
2344         *do_chg_action = false;
2345         *do_add_entry = true;
2346         *do_rem_entry = false;
2347         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2348                 if (memcmp(p->entry, e->entry, p->entry_sz))
2349                         continue;
2350
2351                 /* From this point, we have the same matching_data. */
2352                 *do_add_entry = false;
2353                 return_entry = p;
2354
2355                 if (p->priority != e->priority) {
2356                         /* matching data && !priority */
2357                         *do_add_entry = true;
2358                         *do_rem_entry = true;
2359                         break;
2360                 }
2361
2362                 /* From this point, we will have matching_data && priority */
2363                 if (p->acts_cnt != e->acts_cnt)
2364                         *do_chg_action = true;
2365                 for (i = 0; i < p->acts_cnt; i++) {
2366                         bool found_not_match = false;
2367
2368                         for (j = 0; j < e->acts_cnt; j++)
2369                                 if (memcmp(&p->acts[i], &e->acts[j],
2370                                            sizeof(struct ice_flow_action))) {
2371                                         found_not_match = true;
2372                                         break;
2373                                 }
2374
2375                         if (found_not_match) {
2376                                 *do_chg_action = true;
2377                                 break;
2378                         }
2379                 }
2380
2381                 /* (do_chg_action = true) means :
2382                  *    matching_data && priority && !result_action
2383                  * (do_chg_action = false) means :
2384                  *    matching_data && priority && result_action
2385                  */
2386                 break;
2387         }
2388
2389         return return_entry;
2390 }
2391
2392 /**
2393  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2394  * @p: flow priority
2395  */
2396 static enum ice_acl_entry_prior
2397 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2398 {
2399         enum ice_acl_entry_prior acl_prior;
2400
2401         switch (p) {
2402         case ICE_FLOW_PRIO_LOW:
2403                 acl_prior = ICE_LOW;
2404                 break;
2405         case ICE_FLOW_PRIO_NORMAL:
2406                 acl_prior = ICE_NORMAL;
2407                 break;
2408         case ICE_FLOW_PRIO_HIGH:
2409                 acl_prior = ICE_HIGH;
2410                 break;
2411         default:
2412                 acl_prior = ICE_NORMAL;
2413                 break;
2414         }
2415
2416         return acl_prior;
2417 }
2418
2419 /**
2420  * ice_flow_acl_union_rng_chk - Perform union operation between two
2421  *                              range-range checker buffers
2422  * @dst_buf: pointer to destination range checker buffer
2423  * @src_buf: pointer to source range checker buffer
2424  *
2425  * For this function, we do the union between dst_buf and src_buf
2426  * range checker buffer, and we will save the result back to dst_buf
2427  */
2428 static enum ice_status
2429 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2430                            struct ice_aqc_acl_profile_ranges *src_buf)
2431 {
2432         u8 i, j;
2433
2434         if (!dst_buf || !src_buf)
2435                 return ICE_ERR_BAD_PTR;
2436
2437         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2438                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2439                 bool will_populate = false;
2440
2441                 in_data = &src_buf->checker_cfg[i];
2442
2443                 if (!in_data->mask)
2444                         break;
2445
2446                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2447                         cfg_data = &dst_buf->checker_cfg[j];
2448
2449                         if (!cfg_data->mask ||
2450                             !memcmp(cfg_data, in_data,
2451                                     sizeof(struct ice_acl_rng_data))) {
2452                                 will_populate = true;
2453                                 break;
2454                         }
2455                 }
2456
2457                 if (will_populate) {
2458                         ice_memcpy(cfg_data, in_data,
2459                                    sizeof(struct ice_acl_rng_data),
2460                                    ICE_NONDMA_TO_NONDMA);
2461                 } else {
2462                         /* No available slot left to program range checker */
2463                         return ICE_ERR_MAX_LIMIT;
2464                 }
2465         }
2466
2467         return ICE_SUCCESS;
2468 }
2469
2470 /**
2471  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2472  * @hw: pointer to the hardware structure
2473  * @prof: pointer to flow profile
2474  * @entry: double pointer to the flow entry
2475  *
2476  * For this function, we will look at the current added entries in the
2477  * corresponding ACL scenario. Then, we will perform matching logic to
2478  * see if we want to add/modify/do nothing with this new entry.
2479  */
2480 static enum ice_status
2481 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2482                                  struct ice_flow_entry **entry)
2483 {
2484         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2485         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2486         struct ice_acl_act_entry *acts = NULL;
2487         struct ice_flow_entry *exist;
2488         enum ice_status status = ICE_SUCCESS;
2489         struct ice_flow_entry *e;
2490         u8 i;
2491
2492         if (!entry || !(*entry) || !prof)
2493                 return ICE_ERR_BAD_PTR;
2494
2495         e = *(entry);
2496
2497         do_chg_rng_chk = false;
2498         if (e->range_buf) {
2499                 u8 prof_id = 0;
2500
2501                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2502                                               &prof_id);
2503                 if (status)
2504                         return status;
2505
2506                 /* Query the current range-checker value in FW */
2507                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2508                                                    NULL);
2509                 if (status)
2510                         return status;
2511                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2512                            sizeof(struct ice_aqc_acl_profile_ranges),
2513                            ICE_NONDMA_TO_NONDMA);
2514
2515                 /* Generate the new range-checker value */
2516                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2517                 if (status)
2518                         return status;
2519
2520                 /* Reconfigure the range check if the buffer is changed. */
2521                 do_chg_rng_chk = false;
2522                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2523                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2524                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2525                                                           &cfg_rng_buf, NULL);
2526                         if (status)
2527                                 return status;
2528
2529                         do_chg_rng_chk = true;
2530                 }
2531         }
2532
2533         /* Figure out if we want to (change the ACL action) and/or
2534          * (Add the new ACL entry) and/or (Remove the current ACL entry)
2535          */
2536         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2537                                                   &do_add_entry, &do_rem_entry);
2538
2539         if (do_rem_entry) {
2540                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2541                 if (status)
2542                         return status;
2543         }
2544
2545         /* Prepare the result action buffer */
2546         acts = (struct ice_acl_act_entry *)ice_calloc
2547                 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2548         for (i = 0; i < e->acts_cnt; i++)
2549                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2550                            sizeof(struct ice_acl_act_entry),
2551                            ICE_NONDMA_TO_NONDMA);
2552
2553         if (do_add_entry) {
2554                 enum ice_acl_entry_prior prior;
2555                 u8 *keys, *inverts;
2556                 u16 entry_idx;
2557
2558                 keys = (u8 *)e->entry;
2559                 inverts = keys + (e->entry_sz / 2);
2560                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2561
2562                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2563                                            inverts, acts, e->acts_cnt,
2564                                            &entry_idx);
2565                 if (status)
2566                         goto out;
2567
2568                 e->scen_entry_idx = entry_idx;
2569                 LIST_ADD(&e->l_entry, &prof->entries);
2570         } else {
2571                 if (do_chg_action) {
2572                         /* For the action memory info, update the SW's copy of
2573                          * exist entry with e's action memory info
2574                          */
2575                         ice_free(hw, exist->acts);
2576                         exist->acts_cnt = e->acts_cnt;
2577                         exist->acts = (struct ice_flow_action *)
2578                                 ice_calloc(hw, exist->acts_cnt,
2579                                            sizeof(struct ice_flow_action));
2580
2581                         if (!exist->acts) {
2582                                 status = ICE_ERR_NO_MEMORY;
2583                                 goto out;
2584                         }
2585
2586                         ice_memcpy(exist->acts, e->acts,
2587                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2588                                    ICE_NONDMA_TO_NONDMA);
2589
2590                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2591                                                   e->acts_cnt,
2592                                                   exist->scen_entry_idx);
2593                         if (status)
2594                                 goto out;
2595                 }
2596
2597                 if (do_chg_rng_chk) {
2598                         /* In this case, we want to update the range checker
2599                          * information of the exist entry
2600                          */
2601                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2602                                                             e->range_buf);
2603                         if (status)
2604                                 goto out;
2605                 }
2606
2607                 /* As we don't add the new entry to our SW DB, deallocate its
2608                  * memories, and return the exist entry to the caller
2609                  */
2610                 ice_dealloc_flow_entry(hw, e);
2611                 *(entry) = exist;
2612         }
2613 out:
2614         if (acts)
2615                 ice_free(hw, acts);
2616
2617         return status;
2618 }
2619
2620 /**
2621  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2622  * @hw: pointer to the hardware structure
2623  * @prof: pointer to flow profile
2624  * @e: double pointer to the flow entry
2625  */
2626 static enum ice_status
2627 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2628                             struct ice_flow_entry **e)
2629 {
2630         enum ice_status status;
2631
2632         ice_acquire_lock(&prof->entries_lock);
2633         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2634         ice_release_lock(&prof->entries_lock);
2635
2636         return status;
2637 }
2638
2639 /**
2640  * ice_flow_add_entry - Add a flow entry
2641  * @hw: pointer to the HW struct
2642  * @blk: classification stage
2643  * @prof_id: ID of the profile to add a new flow entry to
2644  * @entry_id: unique ID to identify this flow entry
2645  * @vsi_handle: software VSI handle for the flow entry
2646  * @prio: priority of the flow entry
2647  * @data: pointer to a data buffer containing flow entry's match values/masks
2648  * @acts: arrays of actions to be performed on a match
2649  * @acts_cnt: number of actions
2650  * @entry_h: pointer to buffer that receives the new flow entry's handle
2651  */
2652 enum ice_status
2653 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2654                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2655                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
2656                    u64 *entry_h)
2657 {
2658         struct ice_flow_entry *e = NULL;
2659         struct ice_flow_prof *prof;
2660         enum ice_status status = ICE_SUCCESS;
2661
2662         /* ACL entries must indicate an action */
2663         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2664                 return ICE_ERR_PARAM;
2665
2666         /* No flow entry data is expected for RSS */
2667         if (!entry_h || (!data && blk != ICE_BLK_RSS))
2668                 return ICE_ERR_BAD_PTR;
2669
2670         if (!ice_is_vsi_valid(hw, vsi_handle))
2671                 return ICE_ERR_PARAM;
2672
2673         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2674
2675         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2676         if (!prof) {
2677                 status = ICE_ERR_DOES_NOT_EXIST;
2678         } else {
2679                 /* Allocate memory for the entry being added and associate
2680                  * the VSI to the found flow profile
2681                  */
2682                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2683                 if (!e)
2684                         status = ICE_ERR_NO_MEMORY;
2685                 else
2686                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2687         }
2688
2689         ice_release_lock(&hw->fl_profs_locks[blk]);
2690         if (status)
2691                 goto out;
2692
2693         e->id = entry_id;
2694         e->vsi_handle = vsi_handle;
2695         e->prof = prof;
2696         e->priority = prio;
2697
2698         switch (blk) {
2699         case ICE_BLK_RSS:
2700                 /* RSS will add only one entry per VSI per profile */
2701                 break;
2702         case ICE_BLK_ACL:
2703                 /* ACL will handle the entry management */
2704                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
2705                                                  acts_cnt);
2706                 if (status)
2707                         goto out;
2708
2709                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
2710                 if (status)
2711                         goto out;
2712
2713                 break;
2714         case ICE_BLK_FD:
2715                 break;
2716         case ICE_BLK_SW:
2717         case ICE_BLK_PE:
2718         default:
2719                 status = ICE_ERR_NOT_IMPL;
2720                 goto out;
2721         }
2722
2723         if (blk != ICE_BLK_ACL) {
2724                 /* ACL will handle the entry management */
2725                 ice_acquire_lock(&prof->entries_lock);
2726                 LIST_ADD(&e->l_entry, &prof->entries);
2727                 ice_release_lock(&prof->entries_lock);
2728         }
2729
2730         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
2731
2732 out:
2733         if (status && e) {
2734                 if (e->entry)
2735                         ice_free(hw, e->entry);
2736                 ice_free(hw, e);
2737         }
2738
2739         return status;
2740 }
2741
2742 /**
2743  * ice_flow_rem_entry - Remove a flow entry
2744  * @hw: pointer to the HW struct
2745  * @blk: classification stage
2746  * @entry_h: handle to the flow entry to be removed
2747  */
2748 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2749                                    u64 entry_h)
2750 {
2751         struct ice_flow_entry *entry;
2752         struct ice_flow_prof *prof;
2753         enum ice_status status = ICE_SUCCESS;
2754
2755         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2756                 return ICE_ERR_PARAM;
2757
2758         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2759
2760         /* Retain the pointer to the flow profile as the entry will be freed */
2761         prof = entry->prof;
2762
2763         if (prof) {
2764                 ice_acquire_lock(&prof->entries_lock);
2765                 status = ice_flow_rem_entry_sync(hw, blk, entry);
2766                 ice_release_lock(&prof->entries_lock);
2767         }
2768
2769         return status;
2770 }
2771
2772 /**
2773  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2774  * @seg: packet segment the field being set belongs to
2775  * @fld: field to be set
2776  * @field_type: type of the field
2777  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2778  *           entry's input buffer
2779  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2780  *            input buffer
2781  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2782  *            entry's input buffer
2783  *
2784  * This helper function stores information of a field being matched, including
2785  * the type of the field and the locations of the value to match, the mask, and
2786  * and the upper-bound value in the start of the input buffer for a flow entry.
2787  * This function should only be used for fixed-size data structures.
2788  *
2789  * This function also opportunistically determines the protocol headers to be
2790  * present based on the fields being set. Some fields cannot be used alone to
2791  * determine the protocol headers present. Sometimes, fields for particular
2792  * protocol headers are not matched. In those cases, the protocol headers
2793  * must be explicitly set.
2794  */
2795 static void
2796 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2797                      enum ice_flow_fld_match_type field_type, u16 val_loc,
2798                      u16 mask_loc, u16 last_loc)
2799 {
2800         u64 bit = BIT_ULL(fld);
2801
2802         seg->match |= bit;
2803         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
2804                 seg->range |= bit;
2805
2806         seg->fields[fld].type = field_type;
2807         seg->fields[fld].src.val = val_loc;
2808         seg->fields[fld].src.mask = mask_loc;
2809         seg->fields[fld].src.last = last_loc;
2810
2811         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
2812 }
2813
2814 /**
2815  * ice_flow_set_fld - specifies locations of field from entry's input buffer
2816  * @seg: packet segment the field being set belongs to
2817  * @fld: field to be set
2818  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2819  *           entry's input buffer
2820  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2821  *            input buffer
2822  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2823  *            entry's input buffer
2824  * @range: indicate if field being matched is to be in a range
2825  *
2826  * This function specifies the locations, in the form of byte offsets from the
2827  * start of the input buffer for a flow entry, from where the value to match,
2828  * the mask value, and upper value can be extracted. These locations are then
2829  * stored in the flow profile. When adding a flow entry associated with the
2830  * flow profile, these locations will be used to quickly extract the values and
2831  * create the content of a match entry. This function should only be used for
2832  * fixed-size data structures.
2833  */
2834 void
2835 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2836                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
2837 {
2838         enum ice_flow_fld_match_type t = range ?
2839                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
2840
2841         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
2842 }
2843
2844 /**
2845  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
2846  * @seg: packet segment the field being set belongs to
2847  * @fld: field to be set
2848  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2849  *           entry's input buffer
2850  * @pref_loc: location of prefix value from entry's input buffer
2851  * @pref_sz: size of the location holding the prefix value
2852  *
2853  * This function specifies the locations, in the form of byte offsets from the
2854  * start of the input buffer for a flow entry, from where the value to match
2855  * and the IPv4 prefix value can be extracted. These locations are then stored
2856  * in the flow profile. When adding flow entries to the associated flow profile,
2857  * these locations can be used to quickly extract the values to create the
2858  * content of a match entry. This function should only be used for fixed-size
2859  * data structures.
2860  */
2861 void
2862 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2863                         u16 val_loc, u16 pref_loc, u8 pref_sz)
2864 {
2865         /* For this type of field, the "mask" location is for the prefix value's
2866          * location and the "last" location is for the size of the location of
2867          * the prefix value.
2868          */
2869         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
2870                              pref_loc, (u16)pref_sz);
2871 }
2872
2873 /**
2874  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
2875  * @seg: packet segment the field being set belongs to
2876  * @off: offset of the raw field from the beginning of the segment in bytes
2877  * @len: length of the raw pattern to be matched
2878  * @val_loc: location of the value to match from entry's input buffer
2879  * @mask_loc: location of mask value from entry's input buffer
2880  *
2881  * This function specifies the offset of the raw field to be match from the
2882  * beginning of the specified packet segment, and the locations, in the form of
2883  * byte offsets from the start of the input buffer for a flow entry, from where
2884  * the value to match and the mask value to be extracted. These locations are
2885  * then stored in the flow profile. When adding flow entries to the associated
2886  * flow profile, these locations can be used to quickly extract the values to
2887  * create the content of a match entry. This function should only be used for
2888  * fixed-size data structures.
2889  */
2890 void
2891 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
2892                      u16 val_loc, u16 mask_loc)
2893 {
2894         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
2895                 seg->raws[seg->raws_cnt].off = off;
2896                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
2897                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
2898                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
2899                 /* The "last" field is used to store the length of the field */
2900                 seg->raws[seg->raws_cnt].info.src.last = len;
2901         }
2902
2903         /* Overflows of "raws" will be handled as an error condition later in
2904          * the flow when this information is processed.
2905          */
2906         seg->raws_cnt++;
2907 }
2908
/* L2 header types that may participate in an RSS segment */
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)

/* L3 header types; at most one may be selected per segment */
#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

/* L4 header types; at most one may be selected per segment */
#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)

/* Union of all header bits that are valid for RSS configuration */
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
2923
2924 /**
2925  * ice_flow_set_rss_seg_info - setup packet segments for RSS
2926  * @segs: pointer to the flow field segment(s)
2927  * @hash_fields: fields to be hashed on for the segment(s)
2928  * @flow_hdr: protocol header fields within a packet segment
2929  *
2930  * Helper function to extract fields from hash bitmap and use flow
2931  * header value to set flow field segment for further use in flow
2932  * profile entry or removal.
2933  */
2934 static enum ice_status
2935 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
2936                           u32 flow_hdr)
2937 {
2938         u64 val = hash_fields;
2939         u8 i;
2940
2941         for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
2942                 u64 bit = BIT_ULL(i);
2943
2944                 if (val & bit) {
2945                         ice_flow_set_fld(segs, (enum ice_flow_field)i,
2946                                          ICE_FLOW_FLD_OFF_INVAL,
2947                                          ICE_FLOW_FLD_OFF_INVAL,
2948                                          ICE_FLOW_FLD_OFF_INVAL, false);
2949                         val &= ~bit;
2950                 }
2951         }
2952         ICE_FLOW_SET_HDRS(segs, flow_hdr);
2953
2954         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
2955             ~ICE_FLOW_RSS_HDRS_INNER_MASK)
2956                 return ICE_ERR_PARAM;
2957
2958         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
2959         if (val && !ice_is_pow2(val))
2960                 return ICE_ERR_CFG;
2961
2962         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
2963         if (val && !ice_is_pow2(val))
2964                 return ICE_ERR_CFG;
2965
2966         return ICE_SUCCESS;
2967 }
2968
2969 /**
2970  * ice_rem_vsi_rss_list - remove VSI from RSS list
2971  * @hw: pointer to the hardware structure
2972  * @vsi_handle: software VSI handle
2973  *
2974  * Remove the VSI from all RSS configurations in the list.
2975  */
2976 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
2977 {
2978         struct ice_rss_cfg *r, *tmp;
2979
2980         if (LIST_EMPTY(&hw->rss_list_head))
2981                 return;
2982
2983         ice_acquire_lock(&hw->rss_locks);
2984         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
2985                                  ice_rss_cfg, l_entry) {
2986                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
2987                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
2988                                 LIST_DEL(&r->l_entry);
2989                                 ice_free(hw, r);
2990                         }
2991         }
2992         ice_release_lock(&hw->rss_locks);
2993 }
2994
2995 /**
2996  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
2997  * @hw: pointer to the hardware structure
2998  * @vsi_handle: software VSI handle
2999  *
3000  * This function will iterate through all flow profiles and disassociate
3001  * the VSI from that profile. If the flow profile has no VSIs it will
3002  * be removed.
3003  */
3004 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3005 {
3006         const enum ice_block blk = ICE_BLK_RSS;
3007         struct ice_flow_prof *p, *t;
3008         enum ice_status status = ICE_SUCCESS;
3009
3010         if (!ice_is_vsi_valid(hw, vsi_handle))
3011                 return ICE_ERR_PARAM;
3012
3013         if (LIST_EMPTY(&hw->fl_profs[blk]))
3014                 return ICE_SUCCESS;
3015
3016         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3017         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3018                                  l_entry) {
3019                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3020                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3021                         if (status)
3022                                 break;
3023
3024                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3025                                 status = ice_flow_rem_prof_sync(hw, blk, p);
3026                                 if (status)
3027                                         break;
3028                         }
3029                 }
3030         }
3031         ice_release_lock(&hw->fl_profs_locks[blk]);
3032
3033         return status;
3034 }
3035
3036 /**
3037  * ice_rem_rss_list - remove RSS configuration from list
3038  * @hw: pointer to the hardware structure
3039  * @vsi_handle: software VSI handle
3040  * @prof: pointer to flow profile
3041  *
3042  * Assumption: lock has already been acquired for RSS list
3043  */
3044 static void
3045 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3046 {
3047         struct ice_rss_cfg *r, *tmp;
3048
3049         /* Search for RSS hash fields associated to the VSI that match the
3050          * hash configurations associated to the flow profile. If found
3051          * remove from the RSS entry list of the VSI context and delete entry.
3052          */
3053         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3054                                  ice_rss_cfg, l_entry) {
3055                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3056                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3057                         ice_clear_bit(vsi_handle, r->vsis);
3058                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3059                                 LIST_DEL(&r->l_entry);
3060                                 ice_free(hw, r);
3061                         }
3062                         return;
3063                 }
3064         }
3065 }
3066
3067 /**
3068  * ice_add_rss_list - add RSS configuration to list
3069  * @hw: pointer to the hardware structure
3070  * @vsi_handle: software VSI handle
3071  * @prof: pointer to flow profile
3072  *
3073  * Assumption: lock has already been acquired for RSS list
3074  */
3075 static enum ice_status
3076 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3077 {
3078         struct ice_rss_cfg *r, *rss_cfg;
3079
3080         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3081                             ice_rss_cfg, l_entry)
3082                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3083                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3084                         ice_set_bit(vsi_handle, r->vsis);
3085                         return ICE_SUCCESS;
3086                 }
3087
3088         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3089         if (!rss_cfg)
3090                 return ICE_ERR_NO_MEMORY;
3091
3092         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3093         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3094         rss_cfg->symm = prof->cfg.symm;
3095         ice_set_bit(vsi_handle, rss_cfg->vsis);
3096
3097         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3098
3099         return ICE_SUCCESS;
3100 }
3101
/* Shifts/masks used to pack a flow profile ID (see format comment below) */
#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

/* Segment counts: 1 = outer headers only, 2 = outer + inner (tunneled) */
#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3121
3122 static void
3123 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3124 {
3125         u32 s = ((src % 4) << 3); /* byte shift */
3126         u32 v = dst | 0x80; /* value to program */
3127         u8 i = src / 4; /* register index */
3128         u32 reg;
3129
3130         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3131         reg = (reg & ~(0xff << s)) | (v << s);
3132         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3133 }
3134
3135 static void
3136 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3137 {
3138         int fv_last_word =
3139                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3140         int i;
3141
3142         for (i = 0; i < len; i++) {
3143                 ice_rss_config_xor_word(hw, prof_id,
3144                                         /* Yes, field vector in GLQF_HSYMM and
3145                                          * GLQF_HINSET is inversed!
3146                                          */
3147                                         fv_last_word - (src + i),
3148                                         fv_last_word - (dst + i));
3149                 ice_rss_config_xor_word(hw, prof_id,
3150                                         fv_last_word - (dst + i),
3151                                         fv_last_word - (src + i));
3152         }
3153 }
3154
3155 static void
3156 ice_rss_update_symm(struct ice_hw *hw,
3157                     struct ice_flow_prof *prof)
3158 {
3159         struct ice_prof_map *map;
3160         u8 prof_id, m;
3161
3162         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3163         prof_id = map->prof_id;
3164
3165         /* clear to default */
3166         for (m = 0; m < 6; m++)
3167                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3168         if (prof->cfg.symm) {
3169                 struct ice_flow_seg_info *seg =
3170                         &prof->segs[prof->segs_cnt - 1];
3171
3172                 struct ice_flow_seg_xtrct *ipv4_src =
3173                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3174                 struct ice_flow_seg_xtrct *ipv4_dst =
3175                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3176                 struct ice_flow_seg_xtrct *ipv6_src =
3177                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3178                 struct ice_flow_seg_xtrct *ipv6_dst =
3179                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3180
3181                 struct ice_flow_seg_xtrct *tcp_src =
3182                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3183                 struct ice_flow_seg_xtrct *tcp_dst =
3184                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3185
3186                 struct ice_flow_seg_xtrct *udp_src =
3187                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3188                 struct ice_flow_seg_xtrct *udp_dst =
3189                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3190
3191                 struct ice_flow_seg_xtrct *sctp_src =
3192                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3193                 struct ice_flow_seg_xtrct *sctp_dst =
3194                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3195
3196                 /* xor IPv4 */
3197                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3198                         ice_rss_config_xor(hw, prof_id,
3199                                            ipv4_src->idx, ipv4_dst->idx, 2);
3200
3201                 /* xor IPv6 */
3202                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3203                         ice_rss_config_xor(hw, prof_id,
3204                                            ipv6_src->idx, ipv6_dst->idx, 8);
3205
3206                 /* xor TCP */
3207                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3208                         ice_rss_config_xor(hw, prof_id,
3209                                            tcp_src->idx, tcp_dst->idx, 1);
3210
3211                 /* xor UDP */
3212                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3213                         ice_rss_config_xor(hw, prof_id,
3214                                            udp_src->idx, udp_dst->idx, 1);
3215
3216                 /* xor SCTP */
3217                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3218                         ice_rss_config_xor(hw, prof_id,
3219                                            sctp_src->idx, sctp_dst->idx, 1);
3220         }
3221 }
3222
/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 * @segs_cnt: packet segment count
 * @symm: symmetric hash enable/disable
 *
 * Builds the packet segment for the requested hash, then either reuses,
 * rewires, or creates a flow profile so the VSI hashes on those fields.
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt, bool symm)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_PARAM;

	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
						      sizeof(*segs));
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto exit;

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		if (prof->cfg.symm == symm)
			goto exit;
		/* Same profile, only the symmetric setting differs: flip it
		 * and reprogram the XOR registers.
		 */
		prof->cfg.symm = symm;
		goto update_symm;
	}

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disasscociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		if (prof->cfg.symm == symm) {
			status = ice_flow_assoc_prof(hw, blk, prof,
						     vsi_handle);
			if (!status)
				status = ice_add_rss_list(hw, vsi_handle,
							  prof);
		} else {
			/* if a profile exist but with different symmetric
			 * requirement, just return error.
			 */
			status = ICE_ERR_NOT_SUPPORTED;
		}
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(hashed_flds,
						       segs[segs_cnt - 1].hdrs,
						       segs_cnt),
				   segs, segs_cnt, NULL, 0, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

	prof->cfg.symm = symm;

update_symm:
	ice_rss_update_symm(hw, prof);

exit:
	ice_free(hw, segs);
	return status;
}
3347
3348 /**
3349  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3350  * @hw: pointer to the hardware structure
3351  * @vsi_handle: software VSI handle
3352  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3353  * @addl_hdrs: protocol header fields
3354  * @symm: symmetric hash enable/disable
3355  *
3356  * This function will generate a flow profile based on fields associated with
3357  * the input fields to hash on, the flow type and use the VSI number to add
3358  * a flow entry to the profile.
3359  */
3360 enum ice_status
3361 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3362                 u32 addl_hdrs, bool symm)
3363 {
3364         enum ice_status status;
3365
3366         if (hashed_flds == ICE_HASH_INVALID ||
3367             !ice_is_vsi_valid(hw, vsi_handle))
3368                 return ICE_ERR_PARAM;
3369
3370         ice_acquire_lock(&hw->rss_locks);
3371         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3372                                       ICE_RSS_OUTER_HEADERS, symm);
3373         if (!status)
3374                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3375                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3376                                               symm);
3377         ice_release_lock(&hw->rss_locks);
3378
3379         return status;
3380 }
3381
3382 /**
3383  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3384  * @hw: pointer to the hardware structure
3385  * @vsi_handle: software VSI handle
3386  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3387  * @addl_hdrs: Protocol header fields within a packet segment
3388  * @segs_cnt: packet segment count
3389  *
3390  * Assumption: lock has already been acquired for RSS list
3391  */
3392 static enum ice_status
3393 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3394                      u32 addl_hdrs, u8 segs_cnt)
3395 {
3396         const enum ice_block blk = ICE_BLK_RSS;
3397         struct ice_flow_seg_info *segs;
3398         struct ice_flow_prof *prof;
3399         enum ice_status status;
3400
3401         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3402                                                       sizeof(*segs));
3403         if (!segs)
3404                 return ICE_ERR_NO_MEMORY;
3405
3406         /* Construct the packet segment info from the hashed fields */
3407         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3408                                            addl_hdrs);
3409         if (status)
3410                 goto out;
3411
3412         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3413                                         vsi_handle,
3414                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3415         if (!prof) {
3416                 status = ICE_ERR_DOES_NOT_EXIST;
3417                 goto out;
3418         }
3419
3420         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3421         if (status)
3422                 goto out;
3423
3424         /* Remove RSS configuration from VSI context before deleting
3425          * the flow profile.
3426          */
3427         ice_rem_rss_list(hw, vsi_handle, prof);
3428
3429         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3430                 status = ice_flow_rem_prof(hw, blk, prof->id);
3431
3432 out:
3433         ice_free(hw, segs);
3434         return status;
3435 }
3436
3437 /**
3438  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3439  * @hw: pointer to the hardware structure
3440  * @vsi_handle: software VSI handle
3441  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3442  * @addl_hdrs: Protocol header fields within a packet segment
3443  *
3444  * This function will lookup the flow profile based on the input
3445  * hash field bitmap, iterate through the profile entry list of
3446  * that profile and find entry associated with input VSI to be
3447  * removed. Calls are made to underlying flow apis which will in
3448  * turn build or update buffers for RSS XLT1 section.
3449  */
3450 enum ice_status
3451 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3452                 u32 addl_hdrs)
3453 {
3454         enum ice_status status;
3455
3456         if (hashed_flds == ICE_HASH_INVALID ||
3457             !ice_is_vsi_valid(hw, vsi_handle))
3458                 return ICE_ERR_PARAM;
3459
3460         ice_acquire_lock(&hw->rss_locks);
3461         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3462                                       ICE_RSS_OUTER_HEADERS);
3463         if (!status)
3464                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3465                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3466         ice_release_lock(&hw->rss_locks);
3467
3468         return status;
3469 }
3470
3471 /**
3472  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3473  * @hw: pointer to the hardware structure
3474  * @vsi_handle: software VSI handle
3475  */
3476 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3477 {
3478         enum ice_status status = ICE_SUCCESS;
3479         struct ice_rss_cfg *r;
3480
3481         if (!ice_is_vsi_valid(hw, vsi_handle))
3482                 return ICE_ERR_PARAM;
3483
3484         ice_acquire_lock(&hw->rss_locks);
3485         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3486                             ice_rss_cfg, l_entry) {
3487                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3488                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3489                                                       r->hashed_flds,
3490                                                       r->packet_hdr,
3491                                                       ICE_RSS_OUTER_HEADERS,
3492                                                       r->symm);
3493                         if (status)
3494                                 break;
3495                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3496                                                       r->hashed_flds,
3497                                                       r->packet_hdr,
3498                                                       ICE_RSS_INNER_HEADERS,
3499                                                       r->symm);
3500                         if (status)
3501                                 break;
3502                 }
3503         }
3504         ice_release_lock(&hw->rss_locks);
3505
3506         return status;
3507 }
3508
3509 /**
3510  * ice_get_rss_cfg - returns hashed fields for the given header types
3511  * @hw: pointer to the hardware structure
3512  * @vsi_handle: software VSI handle
3513  * @hdrs: protocol header type
3514  *
3515  * This function will return the match fields of the first instance of flow
3516  * profile having the given header types and containing input VSI
3517  */
3518 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3519 {
3520         struct ice_rss_cfg *r, *rss_cfg = NULL;
3521
3522         /* verify if the protocol header is non zero and VSI is valid */
3523         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3524                 return ICE_HASH_INVALID;
3525
3526         ice_acquire_lock(&hw->rss_locks);
3527         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3528                             ice_rss_cfg, l_entry)
3529                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3530                     r->packet_hdr == hdrs) {
3531                         rss_cfg = r;
3532                         break;
3533                 }
3534         ice_release_lock(&hw->rss_locks);
3535
3536         return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
3537 }