net/ice/base: introduce and use for each bit iterator
dpdk.git: drivers/net/ice/base/ice_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
17 #define ICE_FLOW_FLD_SZ_IP_TTL          1
18 #define ICE_FLOW_FLD_SZ_IP_PROT         1
19 #define ICE_FLOW_FLD_SZ_PORT            2
20 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
21 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
22 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
23 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
24 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
25 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
26 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
27 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
28 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
29 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
30 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
31 #define ICE_FLOW_FLD_SZ_AH_SPI  4
32 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
33
34 /* Describe properties of a protocol header field */
35 struct ice_flow_field_info {
36         enum ice_flow_seg_hdr hdr;
37         s16 off;        /* Offset from start of a protocol header, in bits */
38         u16 size;       /* Size of the field in bits */
39         u16 mask;       /* 16-bit mask for field */
40 };
41
42 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
43         .hdr = _hdr, \
44         .off = (_offset_bytes) * BITS_PER_BYTE, \
45         .size = (_size_bytes) * BITS_PER_BYTE, \
46         .mask = 0, \
47 }
48
49 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
50         .hdr = _hdr, \
51         .off = (_offset_bytes) * BITS_PER_BYTE, \
52         .size = (_size_bytes) * BITS_PER_BYTE, \
53         .mask = _mask, \
54 }
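/*
 * Example (for illustration only): the macros above store byte-based offsets
 * and sizes in bit units, so
 *
 *   ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR)
 *
 * yields .off = 12 * BITS_PER_BYTE = 96 and .size = 4 * BITS_PER_BYTE = 32,
 * which is how the IPv4 source address entry in the table below is built.
 */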
55
56 /* Table containing properties of supported protocol header fields */
57 static const
58 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
59         /* Ether */
60         /* ICE_FLOW_FIELD_IDX_ETH_DA */
61         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
62         /* ICE_FLOW_FIELD_IDX_ETH_SA */
63         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
64         /* ICE_FLOW_FIELD_IDX_S_VLAN */
65         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
66         /* ICE_FLOW_FIELD_IDX_C_VLAN */
67         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
68         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
69         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
70         /* IPv4 / IPv6 */
71         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
72         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
73                               0x00fc),
74         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
75         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
76                               0x0ff0),
77         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
78         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
80         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
81         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
82                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
83         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
84         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
86         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
87         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
88                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
89         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
90         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
91         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
92         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
94         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
95         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
96         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
99                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
100         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
101         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
102                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
103         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
104         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
105                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
106         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
107         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
108                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
109         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
110         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
111                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
112         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
113         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
114                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
115         /* Transport */
116         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
117         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
118         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
119         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
120         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
121         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
122         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
123         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
124         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
125         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
126         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
127         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
128         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
129         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
130         /* ARP */
131         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
132         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
133         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
134         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
136         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
137         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
138         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
139         /* ICE_FLOW_FIELD_IDX_ARP_OP */
140         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
141         /* ICMP */
142         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
143         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
144         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
145         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
146         /* GRE */
147         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
148         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
149         /* GTP */
150         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
151         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
152                           ICE_FLOW_FLD_SZ_GTP_TEID),
153         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
154         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
155                           ICE_FLOW_FLD_SZ_GTP_TEID),
156         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
157         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
158                           ICE_FLOW_FLD_SZ_GTP_TEID),
159         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
160         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
161                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
162         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
163         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
164                           ICE_FLOW_FLD_SZ_GTP_TEID),
165         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
166         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
167                           ICE_FLOW_FLD_SZ_GTP_TEID),
168         /* PPPOE */
169         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
170         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
171                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
172         /* PFCP */
173         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
174         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
175                           ICE_FLOW_FLD_SZ_PFCP_SEID),
176         /* L2TPV3 */
177         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
178         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
179                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
180         /* ESP */
181         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
182         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
183                           ICE_FLOW_FLD_SZ_ESP_SPI),
184         /* AH */
185         /* ICE_FLOW_FIELD_IDX_AH_SPI */
186         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
187                           ICE_FLOW_FLD_SZ_AH_SPI),
188         /* NAT_T_ESP */
189         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
190         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
191                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
192 };
193
194 /* Bitmaps indicating relevant packet types for a particular protocol header
195  *
196  * Packet types for packets with an Outer/First/Single MAC header
197  */
198 static const u32 ice_ptypes_mac_ofos[] = {
199         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
200         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
201         0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
202         0x00000000, 0x00000000, 0x00000000, 0x00000000,
203         0x00000000, 0x00000000, 0x00000000, 0x00000000,
204         0x00000000, 0x00000000, 0x00000000, 0x00000000,
205         0x00000000, 0x00000000, 0x00000000, 0x00000000,
206         0x00000000, 0x00000000, 0x00000000, 0x00000000,
207 };
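/*
 * Note on reading these ptype tables (illustrative): each array is treated as
 * one flat bitmap (cast to ice_bitmap_t further below), so bit n of 32-bit
 * word w corresponds to packet type number w * 32 + n, assuming the
 * little-endian layout that cast relies on. In the word-0 value 0xFDC00846
 * above, for example, bit 1 is set, so packet type 1 is considered relevant
 * for an Outer/First/Single MAC header; cleared bits exclude packet types.
 */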
208
209 /* Packet types for packets with an Innermost/Last MAC VLAN header */
210 static const u32 ice_ptypes_macvlan_il[] = {
211         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
212         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
213         0x00000000, 0x00000000, 0x00000000, 0x00000000,
214         0x00000000, 0x00000000, 0x00000000, 0x00000000,
215         0x00000000, 0x00000000, 0x00000000, 0x00000000,
216         0x00000000, 0x00000000, 0x00000000, 0x00000000,
217         0x00000000, 0x00000000, 0x00000000, 0x00000000,
218         0x00000000, 0x00000000, 0x00000000, 0x00000000,
219 };
220
221 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
222  * include IPV4 other PTYPEs
223  */
224 static const u32 ice_ptypes_ipv4_ofos[] = {
225         0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
226         0x00000000, 0x00000155, 0x00000000, 0x00000000,
227         0x00000000, 0x000FC000, 0x00000000, 0x00000000,
228         0x00000000, 0x00000000, 0x00000000, 0x00000000,
229         0x00000000, 0x00000000, 0x00000000, 0x00000000,
230         0x00000000, 0x00000000, 0x00000000, 0x00000000,
231         0x00000000, 0x00000000, 0x00000000, 0x00000000,
232         0x00000000, 0x00000000, 0x00000000, 0x00000000,
233 };
234
235 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
236  * IPV4 other PTYPEs
237  */
238 static const u32 ice_ptypes_ipv4_ofos_all[] = {
239         0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
240         0x00000000, 0x00000155, 0x00000000, 0x00000000,
241         0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
242         0x00000000, 0x00000000, 0x00000000, 0x00000000,
243         0x00000000, 0x00000000, 0x00000000, 0x00000000,
244         0x00000000, 0x00000000, 0x00000000, 0x00000000,
245         0x00000000, 0x00000000, 0x00000000, 0x00000000,
246         0x00000000, 0x00000000, 0x00000000, 0x00000000,
247 };
248
249 /* Packet types for packets with an Innermost/Last IPv4 header */
250 static const u32 ice_ptypes_ipv4_il[] = {
251         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
252         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
253         0x00000000, 0x00000000, 0x001FF800, 0x00000000,
254         0x00000000, 0x00000000, 0x00000000, 0x00000000,
255         0x00000000, 0x00000000, 0x00000000, 0x00000000,
256         0x00000000, 0x00000000, 0x00000000, 0x00000000,
257         0x00000000, 0x00000000, 0x00000000, 0x00000000,
258         0x00000000, 0x00000000, 0x00000000, 0x00000000,
259 };
260
261 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
262  * include IPV6 other PTYPEs
263  */
264 static const u32 ice_ptypes_ipv6_ofos[] = {
265         0x00000000, 0x00000000, 0x77000000, 0x10002000,
266         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
267         0x00000000, 0x03F00000, 0x00000000, 0x00000000,
268         0x00000000, 0x00000000, 0x00000000, 0x00000000,
269         0x00000000, 0x00000000, 0x00000000, 0x00000000,
270         0x00000000, 0x00000000, 0x00000000, 0x00000000,
271         0x00000000, 0x00000000, 0x00000000, 0x00000000,
272         0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 };
274
275 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
276  * IPV6 other PTYPEs
277  */
278 static const u32 ice_ptypes_ipv6_ofos_all[] = {
279         0x00000000, 0x00000000, 0x77000000, 0x10002000,
280         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
281         0x00000000, 0x03F00000, 0x7C1F0000, 0x00000206,
282         0x00000000, 0x00000000, 0x00000000, 0x00000000,
283         0x00000000, 0x00000000, 0x00000000, 0x00000000,
284         0x00000000, 0x00000000, 0x00000000, 0x00000000,
285         0x00000000, 0x00000000, 0x00000000, 0x00000000,
286         0x00000000, 0x00000000, 0x00000000, 0x00000000,
287 };
288
289 /* Packet types for packets with an Innermost/Last IPv6 header */
290 static const u32 ice_ptypes_ipv6_il[] = {
291         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
292         0x00000770, 0x00000000, 0x00000000, 0x00000000,
293         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
294         0x00000000, 0x00000000, 0x00000000, 0x00000000,
295         0x00000000, 0x00000000, 0x00000000, 0x00000000,
296         0x00000000, 0x00000000, 0x00000000, 0x00000000,
297         0x00000000, 0x00000000, 0x00000000, 0x00000000,
298         0x00000000, 0x00000000, 0x00000000, 0x00000000,
299 };
300
301 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
302 static const u32 ice_ipv4_ofos_no_l4[] = {
303         0x10C00000, 0x04000800, 0x00000000, 0x00000000,
304         0x00000000, 0x00000000, 0x00000000, 0x00000000,
305         0x00000000, 0x000cc000, 0x00000000, 0x00000000,
306         0x00000000, 0x00000000, 0x00000000, 0x00000000,
307         0x00000000, 0x00000000, 0x00000000, 0x00000000,
308         0x00000000, 0x00000000, 0x00000000, 0x00000000,
309         0x00000000, 0x00000000, 0x00000000, 0x00000000,
310         0x00000000, 0x00000000, 0x00000000, 0x00000000,
311 };
312
313 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
314 static const u32 ice_ipv4_il_no_l4[] = {
315         0x60000000, 0x18043008, 0x80000002, 0x6010c021,
316         0x00000008, 0x00000000, 0x00000000, 0x00000000,
317         0x00000000, 0x00000000, 0x00139800, 0x00000000,
318         0x00000000, 0x00000000, 0x00000000, 0x00000000,
319         0x00000000, 0x00000000, 0x00000000, 0x00000000,
320         0x00000000, 0x00000000, 0x00000000, 0x00000000,
321         0x00000000, 0x00000000, 0x00000000, 0x00000000,
322         0x00000000, 0x00000000, 0x00000000, 0x00000000,
323 };
324
325 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
326 static const u32 ice_ipv6_ofos_no_l4[] = {
327         0x00000000, 0x00000000, 0x43000000, 0x10002000,
328         0x00000000, 0x00000000, 0x00000000, 0x00000000,
329         0x00000000, 0x02300000, 0x00000000, 0x00000000,
330         0x00000000, 0x00000000, 0x00000000, 0x00000000,
331         0x00000000, 0x00000000, 0x00000000, 0x00000000,
332         0x00000000, 0x00000000, 0x00000000, 0x00000000,
333         0x00000000, 0x00000000, 0x00000000, 0x00000000,
334         0x00000000, 0x00000000, 0x00000000, 0x00000000,
335 };
336
337 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
338 static const u32 ice_ipv6_il_no_l4[] = {
339         0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
340         0x00000430, 0x00000000, 0x00000000, 0x00000000,
341         0x00000000, 0x00000000, 0x4e600000, 0x00000000,
342         0x00000000, 0x00000000, 0x00000000, 0x00000000,
343         0x00000000, 0x00000000, 0x00000000, 0x00000000,
344         0x00000000, 0x00000000, 0x00000000, 0x00000000,
345         0x00000000, 0x00000000, 0x00000000, 0x00000000,
346         0x00000000, 0x00000000, 0x00000000, 0x00000000,
347 };
348
349 /* Packet types for packets with an Outermost/First ARP header */
350 static const u32 ice_ptypes_arp_of[] = {
351         0x00000800, 0x00000000, 0x00000000, 0x00000000,
352         0x00000000, 0x00000000, 0x00000000, 0x00000000,
353         0x00000000, 0x00000000, 0x00000000, 0x00000000,
354         0x00000000, 0x00000000, 0x00000000, 0x00000000,
355         0x00000000, 0x00000000, 0x00000000, 0x00000000,
356         0x00000000, 0x00000000, 0x00000000, 0x00000000,
357         0x00000000, 0x00000000, 0x00000000, 0x00000000,
358         0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 };
360
361 /* UDP Packet types for non-tunneled packets or tunneled
362  * packets with inner UDP.
363  */
364 static const u32 ice_ptypes_udp_il[] = {
365         0x81000000, 0x20204040, 0x04000010, 0x80810102,
366         0x00000040, 0x00000000, 0x00000000, 0x00000000,
367         0x00000000, 0x00410000, 0x90842000, 0x00000007,
368         0x00000000, 0x00000000, 0x00000000, 0x00000000,
369         0x00000000, 0x00000000, 0x00000000, 0x00000000,
370         0x00000000, 0x00000000, 0x00000000, 0x00000000,
371         0x00000000, 0x00000000, 0x00000000, 0x00000000,
372         0x00000000, 0x00000000, 0x00000000, 0x00000000,
373 };
374
375 /* Packet types for packets with an Innermost/Last TCP header */
376 static const u32 ice_ptypes_tcp_il[] = {
377         0x04000000, 0x80810102, 0x10000040, 0x02040408,
378         0x00000102, 0x00000000, 0x00000000, 0x00000000,
379         0x00000000, 0x00820000, 0x21084000, 0x00000000,
380         0x00000000, 0x00000000, 0x00000000, 0x00000000,
381         0x00000000, 0x00000000, 0x00000000, 0x00000000,
382         0x00000000, 0x00000000, 0x00000000, 0x00000000,
383         0x00000000, 0x00000000, 0x00000000, 0x00000000,
384         0x00000000, 0x00000000, 0x00000000, 0x00000000,
385 };
386
387 /* Packet types for packets with an Innermost/Last SCTP header */
388 static const u32 ice_ptypes_sctp_il[] = {
389         0x08000000, 0x01020204, 0x20000081, 0x04080810,
390         0x00000204, 0x00000000, 0x00000000, 0x00000000,
391         0x00000000, 0x01040000, 0x00000000, 0x00000000,
392         0x00000000, 0x00000000, 0x00000000, 0x00000000,
393         0x00000000, 0x00000000, 0x00000000, 0x00000000,
394         0x00000000, 0x00000000, 0x00000000, 0x00000000,
395         0x00000000, 0x00000000, 0x00000000, 0x00000000,
396         0x00000000, 0x00000000, 0x00000000, 0x00000000,
397 };
398
399 /* Packet types for packets with an Outermost/First ICMP header */
400 static const u32 ice_ptypes_icmp_of[] = {
401         0x10000000, 0x00000000, 0x00000000, 0x00000000,
402         0x00000000, 0x00000000, 0x00000000, 0x00000000,
403         0x00000000, 0x00000000, 0x00000000, 0x00000000,
404         0x00000000, 0x00000000, 0x00000000, 0x00000000,
405         0x00000000, 0x00000000, 0x00000000, 0x00000000,
406         0x00000000, 0x00000000, 0x00000000, 0x00000000,
407         0x00000000, 0x00000000, 0x00000000, 0x00000000,
408         0x00000000, 0x00000000, 0x00000000, 0x00000000,
409 };
410
411 /* Packet types for packets with an Innermost/Last ICMP header */
412 static const u32 ice_ptypes_icmp_il[] = {
413         0x00000000, 0x02040408, 0x40000102, 0x08101020,
414         0x00000408, 0x00000000, 0x00000000, 0x00000000,
415         0x00000000, 0x00000000, 0x42108000, 0x00000000,
416         0x00000000, 0x00000000, 0x00000000, 0x00000000,
417         0x00000000, 0x00000000, 0x00000000, 0x00000000,
418         0x00000000, 0x00000000, 0x00000000, 0x00000000,
419         0x00000000, 0x00000000, 0x00000000, 0x00000000,
420         0x00000000, 0x00000000, 0x00000000, 0x00000000,
421 };
422
423 /* Packet types for packets with an Outermost/First GRE header */
424 static const u32 ice_ptypes_gre_of[] = {
425         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
426         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
427         0x00000000, 0x00000000, 0x00000000, 0x00000000,
428         0x00000000, 0x00000000, 0x00000000, 0x00000000,
429         0x00000000, 0x00000000, 0x00000000, 0x00000000,
430         0x00000000, 0x00000000, 0x00000000, 0x00000000,
431         0x00000000, 0x00000000, 0x00000000, 0x00000000,
432         0x00000000, 0x00000000, 0x00000000, 0x00000000,
433 };
434
435 /* Packet types for packets with an Innermost/Last MAC header */
436 static const u32 ice_ptypes_mac_il[] = {
437         0x00000000, 0x00000000, 0x00000000, 0x00000000,
438         0x00000000, 0x00000000, 0x00000000, 0x00000000,
439         0x00000000, 0x00000000, 0x00000000, 0x00000000,
440         0x00000000, 0x00000000, 0x00000000, 0x00000000,
441         0x00000000, 0x00000000, 0x00000000, 0x00000000,
442         0x00000000, 0x00000000, 0x00000000, 0x00000000,
443         0x00000000, 0x00000000, 0x00000000, 0x00000000,
444         0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 };
446
447 /* Packet types for GTPC */
448 static const u32 ice_ptypes_gtpc[] = {
449         0x00000000, 0x00000000, 0x00000000, 0x00000000,
450         0x00000000, 0x00000000, 0x00000000, 0x00000000,
451         0x00000000, 0x00000000, 0x00000180, 0x00000000,
452         0x00000000, 0x00000000, 0x00000000, 0x00000000,
453         0x00000000, 0x00000000, 0x00000000, 0x00000000,
454         0x00000000, 0x00000000, 0x00000000, 0x00000000,
455         0x00000000, 0x00000000, 0x00000000, 0x00000000,
456         0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 };
458
459 /* Packet types for GTPC with TEID */
460 static const u32 ice_ptypes_gtpc_tid[] = {
461         0x00000000, 0x00000000, 0x00000000, 0x00000000,
462         0x00000000, 0x00000000, 0x00000000, 0x00000000,
463         0x00000000, 0x00000000, 0x00000060, 0x00000000,
464         0x00000000, 0x00000000, 0x00000000, 0x00000000,
465         0x00000000, 0x00000000, 0x00000000, 0x00000000,
466         0x00000000, 0x00000000, 0x00000000, 0x00000000,
467         0x00000000, 0x00000000, 0x00000000, 0x00000000,
468         0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 };
470
471 /* Packet types for GTPU */
472 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
473         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
474         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
475         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
476         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
477         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
478         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
479         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
480         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
481         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
482         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
483         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
484         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
485         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
486         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
487         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
488         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
489         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
490         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
491         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
492         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
493 };
494
495 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
496         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
497         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
498         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
499         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
500         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
501         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
502         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
503         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
504         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
505         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
506         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
507         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
508         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
509         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
510         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
511         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
512         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
513         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
514         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
515         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
516 };
517
518 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
519         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
520         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
521         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
522         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
523         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
524         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
525         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
526         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
527         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
528         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
529         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
530         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
531         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
532         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
533         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
534         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
535         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
536         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
537         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
538         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
539 };
540
541 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
542         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
543         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
544         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
545         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
546         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
547         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
548         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
549         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
550         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
551         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
552         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
553         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
554         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
555         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
556         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
557         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
558         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
559         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
560         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
561         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
562 };
563
564 static const u32 ice_ptypes_gtpu[] = {
565         0x00000000, 0x00000000, 0x00000000, 0x00000000,
566         0x00000000, 0x00000000, 0x00000000, 0x00000000,
567         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
568         0x00000000, 0x00000000, 0x00000000, 0x00000000,
569         0x00000000, 0x00000000, 0x00000000, 0x00000000,
570         0x00000000, 0x00000000, 0x00000000, 0x00000000,
571         0x00000000, 0x00000000, 0x00000000, 0x00000000,
572         0x00000000, 0x00000000, 0x00000000, 0x00000000,
573 };
574
575 /* Packet types for pppoe */
576 static const u32 ice_ptypes_pppoe[] = {
577         0x00000000, 0x00000000, 0x00000000, 0x00000000,
578         0x00000000, 0x00000000, 0x00000000, 0x00000000,
579         0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
580         0x00000000, 0x00000000, 0x00000000, 0x00000000,
581         0x00000000, 0x00000000, 0x00000000, 0x00000000,
582         0x00000000, 0x00000000, 0x00000000, 0x00000000,
583         0x00000000, 0x00000000, 0x00000000, 0x00000000,
584         0x00000000, 0x00000000, 0x00000000, 0x00000000,
585 };
586
587 /* Packet types for packets with PFCP NODE header */
588 static const u32 ice_ptypes_pfcp_node[] = {
589         0x00000000, 0x00000000, 0x00000000, 0x00000000,
590         0x00000000, 0x00000000, 0x00000000, 0x00000000,
591         0x00000000, 0x00000000, 0x80000000, 0x00000002,
592         0x00000000, 0x00000000, 0x00000000, 0x00000000,
593         0x00000000, 0x00000000, 0x00000000, 0x00000000,
594         0x00000000, 0x00000000, 0x00000000, 0x00000000,
595         0x00000000, 0x00000000, 0x00000000, 0x00000000,
596         0x00000000, 0x00000000, 0x00000000, 0x00000000,
597 };
598
599 /* Packet types for packets with PFCP SESSION header */
600 static const u32 ice_ptypes_pfcp_session[] = {
601         0x00000000, 0x00000000, 0x00000000, 0x00000000,
602         0x00000000, 0x00000000, 0x00000000, 0x00000000,
603         0x00000000, 0x00000000, 0x00000000, 0x00000005,
604         0x00000000, 0x00000000, 0x00000000, 0x00000000,
605         0x00000000, 0x00000000, 0x00000000, 0x00000000,
606         0x00000000, 0x00000000, 0x00000000, 0x00000000,
607         0x00000000, 0x00000000, 0x00000000, 0x00000000,
608         0x00000000, 0x00000000, 0x00000000, 0x00000000,
609 };
610
611 /* Packet types for l2tpv3 */
612 static const u32 ice_ptypes_l2tpv3[] = {
613         0x00000000, 0x00000000, 0x00000000, 0x00000000,
614         0x00000000, 0x00000000, 0x00000000, 0x00000000,
615         0x00000000, 0x00000000, 0x00000000, 0x00000300,
616         0x00000000, 0x00000000, 0x00000000, 0x00000000,
617         0x00000000, 0x00000000, 0x00000000, 0x00000000,
618         0x00000000, 0x00000000, 0x00000000, 0x00000000,
619         0x00000000, 0x00000000, 0x00000000, 0x00000000,
620         0x00000000, 0x00000000, 0x00000000, 0x00000000,
621 };
622
623 /* Packet types for esp */
624 static const u32 ice_ptypes_esp[] = {
625         0x00000000, 0x00000000, 0x00000000, 0x00000000,
626         0x00000000, 0x00000003, 0x00000000, 0x00000000,
627         0x00000000, 0x00000000, 0x00000000, 0x00000000,
628         0x00000000, 0x00000000, 0x00000000, 0x00000000,
629         0x00000000, 0x00000000, 0x00000000, 0x00000000,
630         0x00000000, 0x00000000, 0x00000000, 0x00000000,
631         0x00000000, 0x00000000, 0x00000000, 0x00000000,
632         0x00000000, 0x00000000, 0x00000000, 0x00000000,
633 };
634
635 /* Packet types for ah */
636 static const u32 ice_ptypes_ah[] = {
637         0x00000000, 0x00000000, 0x00000000, 0x00000000,
638         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
639         0x00000000, 0x00000000, 0x00000000, 0x00000000,
640         0x00000000, 0x00000000, 0x00000000, 0x00000000,
641         0x00000000, 0x00000000, 0x00000000, 0x00000000,
642         0x00000000, 0x00000000, 0x00000000, 0x00000000,
643         0x00000000, 0x00000000, 0x00000000, 0x00000000,
644         0x00000000, 0x00000000, 0x00000000, 0x00000000,
645 };
646
647 /* Packet types for packets with NAT_T ESP header */
648 static const u32 ice_ptypes_nat_t_esp[] = {
649         0x00000000, 0x00000000, 0x00000000, 0x00000000,
650         0x00000000, 0x00000030, 0x00000000, 0x00000000,
651         0x00000000, 0x00000000, 0x00000000, 0x00000000,
652         0x00000000, 0x00000000, 0x00000000, 0x00000000,
653         0x00000000, 0x00000000, 0x00000000, 0x00000000,
654         0x00000000, 0x00000000, 0x00000000, 0x00000000,
655         0x00000000, 0x00000000, 0x00000000, 0x00000000,
656         0x00000000, 0x00000000, 0x00000000, 0x00000000,
657 };
658
659 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
660         0x00000846, 0x00000000, 0x00000000, 0x00000000,
661         0x00000000, 0x00000000, 0x00000000, 0x00000000,
662         0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
663         0x00000000, 0x00000000, 0x00000000, 0x00000000,
664         0x00000000, 0x00000000, 0x00000000, 0x00000000,
665         0x00000000, 0x00000000, 0x00000000, 0x00000000,
666         0x00000000, 0x00000000, 0x00000000, 0x00000000,
667         0x00000000, 0x00000000, 0x00000000, 0x00000000,
668 };
669
670 /* Manage parameters and info used during the creation of a flow profile */
671 struct ice_flow_prof_params {
672         enum ice_block blk;
673         u16 entry_length; /* # of bytes formatted entry will require */
674         u8 es_cnt;
675         struct ice_flow_prof *prof;
676
677         /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0,
678          * which gives us the direction flags.
679          */
680         struct ice_fv_word es[ICE_MAX_FV_WORDS];
681         /* attributes can be used to add attributes to a particular PTYPE */
682         const struct ice_ptype_attributes *attr;
683         u16 attr_cnt;
684
685         u16 mask[ICE_MAX_FV_WORDS];
686         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
687 };
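/*
 * es[] holds the extraction sequence being built, one protocol ID/offset pair
 * per field-vector word, and es_cnt is the number of entries filled so far.
 * mask[] is sized to match es[] (presumably one mask per entry), while ptypes
 * starts as an all-ones bitmap and is narrowed down to the packet types
 * compatible with every segment; see ice_flow_proc_seg_hdrs() below.
 */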
688
689 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
690         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
691         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
692         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
693         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
694         ICE_FLOW_SEG_HDR_NAT_T_ESP)
695
696 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
697         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
698 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
699         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
700          ICE_FLOW_SEG_HDR_ARP)
701 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
702         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
703          ICE_FLOW_SEG_HDR_SCTP)
704 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
705 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
706         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
707
708 /**
709  * ice_flow_val_hdrs - checks packet segments for valid protocol header combinations
710  * @segs: array of one or more packet segments that describe the flow
711  * @segs_cnt: number of packet segments provided
712  */
713 static enum ice_status
714 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
715 {
716         u8 i;
717
718         for (i = 0; i < segs_cnt; i++) {
719                 /* Multiple L3 headers */
720                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
721                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
722                         return ICE_ERR_PARAM;
723
724                 /* Multiple L4 headers */
725                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
726                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
727                         return ICE_ERR_PARAM;
728         }
729
730         return ICE_SUCCESS;
731 }
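/*
 * Example: a segment with hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6
 * has two bits set inside ICE_FLOW_SEG_HDRS_L3_MASK, so the ice_is_pow2()
 * check above fails and ICE_ERR_PARAM is returned: at most one L3 and one L4
 * header type may be requested per segment.
 */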
732
733 /* Sizes of fixed known protocol headers without header options */
734 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
735 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
736 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
737 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
738 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
739 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
740 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
741 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
742 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
743
744 /**
745  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
746  * @params: information about the flow to be processed
747  * @seg: index of packet segment whose header size is to be determined
748  */
749 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
750 {
751         u16 sz;
752
753         /* L2 headers */
754         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
755                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
756
757         /* L3 headers */
758         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
759                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
760         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
761                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
762         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
763                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
764         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
765                 /* An L3 header is required if an L4 header is specified */
766                 return 0;
767
768         /* L4 headers */
769         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
770                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
771         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
772                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
773         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
774                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
775         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
776                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
777
778         return sz;
779 }
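/*
 * Worked example for the sizing above (headers without options): a segment
 * carrying VLAN, IPv4 and TCP headers is sized as
 * ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (16) + ICE_FLOW_PROT_HDR_SZ_IPV4 (20) +
 * ICE_FLOW_PROT_HDR_SZ_TCP (20) = 56 bytes.
 */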
780
781 /**
782  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
783  * @params: information about the flow to be processed
784  *
785  * This function identifies the packet types associated with the protocol
786  * headers present in the packet segments of the specified flow profile.
787  */
788 static enum ice_status
789 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
790 {
791         struct ice_flow_prof *prof;
792         u8 i;
793
794         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
795                    ICE_NONDMA_MEM);
796
797         prof = params->prof;
798
799         for (i = 0; i < params->prof->segs_cnt; i++) {
800                 const ice_bitmap_t *src;
801                 u32 hdrs;
802
803                 hdrs = prof->segs[i].hdrs;
804
805                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
806                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
807                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
808                         ice_and_bitmap(params->ptypes, params->ptypes, src,
809                                        ICE_FLOW_PTYPE_MAX);
810                 }
811
812                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
813                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
814                         ice_and_bitmap(params->ptypes, params->ptypes, src,
815                                        ICE_FLOW_PTYPE_MAX);
816                 }
817
818                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
819                         ice_and_bitmap(params->ptypes, params->ptypes,
820                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
821                                        ICE_FLOW_PTYPE_MAX);
822                 }
823
824                 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
825                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
826                         ice_and_bitmap(params->ptypes, params->ptypes, src,
827                                        ICE_FLOW_PTYPE_MAX);
828                 }
829                 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
830                     (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
831                         src = i ?
832                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
833                                 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
834                         ice_and_bitmap(params->ptypes, params->ptypes, src,
835                                        ICE_FLOW_PTYPE_MAX);
836                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
837                            (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
838                         src = i ?
839                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
840                                 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
841                         ice_and_bitmap(params->ptypes, params->ptypes, src,
842                                        ICE_FLOW_PTYPE_MAX);
843                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
844                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
845                         src = !i ? (const ice_bitmap_t *)ice_ipv4_ofos_no_l4 :
846                                 (const ice_bitmap_t *)ice_ipv4_il_no_l4;
847                         ice_and_bitmap(params->ptypes, params->ptypes, src,
848                                        ICE_FLOW_PTYPE_MAX);
849                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
850                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
851                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
852                         ice_and_bitmap(params->ptypes, params->ptypes, src,
853                                        ICE_FLOW_PTYPE_MAX);
854                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
855                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
856                         src = !i ? (const ice_bitmap_t *)ice_ipv6_ofos_no_l4 :
857                                 (const ice_bitmap_t *)ice_ipv6_il_no_l4;
858                         ice_and_bitmap(params->ptypes, params->ptypes, src,
859                                        ICE_FLOW_PTYPE_MAX);
860                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
861                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
862                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
863                         ice_and_bitmap(params->ptypes, params->ptypes, src,
864                                        ICE_FLOW_PTYPE_MAX);
865                 }
866
867                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
868                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
869                         ice_and_bitmap(params->ptypes, params->ptypes,
870                                        src, ICE_FLOW_PTYPE_MAX);
871                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
872                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
873                         ice_and_bitmap(params->ptypes, params->ptypes, src,
874                                        ICE_FLOW_PTYPE_MAX);
875                 } else {
876                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
877                         ice_andnot_bitmap(params->ptypes, params->ptypes, src,
878                                           ICE_FLOW_PTYPE_MAX);
879                 }
880
881                 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
882                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
883                         ice_and_bitmap(params->ptypes, params->ptypes, src,
884                                        ICE_FLOW_PTYPE_MAX);
885                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
886                         ice_and_bitmap(params->ptypes, params->ptypes,
887                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
888                                        ICE_FLOW_PTYPE_MAX);
889                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
890                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
891                         ice_and_bitmap(params->ptypes, params->ptypes, src,
892                                        ICE_FLOW_PTYPE_MAX);
893                 }
894
895                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
896                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
897                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
898                         ice_and_bitmap(params->ptypes, params->ptypes, src,
899                                        ICE_FLOW_PTYPE_MAX);
900                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
901                         if (!i) {
902                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
903                                 ice_and_bitmap(params->ptypes, params->ptypes,
904                                                src, ICE_FLOW_PTYPE_MAX);
905                         }
906                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
907                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
908                         ice_and_bitmap(params->ptypes, params->ptypes,
909                                        src, ICE_FLOW_PTYPE_MAX);
910                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
911                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
912                         ice_and_bitmap(params->ptypes, params->ptypes,
913                                        src, ICE_FLOW_PTYPE_MAX);
914                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
915                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
916                         ice_and_bitmap(params->ptypes, params->ptypes,
917                                        src, ICE_FLOW_PTYPE_MAX);
918
919                         /* Attributes for GTP packet with downlink */
920                         params->attr = ice_attr_gtpu_down;
921                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
922                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
923                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
924                         ice_and_bitmap(params->ptypes, params->ptypes,
925                                        src, ICE_FLOW_PTYPE_MAX);
926
927                         /* Attributes for GTP packet with uplink */
928                         params->attr = ice_attr_gtpu_up;
929                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
930                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
931                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
932                         ice_and_bitmap(params->ptypes, params->ptypes,
933                                        src, ICE_FLOW_PTYPE_MAX);
934
935                         /* Attributes for GTP packet with Extension Header */
936                         params->attr = ice_attr_gtpu_eh;
937                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
938                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
939                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
940                         ice_and_bitmap(params->ptypes, params->ptypes,
941                                        src, ICE_FLOW_PTYPE_MAX);
942
943                         /* Attributes for GTP packet without Extension Header */
944                         params->attr = ice_attr_gtpu_session;
945                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
946                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
947                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
948                         ice_and_bitmap(params->ptypes, params->ptypes,
949                                        src, ICE_FLOW_PTYPE_MAX);
950                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
951                         src = (const ice_bitmap_t *)ice_ptypes_esp;
952                         ice_and_bitmap(params->ptypes, params->ptypes,
953                                        src, ICE_FLOW_PTYPE_MAX);
954                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
955                         src = (const ice_bitmap_t *)ice_ptypes_ah;
956                         ice_and_bitmap(params->ptypes, params->ptypes,
957                                        src, ICE_FLOW_PTYPE_MAX);
958                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
959                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
960                         ice_and_bitmap(params->ptypes, params->ptypes,
961                                        src, ICE_FLOW_PTYPE_MAX);
962                 }
963
964                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
965                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
966                                 src =
967                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
968                         else
969                                 src =
970                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
971
972                         ice_and_bitmap(params->ptypes, params->ptypes,
973                                        src, ICE_FLOW_PTYPE_MAX);
974                 } else {
975                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
976                         ice_andnot_bitmap(params->ptypes, params->ptypes,
977                                           src, ICE_FLOW_PTYPE_MAX);
978
979                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
980                         ice_andnot_bitmap(params->ptypes, params->ptypes,
981                                           src, ICE_FLOW_PTYPE_MAX);
982                 }
983         }
984
985         return ICE_SUCCESS;
986 }
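/*
 * Summary of the loop above: ptypes starts as an all-ones bitmap and each
 * header flagged in a segment ANDs in the corresponding ptype table (the
 * outer/first table for segment 0, the innermost/last table otherwise, where
 * both exist), while tables for headers that must be absent (PPPoE, PFCP) are
 * cleared with ice_andnot_bitmap(). Only packet types acceptable to every
 * segment remain set when the loop finishes.
 */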
987
988 /**
989  * ice_flow_xtract_pkt_flags - Create an extraction sequence entry for packet flags
990  * @hw: pointer to the HW struct
991  * @params: information about the flow to be processed
992  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
993  *
994  * This function allocates an extraction sequence entry for a DWORD-sized
995  * chunk of the packet flags.
996  */
997 static enum ice_status
998 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
999                           struct ice_flow_prof_params *params,
1000                           enum ice_flex_mdid_pkt_flags flags)
1001 {
1002         u8 fv_words = hw->blk[params->blk].es.fvw;
1003         u8 idx;
1004
1005         /* Make sure the number of extraction sequence entries required does not
1006          * exceed the block's capacity.
1007          */
1008         if (params->es_cnt >= fv_words)
1009                 return ICE_ERR_MAX_LIMIT;
1010
1011         /* some blocks require a reversed field vector layout */
1012         if (hw->blk[params->blk].es.reverse)
1013                 idx = fv_words - params->es_cnt - 1;
1014         else
1015                 idx = params->es_cnt;
1016
1017         params->es[idx].prot_id = ICE_PROT_META_ID;
1018         params->es[idx].off = flags;
1019         params->es_cnt++;
1020
1021         return ICE_SUCCESS;
1022 }
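/*
 * Example of the index calculation above, using a hypothetical field vector
 * width of fv_words == 48: in a "reverse" block the first entry (es_cnt == 0)
 * is written at idx = 48 - 0 - 1 = 47 and later entries fill downward, while
 * a non-reversed block simply uses idx = es_cnt counting upward.
 */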
1023
1024 /**
1025  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1026  * @hw: pointer to the HW struct
1027  * @params: information about the flow to be processed
1028  * @seg: packet segment index of the field to be extracted
1029  * @fld: ID of field to be extracted
1030  * @match: bitfield of all fields
1031  *
1032  * This function determines the protocol ID, offset, and size of the given
1033  * field. It then allocates one or more extraction sequence entries for the
1034  * given field, and fills the entries with protocol ID and offset information.
1035  */
1036 static enum ice_status
1037 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1038                     u8 seg, enum ice_flow_field fld, u64 match)
1039 {
1040         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1041         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1042         u8 fv_words = hw->blk[params->blk].es.fvw;
1043         struct ice_flow_fld_info *flds;
1044         u16 cnt, ese_bits, i;
1045         u16 sib_mask = 0;
1046         u16 mask;
1047         u16 off;
1048
1049         flds = params->prof->segs[seg].fields;
1050
1051         switch (fld) {
1052         case ICE_FLOW_FIELD_IDX_ETH_DA:
1053         case ICE_FLOW_FIELD_IDX_ETH_SA:
1054         case ICE_FLOW_FIELD_IDX_S_VLAN:
1055         case ICE_FLOW_FIELD_IDX_C_VLAN:
1056                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1057                 break;
1058         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1059                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1060                 break;
1061         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1062                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1063                 break;
1064         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1065                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1066                 break;
1067         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1068         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1069                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1070
1071                 /* TTL and PROT share the same extraction seq. entry.
1072                  * Each is considered a sibling to the other in terms of sharing
1073                  * the same extraction sequence entry.
1074                  */
1075                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1076                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1077                 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
1078                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1079
1080                 /* If the sibling field is also included, that field's
1081                  * mask needs to be included.
1082                  */
1083                 if (match & BIT(sib))
1084                         sib_mask = ice_flds_info[sib].mask;
1085                 break;
1086         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1087         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1088                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1089
1090                 /* TTL and PROT share the same extraction seq. entry.
1091                  * Each is considered a sibling to the other in terms of sharing
1092                  * the same extraction sequence entry.
1093                  */
1094                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1095                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1096                 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
1097                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1098
1099                 /* If the sibling field is also included, that field's
1100                  * mask needs to be included.
1101                  */
1102                 if (match & BIT(sib))
1103                         sib_mask = ice_flds_info[sib].mask;
1104                 break;
1105         case ICE_FLOW_FIELD_IDX_IPV4_SA:
1106         case ICE_FLOW_FIELD_IDX_IPV4_DA:
1107                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1108                 break;
1109         case ICE_FLOW_FIELD_IDX_IPV6_SA:
1110         case ICE_FLOW_FIELD_IDX_IPV6_DA:
1111         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1112         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1113         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1114         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1115         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1116         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1117                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1118                 break;
1119         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1120         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1121         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1122                 prot_id = ICE_PROT_TCP_IL;
1123                 break;
1124         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1125         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1126                 prot_id = ICE_PROT_UDP_IL_OR_S;
1127                 break;
1128         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1129         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1130                 prot_id = ICE_PROT_SCTP_IL;
1131                 break;
1132         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1133         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1134         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1135         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1136         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1137         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1138                 /* GTP is accessed through UDP OF protocol */
1139                 prot_id = ICE_PROT_UDP_OF;
1140                 break;
1141         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1142                 prot_id = ICE_PROT_PPPOE;
1143                 break;
1144         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1145                 prot_id = ICE_PROT_UDP_IL_OR_S;
1146                 break;
1147         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1148                 prot_id = ICE_PROT_L2TPV3;
1149                 break;
1150         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1151                 prot_id = ICE_PROT_ESP_F;
1152                 break;
1153         case ICE_FLOW_FIELD_IDX_AH_SPI:
1154                 prot_id = ICE_PROT_ESP_2;
1155                 break;
1156         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1157                 prot_id = ICE_PROT_UDP_IL_OR_S;
1158                 break;
1159         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1160         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1161         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1162         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1163         case ICE_FLOW_FIELD_IDX_ARP_OP:
1164                 prot_id = ICE_PROT_ARP_OF;
1165                 break;
1166         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1167         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1168                 /* ICMP type and code share the same extraction seq. entry */
1169                 prot_id = (params->prof->segs[seg].hdrs &
1170                            ICE_FLOW_SEG_HDR_IPV4) ?
1171                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1172                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1173                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1174                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1175                 break;
1176         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1177                 prot_id = ICE_PROT_GRE_OF;
1178                 break;
1179         default:
1180                 return ICE_ERR_NOT_IMPL;
1181         }
1182
1183         /* Each extraction sequence entry is a word in size, and extracts
1184          * data at a word-aligned offset from a protocol header.
1185          */
1186         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1187
1188         flds[fld].xtrct.prot_id = prot_id;
1189         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1190                 ICE_FLOW_FV_EXTRACT_SZ;
1191         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1192         flds[fld].xtrct.idx = params->es_cnt;
1193         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1194
1195         /* Adjust the next field-entry index after accommodating the number of
1196          * entries this field consumes
1197          */
1198         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1199                                   ice_flds_info[fld].size, ese_bits);
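        /* Worked example (assuming the usual 2-byte, i.e. 16-bit, extraction
         * word): a hypothetical 1-byte field at bit offset 72 of its header
         * gives off = (72 / 16) * 2 = 8 bytes, disp = 72 % 16 = 8 bits, and
         * cnt = DIVIDE_AND_ROUND_UP(8 + 8, 16) = 1 extraction entry.
         */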
1200
1201         /* Fill in the extraction sequence entries needed for this field */
1202         off = flds[fld].xtrct.off;
1203         mask = flds[fld].xtrct.mask;
1204         for (i = 0; i < cnt; i++) {
1205                 /* Only consume an extraction sequence entry if there is no
1206                  * sibling field associated with this field, or if the sibling
1207                  * entry does not already extract the word shared with this field.
1208                  */
1209                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1210                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1211                     flds[sib].xtrct.off != off) {
1212                         u8 idx;
1213
1214                         /* Make sure the number of extraction sequence entries
1215                          * required does not exceed the block's capacity
1216                          */
1217                         if (params->es_cnt >= fv_words)
1218                                 return ICE_ERR_MAX_LIMIT;
1219
1220                         /* some blocks require a reversed field vector layout */
1221                         if (hw->blk[params->blk].es.reverse)
1222                                 idx = fv_words - params->es_cnt - 1;
1223                         else
1224                                 idx = params->es_cnt;
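                        /* For example, with a hypothetical fv_words of 48 and
                         * the reverse flag set, the first entry lands at
                         * index 47, the next at 46, and so on.
                         */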
1225
1226                         params->es[idx].prot_id = prot_id;
1227                         params->es[idx].off = off;
1228                         params->mask[idx] = mask | sib_mask;
1229                         params->es_cnt++;
1230                 }
1231
1232                 off += ICE_FLOW_FV_EXTRACT_SZ;
1233         }
1234
1235         return ICE_SUCCESS;
1236 }
1237
1238 /**
1239  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1240  * @hw: pointer to the HW struct
1241  * @params: information about the flow to be processed
1242  * @seg: index of packet segment whose raw fields are to be extracted
1243  */
1244 static enum ice_status
1245 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1246                      u8 seg)
1247 {
1248         u16 fv_words;
1249         u16 hdrs_sz;
1250         u8 i;
1251
1252         if (!params->prof->segs[seg].raws_cnt)
1253                 return ICE_SUCCESS;
1254
1255         if (params->prof->segs[seg].raws_cnt >
1256             ARRAY_SIZE(params->prof->segs[seg].raws))
1257                 return ICE_ERR_MAX_LIMIT;
1258
1259         /* Offsets within the segment headers are not supported */
1260         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1261         if (!hdrs_sz)
1262                 return ICE_ERR_PARAM;
1263
1264         fv_words = hw->blk[params->blk].es.fvw;
1265
1266         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1267                 struct ice_flow_seg_fld_raw *raw;
1268                 u16 off, cnt, j;
1269
1270                 raw = &params->prof->segs[seg].raws[i];
1271
1272                 /* Storing extraction information */
1273                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1274                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1275                         ICE_FLOW_FV_EXTRACT_SZ;
1276                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1277                         BITS_PER_BYTE;
1278                 raw->info.xtrct.idx = params->es_cnt;
1279
1280                 /* Determine the number of field vector entries this raw field
1281                  * consumes.
1282                  */
1283                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1284                                           (raw->info.src.last * BITS_PER_BYTE),
1285                                           (ICE_FLOW_FV_EXTRACT_SZ *
1286                                            BITS_PER_BYTE));
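                /* For instance, a hypothetical 3-byte raw match that starts
                 * one byte into an extraction word (disp = 8 bits) needs
                 * DIVIDE_AND_ROUND_UP(8 + 24, 16) = 2 field vector words.
                 */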
1287                 off = raw->info.xtrct.off;
1288                 for (j = 0; j < cnt; j++) {
1289                         u16 idx;
1290
1291                         /* Make sure the number of extraction sequence entries
1292                          * required does not exceed the block's capacity
1293                          */
1294                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1295                             params->es_cnt >= ICE_MAX_FV_WORDS)
1296                                 return ICE_ERR_MAX_LIMIT;
1297
1298                         /* some blocks require a reversed field vector layout */
1299                         if (hw->blk[params->blk].es.reverse)
1300                                 idx = fv_words - params->es_cnt - 1;
1301                         else
1302                                 idx = params->es_cnt;
1303
1304                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1305                         params->es[idx].off = off;
1306                         params->es_cnt++;
1307                         off += ICE_FLOW_FV_EXTRACT_SZ;
1308                 }
1309         }
1310
1311         return ICE_SUCCESS;
1312 }
1313
1314 /**
1315  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1316  * @hw: pointer to the HW struct
1317  * @params: information about the flow to be processed
1318  *
1319  * This function iterates through all matched fields in the given segments, and
1320  * creates an extraction sequence for the fields.
1321  */
1322 static enum ice_status
1323 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1324                           struct ice_flow_prof_params *params)
1325 {
1326         enum ice_status status = ICE_SUCCESS;
1327         u8 i;
1328
1329         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1330          * packet flags
1331          */
1332         if (params->blk == ICE_BLK_ACL) {
1333                 status = ice_flow_xtract_pkt_flags(hw, params,
1334                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1335                 if (status)
1336                         return status;
1337         }
1338
1339         for (i = 0; i < params->prof->segs_cnt; i++) {
1340                 u64 match = params->prof->segs[i].match;
1341                 enum ice_flow_field j;
1342
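                /* Walk each field bit set in the local copy of the match
                 * bitmap; clearing a bit below only marks that field as
                 * handled and does not modify the segment's stored match.
                 */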
1343                 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1344                                      ICE_FLOW_FIELD_IDX_MAX) {
1345                         status = ice_flow_xtract_fld(hw, params, i, j, match);
1346                         if (status)
1347                                 return status;
1348                         ice_clear_bit(j, (ice_bitmap_t *)&match);
1349                 }
1350
1351                 /* Process raw matching bytes */
1352                 status = ice_flow_xtract_raws(hw, params, i);
1353                 if (status)
1354                         return status;
1355         }
1356
1357         return status;
1358 }
1359
1360 /**
1361  * ice_flow_sel_acl_scen - Select an ACL scenario for the flow profile
1362  * @hw: pointer to the hardware structure
1363  * @params: information about the flow to be processed
1364  *
1365  * This function selects the best-fit ACL scenario for the entry length
1366  * computed from the given profile parameters.
1367  */
1368 static enum ice_status
1369 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1370 {
1371         /* Find the best-fit scenario for the provided match width */
1372         struct ice_acl_scen *cand_scen = NULL, *scen;
1373
1374         if (!hw->acl_tbl)
1375                 return ICE_ERR_DOES_NOT_EXIST;
1376
1377         /* Loop through the scenarios and pick the narrowest one whose
1378          * effective width can still accommodate the required entry length
1379          */
1380         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1381                 if (scen->eff_width >= params->entry_length &&
1382                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1383                         cand_scen = scen;
1384         if (!cand_scen)
1385                 return ICE_ERR_DOES_NOT_EXIST;
1386
1387         params->prof->cfg.scen = cand_scen;
1388
1389         return ICE_SUCCESS;
1390 }
1391
1392 /**
1393  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1394  * @params: information about the flow to be processed
1395  */
1396 static enum ice_status
1397 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1398 {
1399         u16 index, i, range_idx = 0;
1400
1401         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1402
1403         for (i = 0; i < params->prof->segs_cnt; i++) {
1404                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1405                 u8 j;
1406
1407                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1408                                      ICE_FLOW_FIELD_IDX_MAX) {
1409                         struct ice_flow_fld_info *fld = &seg->fields[j];
1410
1411                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1412
1413                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1414                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1415
1416                                 /* Range checking only supported for single
1417                                  * words
1418                                  */
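                                /* That is, the field size plus its bit
                                 * displacement must fit within a single
                                 * 16-bit extraction word.
                                 */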
1419                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1420                                                         fld->xtrct.disp,
1421                                                         BITS_PER_BYTE * 2) > 1)
1422                                         return ICE_ERR_PARAM;
1423
1424                                 /* Ranges must define low and high values */
1425                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1426                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1427                                         return ICE_ERR_PARAM;
1428
1429                                 fld->entry.val = range_idx++;
1430                         } else {
1431                                 /* Store adjusted byte-length of field for later
1432                                  * use, taking into account potential
1433                                  * non-byte-aligned displacement
1434                                  */
1435                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1436                                         (ice_flds_info[j].size +
1437                                          (fld->xtrct.disp % BITS_PER_BYTE),
1438                                          BITS_PER_BYTE);
1439                                 fld->entry.val = index;
1440                                 index += fld->entry.last;
1441                         }
1442                 }
1443
1444                 for (j = 0; j < seg->raws_cnt; j++) {
1445                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1446
1447                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1448                         raw->info.entry.val = index;
1449                         raw->info.entry.last = raw->info.src.last;
1450                         index += raw->info.entry.last;
1451                 }
1452         }
1453
1454         /* Currently, only the byte selection base is supported, which
1455          * limits the effective entry size to 30 bytes. Reject anything
1456          * larger.
1457          */
1458         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1459                 return ICE_ERR_PARAM;
1460
1461         /* Only 8 range checkers are available per profile; reject anything
1462          * trying to use more
1463          */
1464         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1465                 return ICE_ERR_PARAM;
1466
1467         /* Store # bytes required for entry for later use */
1468         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1469
1470         return ICE_SUCCESS;
1471 }
1472
1473 /**
1474  * ice_flow_proc_segs - process all packet segments associated with a profile
1475  * @hw: pointer to the HW struct
1476  * @params: information about the flow to be processed
1477  */
1478 static enum ice_status
1479 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1480 {
1481         enum ice_status status;
1482
1483         status = ice_flow_proc_seg_hdrs(params);
1484         if (status)
1485                 return status;
1486
1487         status = ice_flow_create_xtrct_seq(hw, params);
1488         if (status)
1489                 return status;
1490
1491         switch (params->blk) {
1492         case ICE_BLK_FD:
1493         case ICE_BLK_RSS:
1494                 status = ICE_SUCCESS;
1495                 break;
1496         case ICE_BLK_ACL:
1497                 status = ice_flow_acl_def_entry_frmt(params);
1498                 if (status)
1499                         return status;
1500                 status = ice_flow_sel_acl_scen(hw, params);
1501                 if (status)
1502                         return status;
1503                 break;
1504         default:
1505                 return ICE_ERR_NOT_IMPL;
1506         }
1507
1508         return status;
1509 }
1510
1511 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1512 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1513 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1514
1515 /**
1516  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1517  * @hw: pointer to the HW struct
1518  * @blk: classification stage
1519  * @dir: flow direction
1520  * @segs: array of one or more packet segments that describe the flow
1521  * @segs_cnt: number of packet segments provided
1522  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1523  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1524  */
1525 static struct ice_flow_prof *
1526 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1527                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1528                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1529 {
1530         struct ice_flow_prof *p, *prof = NULL;
1531
1532         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1533         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1534                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1535                     segs_cnt && segs_cnt == p->segs_cnt) {
1536                         u8 i;
1537
1538                         /* Check for profile-VSI association if specified */
1539                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1540                             ice_is_vsi_valid(hw, vsi_handle) &&
1541                             !ice_is_bit_set(p->vsis, vsi_handle))
1542                                 continue;
1543
1544                         /* Protocol headers must be checked. Matched fields are
1545                          * checked if specified.
1546                          */
1547                         for (i = 0; i < segs_cnt; i++)
1548                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1549                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1550                                      segs[i].match != p->segs[i].match))
1551                                         break;
1552
1553                         /* A match is found if all segments are matched */
1554                         if (i == segs_cnt) {
1555                                 prof = p;
1556                                 break;
1557                         }
1558                 }
1559         ice_release_lock(&hw->fl_profs_locks[blk]);
1560
1561         return prof;
1562 }
1563
1564 /**
1565  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1566  * @hw: pointer to the HW struct
1567  * @blk: classification stage
1568  * @dir: flow direction
1569  * @segs: array of one or more packet segments that describe the flow
1570  * @segs_cnt: number of packet segments provided
1571  */
1572 u64
1573 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1574                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1575 {
1576         struct ice_flow_prof *p;
1577
1578         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1579                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1580
1581         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1582 }
1583
1584 /**
1585  * ice_flow_find_prof_id - Look up a profile with given profile ID
1586  * @hw: pointer to the HW struct
1587  * @blk: classification stage
1588  * @prof_id: unique ID to identify this flow profile
1589  */
1590 static struct ice_flow_prof *
1591 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1592 {
1593         struct ice_flow_prof *p;
1594
1595         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1596                 if (p->id == prof_id)
1597                         return p;
1598
1599         return NULL;
1600 }
1601
1602 /**
1603  * ice_dealloc_flow_entry - Deallocate flow entry memory
1604  * @hw: pointer to the HW struct
1605  * @entry: flow entry to be removed
1606  */
1607 static void
1608 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1609 {
1610         if (!entry)
1611                 return;
1612
1613         if (entry->entry)
1614                 ice_free(hw, entry->entry);
1615
1616         if (entry->range_buf) {
1617                 ice_free(hw, entry->range_buf);
1618                 entry->range_buf = NULL;
1619         }
1620
1621         if (entry->acts) {
1622                 ice_free(hw, entry->acts);
1623                 entry->acts = NULL;
1624                 entry->acts_cnt = 0;
1625         }
1626
1627         ice_free(hw, entry);
1628 }
1629
1630 #define ICE_ACL_INVALID_SCEN    0x3f
1631
1632 /**
1633  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
1634  * @hw: pointer to the hardware structure
1635  * @prof: pointer to flow profile
1636  * @buf: destination buffer the function writes the partial extraction sequence to
1637  *
1638  * returns ICE_SUCCESS if no PF is associated with the given profile
1639  * returns ICE_ERR_IN_USE if at least one PF is associated with the given profile
1640  * returns another error code if a real error occurs
1641  */
1642 static enum ice_status
1643 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1644                             struct ice_aqc_acl_prof_generic_frmt *buf)
1645 {
1646         enum ice_status status;
1647         u8 prof_id = 0;
1648
1649         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1650         if (status)
1651                 return status;
1652
1653         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1654         if (status)
1655                 return status;
1656
1657         /* If the scenarios associated with every PF are all 0, or all
1658          * ICE_ACL_INVALID_SCEN (63), for the given profile, then the profile
1659          * has not been configured by any PF yet.
1660          */
1661         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1662             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1663             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1664             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1665                 return ICE_SUCCESS;
1666
1667         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1668             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1669             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1670             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1671             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1672             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1673             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1674             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1675                 return ICE_SUCCESS;
1676         else
1677                 return ICE_ERR_IN_USE;
1678 }
1679
1680 /**
1681  * ice_flow_acl_free_act_cntr - Free the ACL counters used by the rule's actions
1682  * @hw: pointer to the hardware structure
1683  * @acts: array of actions to be performed on a match
1684  * @acts_cnt: number of actions
1685  */
1686 static enum ice_status
1687 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1688                            u8 acts_cnt)
1689 {
1690         int i;
1691
1692         for (i = 0; i < acts_cnt; i++) {
1693                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1694                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1695                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1696                         struct ice_acl_cntrs cntrs;
1697                         enum ice_status status;
1698
1699                         cntrs.bank = 0; /* Only bank0 for the moment */
1700                         cntrs.first_cntr =
1701                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1702                         cntrs.last_cntr =
1703                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1704
1705                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1706                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1707                         else
1708                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1709
1710                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1711                         if (status)
1712                                 return status;
1713                 }
1714         }
1715         return ICE_SUCCESS;
1716 }
1717
1718 /**
1719  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1720  * @hw: pointer to the hardware structure
1721  * @prof: pointer to flow profile
1722  *
1723  * Disassociate the scenario from the profile for the PF of the VSI.
1724  */
1725 static enum ice_status
1726 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1727 {
1728         struct ice_aqc_acl_prof_generic_frmt buf;
1729         enum ice_status status = ICE_SUCCESS;
1730         u8 prof_id = 0;
1731
1732         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1733
1734         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1735         if (status)
1736                 return status;
1737
1738         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1739         if (status)
1740                 return status;
1741
1742         /* Clear scenario for this PF */
1743         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1744         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1745
1746         return status;
1747 }
1748
1749 /**
1750  * ice_flow_rem_entry_sync - Remove a flow entry
1751  * @hw: pointer to the HW struct
1752  * @blk: classification stage
1753  * @entry: flow entry to be removed
1754  */
1755 static enum ice_status
1756 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1757                         struct ice_flow_entry *entry)
1758 {
1759         if (!entry)
1760                 return ICE_ERR_BAD_PTR;
1761
1762         if (blk == ICE_BLK_ACL) {
1763                 enum ice_status status;
1764
1765                 if (!entry->prof)
1766                         return ICE_ERR_BAD_PTR;
1767
1768                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1769                                            entry->scen_entry_idx);
1770                 if (status)
1771                         return status;
1772
1773                 /* Checks if we need to release an ACL counter. */
1774                 if (entry->acts_cnt && entry->acts)
1775                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1776                                                    entry->acts_cnt);
1777         }
1778
1779         LIST_DEL(&entry->l_entry);
1780
1781         ice_dealloc_flow_entry(hw, entry);
1782
1783         return ICE_SUCCESS;
1784 }
1785
1786 /**
1787  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1788  * @hw: pointer to the HW struct
1789  * @blk: classification stage
1790  * @dir: flow direction
1791  * @prof_id: unique ID to identify this flow profile
1792  * @segs: array of one or more packet segments that describe the flow
1793  * @segs_cnt: number of packet segments provided
1794  * @acts: array of default actions
1795  * @acts_cnt: number of default actions
1796  * @prof: stores the returned flow profile added
1797  *
1798  * Assumption: the caller has acquired the lock to the profile list
1799  */
1800 static enum ice_status
1801 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1802                        enum ice_flow_dir dir, u64 prof_id,
1803                        struct ice_flow_seg_info *segs, u8 segs_cnt,
1804                        struct ice_flow_action *acts, u8 acts_cnt,
1805                        struct ice_flow_prof **prof)
1806 {
1807         struct ice_flow_prof_params *params;
1808         enum ice_status status;
1809         u8 i;
1810
1811         if (!prof || (acts_cnt && !acts))
1812                 return ICE_ERR_BAD_PTR;
1813
1814         params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
1815         if (!params)
1816                 return ICE_ERR_NO_MEMORY;
1817
1818         params->prof = (struct ice_flow_prof *)
1819                 ice_malloc(hw, sizeof(*params->prof));
1820         if (!params->prof) {
1821                 status = ICE_ERR_NO_MEMORY;
1822                 goto free_params;
1823         }
1824
1825         /* initialize extraction sequence to all invalid (0xff) */
1826         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1827                 params->es[i].prot_id = ICE_PROT_INVALID;
1828                 params->es[i].off = ICE_FV_OFFSET_INVAL;
1829         }
1830
1831         params->blk = blk;
1832         params->prof->id = prof_id;
1833         params->prof->dir = dir;
1834         params->prof->segs_cnt = segs_cnt;
1835
1836         /* Make a copy of the segments that need to be persistent in the flow
1837          * profile instance
1838          */
1839         for (i = 0; i < segs_cnt; i++)
1840                 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
1841                            ICE_NONDMA_TO_NONDMA);
1842
1843         /* Make a copy of the actions that need to be persistent in the flow
1844          * profile instance.
1845          */
1846         if (acts_cnt) {
1847                 params->prof->acts = (struct ice_flow_action *)
1848                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1849                                    ICE_NONDMA_TO_NONDMA);
1850
1851                 if (!params->prof->acts) {
1852                         status = ICE_ERR_NO_MEMORY;
1853                         goto out;
1854                 }
1855         }
1856
1857         status = ice_flow_proc_segs(hw, params);
1858         if (status) {
1859                 ice_debug(hw, ICE_DBG_FLOW,
1860                           "Error processing a flow's packet segments\n");
1861                 goto out;
1862         }
1863
1864         /* Add a HW profile for this flow profile */
1865         status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
1866                               params->attr, params->attr_cnt, params->es,
1867                               params->mask);
1868         if (status) {
1869                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1870                 goto out;
1871         }
1872
1873         INIT_LIST_HEAD(&params->prof->entries);
1874         ice_init_lock(&params->prof->entries_lock);
1875         *prof = params->prof;
1876
1877 out:
1878         if (status) {
1879                 if (params->prof->acts)
1880                         ice_free(hw, params->prof->acts);
1881                 ice_free(hw, params->prof);
1882         }
1883 free_params:
1884         ice_free(hw, params);
1885
1886         return status;
1887 }
1888
1889 /**
1890  * ice_flow_rem_prof_sync - remove a flow profile
1891  * @hw: pointer to the hardware structure
1892  * @blk: classification stage
1893  * @prof: pointer to flow profile to remove
1894  *
1895  * Assumption: the caller has acquired the lock to the profile list
1896  */
1897 static enum ice_status
1898 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1899                        struct ice_flow_prof *prof)
1900 {
1901         enum ice_status status;
1902
1903         /* Remove all remaining flow entries before removing the flow profile */
1904         if (!LIST_EMPTY(&prof->entries)) {
1905                 struct ice_flow_entry *e, *t;
1906
1907                 ice_acquire_lock(&prof->entries_lock);
1908
1909                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1910                                          l_entry) {
1911                         status = ice_flow_rem_entry_sync(hw, blk, e);
1912                         if (status)
1913                                 break;
1914                 }
1915
1916                 ice_release_lock(&prof->entries_lock);
1917         }
1918
1919         if (blk == ICE_BLK_ACL) {
1920                 struct ice_aqc_acl_profile_ranges query_rng_buf;
1921                 struct ice_aqc_acl_prof_generic_frmt buf;
1922                 u8 prof_id = 0;
1923
1924                 /* Disassociate the scenario from the profile for the PF */
1925                 status = ice_flow_acl_disassoc_scen(hw, prof);
1926                 if (status)
1927                         return status;
1928
1929                 /* Clear the range-checker if the profile ID is no longer
1930                  * used by any PF
1931                  */
1932                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1933                 if (status && status != ICE_ERR_IN_USE) {
1934                         return status;
1935                 } else if (!status) {
1936                         /* Clear the range-checker value for profile ID */
1937                         ice_memset(&query_rng_buf, 0,
1938                                    sizeof(struct ice_aqc_acl_profile_ranges),
1939                                    ICE_NONDMA_MEM);
1940
1941                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
1942                                                       &prof_id);
1943                         if (status)
1944                                 return status;
1945
1946                         status = ice_prog_acl_prof_ranges(hw, prof_id,
1947                                                           &query_rng_buf, NULL);
1948                         if (status)
1949                                 return status;
1950                 }
1951         }
1952
1953         /* Remove all hardware profiles associated with this flow profile */
1954         status = ice_rem_prof(hw, blk, prof->id);
1955         if (!status) {
1956                 LIST_DEL(&prof->l_entry);
1957                 ice_destroy_lock(&prof->entries_lock);
1958                 if (prof->acts)
1959                         ice_free(hw, prof->acts);
1960                 ice_free(hw, prof);
1961         }
1962
1963         return status;
1964 }
1965
1966 /**
1967  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1968  * @buf: Destination buffer the function writes the partial xtrct sequence to
1969  * @info: Info about field
1970  */
1971 static void
1972 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1973                                struct ice_flow_fld_info *info)
1974 {
1975         u16 dst, i;
1976         u8 src;
1977
1978         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1979                 info->xtrct.disp / BITS_PER_BYTE;
1980         dst = info->entry.val;
1981         for (i = 0; i < info->entry.last; i++)
1982                 /* HW stores field vector words in LE, convert words back to BE
1983                  * so constructed entries will end up in network order
1984                  */
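                /* XOR-ing the byte index with 1 swaps each even/odd byte
                 * pair, e.g. field vector bytes 4 and 5 are selected in the
                 * order 5, 4.
                 */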
1985                 buf->byte_selection[dst++] = src++ ^ 1;
1986 }
1987
1988 /**
1989  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1990  * @hw: pointer to the hardware structure
1991  * @prof: pointer to flow profile
1992  */
1993 static enum ice_status
1994 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
1995 {
1996         struct ice_aqc_acl_prof_generic_frmt buf;
1997         struct ice_flow_fld_info *info;
1998         enum ice_status status;
1999         u8 prof_id = 0;
2000         u16 i;
2001
2002         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2003
2004         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2005         if (status)
2006                 return status;
2007
2008         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2009         if (status && status != ICE_ERR_IN_USE)
2010                 return status;
2011
2012         if (!status) {
2013                 /* Program the profile-dependent configuration. This is done
2014                  * only once, regardless of the number of PFs using that profile
2015                  */
2016                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2017
2018                 for (i = 0; i < prof->segs_cnt; i++) {
2019                         struct ice_flow_seg_info *seg = &prof->segs[i];
2020                         u16 j;
2021
2022                         ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2023                                              ICE_FLOW_FIELD_IDX_MAX) {
2024                                 info = &seg->fields[j];
2025
2026                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2027                                         buf.word_selection[info->entry.val] =
2028                                                 info->xtrct.idx;
2029                                 else
2030                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
2031                                                                        info);
2032                         }
2033
2034                         for (j = 0; j < seg->raws_cnt; j++) {
2035                                 info = &seg->raws[j].info;
2036                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2037                         }
2038                 }
2039
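                /* Mark every PF's scenario slot invalid; the current PF's
                 * slot is filled in with the selected scenario just below.
                 */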
2040                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2041                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2042                            ICE_NONDMA_MEM);
2043         }
2044
2045         /* Update the current PF */
2046         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2047         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
2048
2049         return status;
2050 }
2051
2052 /**
2053  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2054  * @hw: pointer to the hardware structure
2055  * @blk: classification stage
2056  * @vsi_handle: software VSI handle
2057  * @vsig: target VSI group
2058  *
2059  * Assumption: the caller has already verified that the VSI to
2060  * be added has the same characteristics as the VSIG and will
2061  * thereby have access to all resources added to that VSIG.
2062  */
2063 enum ice_status
2064 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2065                         u16 vsig)
2066 {
2067         enum ice_status status;
2068
2069         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2070                 return ICE_ERR_PARAM;
2071
2072         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2073         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2074                                   vsig);
2075         ice_release_lock(&hw->fl_profs_locks[blk]);
2076
2077         return status;
2078 }
2079
2080 /**
2081  * ice_flow_assoc_prof - associate a VSI with a flow profile
2082  * @hw: pointer to the hardware structure
2083  * @blk: classification stage
2084  * @prof: pointer to flow profile
2085  * @vsi_handle: software VSI handle
2086  *
2087  * Assumption: the caller has acquired the lock to the profile list
2088  * and the software VSI handle has been validated
2089  */
2090 static enum ice_status
2091 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2092                     struct ice_flow_prof *prof, u16 vsi_handle)
2093 {
2094         enum ice_status status = ICE_SUCCESS;
2095
2096         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2097                 if (blk == ICE_BLK_ACL) {
2098                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2099                         if (status)
2100                                 return status;
2101                 }
2102                 status = ice_add_prof_id_flow(hw, blk,
2103                                               ice_get_hw_vsi_num(hw,
2104                                                                  vsi_handle),
2105                                               prof->id);
2106                 if (!status)
2107                         ice_set_bit(vsi_handle, prof->vsis);
2108                 else
2109                         ice_debug(hw, ICE_DBG_FLOW,
2110                                   "HW profile add failed, %d\n",
2111                                   status);
2112         }
2113
2114         return status;
2115 }
2116
2117 /**
2118  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2119  * @hw: pointer to the hardware structure
2120  * @blk: classification stage
2121  * @prof: pointer to flow profile
2122  * @vsi_handle: software VSI handle
2123  *
2124  * Assumption: the caller has acquired the lock to the profile list
2125  * and the software VSI handle has been validated
2126  */
2127 static enum ice_status
2128 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2129                        struct ice_flow_prof *prof, u16 vsi_handle)
2130 {
2131         enum ice_status status = ICE_SUCCESS;
2132
2133         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2134                 status = ice_rem_prof_id_flow(hw, blk,
2135                                               ice_get_hw_vsi_num(hw,
2136                                                                  vsi_handle),
2137                                               prof->id);
2138                 if (!status)
2139                         ice_clear_bit(vsi_handle, prof->vsis);
2140                 else
2141                         ice_debug(hw, ICE_DBG_FLOW,
2142                                   "HW profile remove failed, %d\n",
2143                                   status);
2144         }
2145
2146         return status;
2147 }
2148
2149 /**
2150  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2151  * @hw: pointer to the HW struct
2152  * @blk: classification stage
2153  * @dir: flow direction
2154  * @prof_id: unique ID to identify this flow profile
2155  * @segs: array of one or more packet segments that describe the flow
2156  * @segs_cnt: number of packet segments provided
2157  * @acts: array of default actions
2158  * @acts_cnt: number of default actions
2159  * @prof: stores the returned flow profile added
2160  */
2161 enum ice_status
2162 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2163                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2164                   struct ice_flow_action *acts, u8 acts_cnt,
2165                   struct ice_flow_prof **prof)
2166 {
2167         enum ice_status status;
2168
2169         if (segs_cnt > ICE_FLOW_SEG_MAX)
2170                 return ICE_ERR_MAX_LIMIT;
2171
2172         if (!segs_cnt)
2173                 return ICE_ERR_PARAM;
2174
2175         if (!segs)
2176                 return ICE_ERR_BAD_PTR;
2177
2178         status = ice_flow_val_hdrs(segs, segs_cnt);
2179         if (status)
2180                 return status;
2181
2182         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2183
2184         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2185                                         acts, acts_cnt, prof);
2186         if (!status)
2187                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2188
2189         ice_release_lock(&hw->fl_profs_locks[blk]);
2190
2191         return status;
2192 }
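
/* Usage sketch (illustrative only; 'hw', 'prof_id' and 'status' below are
 * hypothetical caller-side variables): a caller typically describes the flow
 * with one or more segments, optionally selects match fields on each segment
 * via ice_flow_set_fld(), and then adds the profile:
 *
 *      struct ice_flow_seg_info seg = { 0 };
 *      struct ice_flow_prof *prof = NULL;
 *
 *      ICE_FLOW_SET_HDRS(&seg, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
 *      status = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, prof_id,
 *                                 &seg, 1, NULL, 0, &prof);
 */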
2193
2194 /**
2195  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2196  * @hw: pointer to the HW struct
2197  * @blk: the block for which the flow profile is to be removed
2198  * @prof_id: unique ID of the flow profile to be removed
2199  */
2200 enum ice_status
2201 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2202 {
2203         struct ice_flow_prof *prof;
2204         enum ice_status status;
2205
2206         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2207
2208         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2209         if (!prof) {
2210                 status = ICE_ERR_DOES_NOT_EXIST;
2211                 goto out;
2212         }
2213
2214         /* prof becomes invalid after the call */
2215         status = ice_flow_rem_prof_sync(hw, blk, prof);
2216
2217 out:
2218         ice_release_lock(&hw->fl_profs_locks[blk]);
2219
2220         return status;
2221 }
2222
2223 /**
2224  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2225  * @hw: pointer to the HW struct
2226  * @blk: classification stage
2227  * @prof_id: the profile ID handle
2228  * @hw_prof_id: pointer to variable to receive the HW profile ID
2229  */
2230 enum ice_status
2231 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2232                      u8 *hw_prof_id)
2233 {
2234         enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2235         struct ice_prof_map *map;
2236
2237         ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2238         map = ice_search_prof_id(hw, blk, prof_id);
2239         if (map) {
2240                 *hw_prof_id = map->prof_id;
2241                 status = ICE_SUCCESS;
2242         }
2243         ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2244         return status;
2245 }
2246
2247 /**
2248  * ice_flow_find_entry - look for a flow entry using its unique ID
2249  * @hw: pointer to the HW struct
2250  * @blk: classification stage
2251  * @entry_id: unique ID to identify this flow entry
2252  *
2253  * This function looks for the flow entry with the specified unique ID in all
2254  * flow profiles of the specified classification stage. If the entry is found,
2255  * it returns the handle to the flow entry. Otherwise, it returns
2256  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2257  */
2258 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2259 {
2260         struct ice_flow_entry *found = NULL;
2261         struct ice_flow_prof *p;
2262
2263         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2264
2265         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2266                 struct ice_flow_entry *e;
2267
2268                 ice_acquire_lock(&p->entries_lock);
2269                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2270                         if (e->id == entry_id) {
2271                                 found = e;
2272                                 break;
2273                         }
2274                 ice_release_lock(&p->entries_lock);
2275
2276                 if (found)
2277                         break;
2278         }
2279
2280         ice_release_lock(&hw->fl_profs_locks[blk]);
2281
2282         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2283 }
2284
2285 /**
2286  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2287  * @hw: pointer to the hardware structure
2288  * @acts: array of actions to be performed on a match
2289  * @acts_cnt: number of actions
2290  * @cnt_alloc: indicates if an ACL counter has been allocated.
2291  */
2292 static enum ice_status
2293 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2294                            u8 acts_cnt, bool *cnt_alloc)
2295 {
2296         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2297         int i;
2298
2299         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2300         *cnt_alloc = false;
2301
2302         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2303                 return ICE_ERR_OUT_OF_RANGE;
2304
2305         for (i = 0; i < acts_cnt; i++) {
2306                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2307                     acts[i].type != ICE_FLOW_ACT_DROP &&
2308                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2309                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2310                         return ICE_ERR_CFG;
2311
2312                 /* If the caller wants to add two actions of the same type, it
2313                  * is considered an invalid configuration.
2314                  */
2315                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2316                         return ICE_ERR_PARAM;
2317         }
2318
2319         /* Checks if ACL counters are needed. */
2320         for (i = 0; i < acts_cnt; i++) {
2321                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2322                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2323                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2324                         struct ice_acl_cntrs cntrs;
2325                         enum ice_status status;
2326
2327                         cntrs.amount = 1;
2328                         cntrs.bank = 0; /* Only bank0 for the moment */
2329
2330                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2331                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2332                         else
2333                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2334
2335                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2336                         if (status)
2337                                 return status;
2338                         /* Counter index within the bank */
2339                         acts[i].data.acl_act.value =
2340                                                 CPU_TO_LE16(cntrs.first_cntr);
2341                         *cnt_alloc = true;
2342                 }
2343         }
2344
2345         return ICE_SUCCESS;
2346 }
2347
2348 /**
2349  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2350  * @fld: number of the given field
2351  * @info: info about field
2352  * @range_buf: range checker configuration buffer
2353  * @data: pointer to a data buffer containing flow entry's match values/masks
2354  * @range: Input/output param indicating which range checkers are being used
2355  */
2356 static void
2357 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2358                               struct ice_aqc_acl_profile_ranges *range_buf,
2359                               u8 *data, u8 *range)
2360 {
2361         u16 new_mask;
2362
2363         /* If not specified, default mask is all bits in field */
2364         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2365                     BIT(ice_flds_info[fld].size) - 1 :
2366                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
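        /* The mask, like the low/high boundaries below, is shifted by the
         * field's bit displacement so that it lines up with the extracted
         * 16-bit word the range checker compares against.
         */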
2367
2368         /* If the mask is 0, this input does not require its range checker
2369          * to be programmed.
2370          */
2371         if (new_mask) {
2372                 u16 new_high =
2373                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2374                 u16 new_low =
2375                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2376                 u8 range_idx = info->entry.val;
2377
2378                 range_buf->checker_cfg[range_idx].low_boundary =
2379                         CPU_TO_BE16(new_low);
2380                 range_buf->checker_cfg[range_idx].high_boundary =
2381                         CPU_TO_BE16(new_high);
2382                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2383
2384                 /* Indicate which range checker is being used */
2385                 *range |= BIT(range_idx);
2386         }
2387 }
2388
2389 /**
2390  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2391  * @fld: number of the given field
2392  * @info: info about the field
2393  * @buf: buffer containing the entry
2394  * @dontcare: buffer containing don't care mask for entry
2395  * @data: pointer to a data buffer containing flow entry's match values/masks
2396  */
2397 static void
2398 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2399                             u8 *dontcare, u8 *data)
2400 {
2401         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2402         bool use_mask = false;
2403         u8 disp;
2404
2405         src = info->src.val;
2406         mask = info->src.mask;
2407         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2408         disp = info->xtrct.disp % BITS_PER_BYTE;
2409
2410         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2411                 use_mask = true;
2412
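        /* Walk the destination bytes: each source byte is shifted left by
         * 'disp' bits, its low byte is OR-ed into the current destination
         * byte, and the high (overflow) byte is carried into the next
         * iteration via tmp_s/tmp_m.
         */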
2413         for (k = 0; k < info->entry.last; k++, dst++) {
2414                 /* Add overflow bits from previous byte */
2415                 buf[dst] = (tmp_s & 0xff00) >> 8;
2416
2417                 /* If the mask is not valid, tmp_m is always zero, so this just
2418                  * sets dontcare to 0 (no masked bits). If the mask is valid, this
2419                  * pulls in the overflow bits of the mask from the previous byte.
2420                  */
2421                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2422
2423                 /* If there is displacement, the last byte will only contain
2424                  * displaced data, and there is no more data to read from the
2425                  * user buffer; skip it so we do not read beyond the end of the
2426                  * user buffer.
2427                  */
2428                 if (!disp || k < info->entry.last - 1) {
2429                         /* Store shifted data to use in next byte */
2430                         tmp_s = data[src++] << disp;
2431
2432                         /* Add current (shifted) byte */
2433                         buf[dst] |= tmp_s & 0xff;
2434
2435                         /* Handle mask if valid */
2436                         if (use_mask) {
2437                                 tmp_m = (~data[mask++] & 0xff) << disp;
2438                                 dontcare[dst] |= tmp_m & 0xff;
2439                         }
2440                 }
2441         }
2442
2443         /* Fill in don't care bits at beginning of field */
2444         if (disp) {
2445                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2446                 for (k = 0; k < disp; k++)
2447                         dontcare[dst] |= BIT(k);
2448         }
2449
2450         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2451
2452         /* Fill in don't care bits at end of field */
2453         if (end_disp) {
2454                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2455                       info->entry.last - 1;
2456                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2457                         dontcare[dst] |= BIT(k);
2458         }
2459 }
2460
2461 /**
2462  * ice_flow_acl_frmt_entry - Format ACL entry
2463  * @hw: pointer to the hardware structure
2464  * @prof: pointer to flow profile
2465  * @e: pointer to the flow entry
2466  * @data: pointer to a data buffer containing flow entry's match values/masks
2467  * @acts: array of actions to be performed on a match
2468  * @acts_cnt: number of actions
2469  *
2470  * Formats the key (and key_inverse) to be matched from the data passed in,
2471  * along with data from the flow profile. This key/key_inverse pair makes up
2472  * the 'entry' for an ACL flow entry.
2473  */
2474 static enum ice_status
2475 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2476                         struct ice_flow_entry *e, u8 *data,
2477                         struct ice_flow_action *acts, u8 acts_cnt)
2478 {
2479         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2480         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2481         enum ice_status status;
2482         bool cnt_alloc;
2483         u8 prof_id = 0;
2484         u16 i, buf_sz;
2485
2486         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2487         if (status)
2488                 return status;
2489
2490         /* Format the result action */
2491
2492         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2493         if (status)
2494                 return status;
2495
2496         status = ICE_ERR_NO_MEMORY;
2497
2498         e->acts = (struct ice_flow_action *)
2499                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2500                            ICE_NONDMA_TO_NONDMA);
2501
2502         if (!e->acts)
2503                 goto out;
2504
2505         e->acts_cnt = acts_cnt;
2506
2507         /* Format the matching data */
2508         buf_sz = prof->cfg.scen->width;
2509         buf = (u8 *)ice_malloc(hw, buf_sz);
2510         if (!buf)
2511                 goto out;
2512
2513         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2514         if (!dontcare)
2515                 goto out;
2516
2517         /* The 'key' buffer will store both key and key_inverse, so it must be
2518          * twice the size of buf
2519          */
2520         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2521         if (!key)
2522                 goto out;
2523
2524         range_buf = (struct ice_aqc_acl_profile_ranges *)
2525                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2526         if (!range_buf)
2527                 goto out;
2528
2529         /* Set don't care mask to all 1's to start, will zero out used bytes */
2530         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2531
2532         for (i = 0; i < prof->segs_cnt; i++) {
2533                 struct ice_flow_seg_info *seg = &prof->segs[i];
2534                 u8 j;
2535
2536                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2537                                      ICE_FLOW_FIELD_IDX_MAX) {
2538                         struct ice_flow_fld_info *info = &seg->fields[j];
2539
2540                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2541                                 ice_flow_acl_frmt_entry_range(j, info,
2542                                                               range_buf, data,
2543                                                               &range);
2544                         else
2545                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2546                                                             dontcare, data);
2547                 }
2548
2549                 for (j = 0; j < seg->raws_cnt; j++) {
2550                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2551                         u16 dst, src, mask, k;
2552                         bool use_mask = false;
2553
2554                         src = info->src.val;
2555                         dst = info->entry.val -
2556                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2557                         mask = info->src.mask;
2558
2559                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2560                                 use_mask = true;
2561
2562                         for (k = 0; k < info->entry.last; k++, dst++) {
2563                                 buf[dst] = data[src++];
2564                                 if (use_mask)
2565                                         dontcare[dst] = ~data[mask++];
2566                                 else
2567                                         dontcare[dst] = 0;
2568                         }
2569                 }
2570         }
2571
2572         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2573         dontcare[prof->cfg.scen->pid_idx] = 0;
2574
2575         /* Format the buffer for direction flags */
2576         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2577
2578         if (prof->dir == ICE_FLOW_RX)
2579                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2580
2581         if (range) {
2582                 buf[prof->cfg.scen->rng_chk_idx] = range;
2583                 /* Mark any unused range checkers as don't care */
2584                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2585                 e->range_buf = range_buf;
2586         } else {
2587                 ice_free(hw, range_buf);
2588         }
2589
2590         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2591                              buf_sz);
2592         if (status)
2593                 goto out;
2594
2595         e->entry = key;
2596         e->entry_sz = buf_sz * 2;
2597
2598 out:
2599         if (buf)
2600                 ice_free(hw, buf);
2601
2602         if (dontcare)
2603                 ice_free(hw, dontcare);
2604
2605         if (status && key)
2606                 ice_free(hw, key);
2607
2608         if (status && range_buf) {
2609                 ice_free(hw, range_buf);
2610                 e->range_buf = NULL;
2611         }
2612
2613         if (status && e->acts) {
2614                 ice_free(hw, e->acts);
2615                 e->acts = NULL;
2616                 e->acts_cnt = 0;
2617         }
2618
2619         if (status && cnt_alloc)
2620                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2621
2622         return status;
2623 }
2624
2625 /**
2626  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2627  *                                     the compared data.
2628  * @prof: pointer to flow profile
2629  * @e: pointer to the comparing flow entry
2630  * @do_chg_action: decide if we want to change the ACL action
2631  * @do_add_entry: decide if we want to add the new ACL entry
2632  * @do_rem_entry: decide if we want to remove the current ACL entry
2633  *
2634  * Find an ACL scenario entry that matches the compared data. At the same time,
2635  * this function also figures out:
2636  * a/ If we want to change the ACL action
2637  * b/ If we want to add the new ACL entry
2638  * c/ If we want to remove the current ACL entry
2639  */
2640 static struct ice_flow_entry *
2641 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2642                                   struct ice_flow_entry *e, bool *do_chg_action,
2643                                   bool *do_add_entry, bool *do_rem_entry)
2644 {
2645         struct ice_flow_entry *p, *return_entry = NULL;
2646         u8 i, j;
2647
2648         /* Check if:
2649          * a/ An entry exists with the same matching data but a different
2650          *    priority; then we remove the existing ACL entry and add the
2651          *    new entry to the ACL scenario.
2652          * b/ An entry exists with the same matching data, priority, and
2653          *    result action; then we do nothing.
2654          * c/ An entry exists with the same matching data and priority but
2655          *    a different action; then we only change the entry's action.
2656          * d/ Otherwise, we add this new entry to the ACL scenario.
2657          */
2658         *do_chg_action = false;
2659         *do_add_entry = true;
2660         *do_rem_entry = false;
2661         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2662                 if (memcmp(p->entry, e->entry, p->entry_sz))
2663                         continue;
2664
2665                 /* From this point, we have the same matching_data. */
2666                 *do_add_entry = false;
2667                 return_entry = p;
2668
2669                 if (p->priority != e->priority) {
2670                         /* matching data && !priority */
2671                         *do_add_entry = true;
2672                         *do_rem_entry = true;
2673                         break;
2674                 }
2675
2676                 /* From this point, we will have matching_data && priority */
2677                 if (p->acts_cnt != e->acts_cnt)
2678                         *do_chg_action = true;
2679                 for (i = 0; i < p->acts_cnt; i++) {
2680                         bool found_not_match = false;
2681
2682                         for (j = 0; j < e->acts_cnt; j++)
2683                                 if (memcmp(&p->acts[i], &e->acts[j],
2684                                            sizeof(struct ice_flow_action))) {
2685                                         found_not_match = true;
2686                                         break;
2687                                 }
2688
2689                         if (found_not_match) {
2690                                 *do_chg_action = true;
2691                                 break;
2692                         }
2693                 }
2694
2695                 /* (do_chg_action = true) means :
2696                  *    matching_data && priority && !result_action
2697                  * (do_chg_action = false) means :
2698                  *    matching_data && priority && result_action
2699                  */
2700                 break;
2701         }
2702
2703         return return_entry;
2704 }
2705
2706 /**
2707  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2708  * @p: flow priority
2709  */
2710 static enum ice_acl_entry_prior
2711 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2712 {
2713         enum ice_acl_entry_prior acl_prior;
2714
2715         switch (p) {
2716         case ICE_FLOW_PRIO_LOW:
2717                 acl_prior = ICE_LOW;
2718                 break;
2719         case ICE_FLOW_PRIO_NORMAL:
2720                 acl_prior = ICE_NORMAL;
2721                 break;
2722         case ICE_FLOW_PRIO_HIGH:
2723                 acl_prior = ICE_HIGH;
2724                 break;
2725         default:
2726                 acl_prior = ICE_NORMAL;
2727                 break;
2728         }
2729
2730         return acl_prior;
2731 }
2732
2733 /**
2734  * ice_flow_acl_union_rng_chk - Perform union operation between two
2735  *                              range checker buffers
2736  * @dst_buf: pointer to destination range checker buffer
2737  * @src_buf: pointer to source range checker buffer
2738  *
2739  * This function performs the union of the dst_buf and src_buf range checker
2740  * buffers and saves the result back to dst_buf
2741  */
2742 static enum ice_status
2743 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2744                            struct ice_aqc_acl_profile_ranges *src_buf)
2745 {
2746         u8 i, j;
2747
2748         if (!dst_buf || !src_buf)
2749                 return ICE_ERR_BAD_PTR;
2750
2751         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2752                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2753                 bool will_populate = false;
2754
2755                 in_data = &src_buf->checker_cfg[i];
2756
2757                 if (!in_data->mask)
2758                         break;
2759
2760                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2761                         cfg_data = &dst_buf->checker_cfg[j];
2762
2763                         if (!cfg_data->mask ||
2764                             !memcmp(cfg_data, in_data,
2765                                     sizeof(struct ice_acl_rng_data))) {
2766                                 will_populate = true;
2767                                 break;
2768                         }
2769                 }
2770
2771                 if (will_populate) {
2772                         ice_memcpy(cfg_data, in_data,
2773                                    sizeof(struct ice_acl_rng_data),
2774                                    ICE_NONDMA_TO_NONDMA);
2775                 } else {
2776                         /* No available slot left to program range checker */
2777                         return ICE_ERR_MAX_LIMIT;
2778                 }
2779         }
2780
2781         return ICE_SUCCESS;
2782 }
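
/* Illustrative sketch with hypothetical range-checker contents A, B and C
 * (the checker_cfg slots are drawn as a 4-entry array purely for
 * illustration):
 *
 *   dst_buf: [A][B][-][-]   src_buf: [B][C][-][-]
 *   result:  [A][B][C][-]
 *
 * B is copied over its matching slot (a no-op) and C takes the first free
 * slot (mask == 0). If every slot already holds a different configuration,
 * the function returns ICE_ERR_MAX_LIMIT.
 */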
2783
2784 /**
2785  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2786  * @hw: pointer to the hardware structure
2787  * @prof: pointer to flow profile
2788  * @entry: double pointer to the flow entry
2789  *
2790  * This function looks at the entries currently added to the corresponding
2791  * ACL scenario, then performs matching logic to decide whether to add,
2792  * modify, or do nothing with this new entry.
2793  */
2794 static enum ice_status
2795 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2796                                  struct ice_flow_entry **entry)
2797 {
2798         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2799         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2800         struct ice_acl_act_entry *acts = NULL;
2801         struct ice_flow_entry *exist;
2802         enum ice_status status = ICE_SUCCESS;
2803         struct ice_flow_entry *e;
2804         u8 i;
2805
2806         if (!entry || !(*entry) || !prof)
2807                 return ICE_ERR_BAD_PTR;
2808
2809         e = *(entry);
2810
2811         do_chg_rng_chk = false;
2812         if (e->range_buf) {
2813                 u8 prof_id = 0;
2814
2815                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2816                                               &prof_id);
2817                 if (status)
2818                         return status;
2819
2820                 /* Query the current range-checker value in FW */
2821                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2822                                                    NULL);
2823                 if (status)
2824                         return status;
2825                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2826                            sizeof(struct ice_aqc_acl_profile_ranges),
2827                            ICE_NONDMA_TO_NONDMA);
2828
2829                 /* Generate the new range-checker value */
2830                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2831                 if (status)
2832                         return status;
2833
2834                 /* Reconfigure the range check if the buffer is changed. */
2835                 do_chg_rng_chk = false;
2836                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2837                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2838                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2839                                                           &cfg_rng_buf, NULL);
2840                         if (status)
2841                                 return status;
2842
2843                         do_chg_rng_chk = true;
2844                 }
2845         }
2846
2847         /* Figure out if we want to change the ACL action and/or add the new
2848          * ACL entry and/or remove the current ACL entry
2849          */
2850         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2851                                                   &do_add_entry, &do_rem_entry);
2852
2853         if (do_rem_entry) {
2854                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2855                 if (status)
2856                         return status;
2857         }
2858
2859         /* Prepare the result action buffer */
2860         acts = (struct ice_acl_act_entry *)ice_calloc
2861                 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
             if (!acts)
                     return ICE_ERR_NO_MEMORY;
2862         for (i = 0; i < e->acts_cnt; i++)
2863                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2864                            sizeof(struct ice_acl_act_entry),
2865                            ICE_NONDMA_TO_NONDMA);
2866
2867         if (do_add_entry) {
2868                 enum ice_acl_entry_prior prior;
2869                 u8 *keys, *inverts;
2870                 u16 entry_idx;
2871
2872                 keys = (u8 *)e->entry;
2873                 inverts = keys + (e->entry_sz / 2);
2874                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2875
2876                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2877                                            inverts, acts, e->acts_cnt,
2878                                            &entry_idx);
2879                 if (status)
2880                         goto out;
2881
2882                 e->scen_entry_idx = entry_idx;
2883                 LIST_ADD(&e->l_entry, &prof->entries);
2884         } else {
2885                 if (do_chg_action) {
2886                         /* For the action memory info, update the SW copy of
2887                          * the existing entry with e's action memory info
2888                          */
2889                         ice_free(hw, exist->acts);
2890                         exist->acts_cnt = e->acts_cnt;
2891                         exist->acts = (struct ice_flow_action *)
2892                                 ice_calloc(hw, exist->acts_cnt,
2893                                            sizeof(struct ice_flow_action));
2894
2895                         if (!exist->acts) {
2896                                 status = ICE_ERR_NO_MEMORY;
2897                                 goto out;
2898                         }
2899
2900                         ice_memcpy(exist->acts, e->acts,
2901                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2902                                    ICE_NONDMA_TO_NONDMA);
2903
2904                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2905                                                   e->acts_cnt,
2906                                                   exist->scen_entry_idx);
2907                         if (status)
2908                                 goto out;
2909                 }
2910
2911                 if (do_chg_rng_chk) {
2912                         /* In this case, we want to update the range checker
2913                          * information of the existing entry
2914                          */
2915                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2916                                                             e->range_buf);
2917                         if (status)
2918                                 goto out;
2919                 }
2920
2921                 /* As we don't add the new entry to our SW DB, deallocate its
2922                  * memory and return the existing entry to the caller
2923                  */
2924                 ice_dealloc_flow_entry(hw, e);
2925                 *(entry) = exist;
2926         }
2927 out:
2928         if (acts)
2929                 ice_free(hw, acts);
2930
2931         return status;
2932 }
2933
2934 /**
2935  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2936  * @hw: pointer to the hardware structure
2937  * @prof: pointer to flow profile
2938  * @e: double pointer to the flow entry
2939  */
2940 static enum ice_status
2941 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2942                             struct ice_flow_entry **e)
2943 {
2944         enum ice_status status;
2945
2946         ice_acquire_lock(&prof->entries_lock);
2947         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2948         ice_release_lock(&prof->entries_lock);
2949
2950         return status;
2951 }
2952
2953 /**
2954  * ice_flow_add_entry - Add a flow entry
2955  * @hw: pointer to the HW struct
2956  * @blk: classification stage
2957  * @prof_id: ID of the profile to add a new flow entry to
2958  * @entry_id: unique ID to identify this flow entry
2959  * @vsi_handle: software VSI handle for the flow entry
2960  * @prio: priority of the flow entry
2961  * @data: pointer to a data buffer containing flow entry's match values/masks
2962  * @acts: array of actions to be performed on a match
2963  * @acts_cnt: number of actions
2964  * @entry_h: pointer to buffer that receives the new flow entry's handle
2965  */
2966 enum ice_status
2967 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2968                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2969                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
2970                    u64 *entry_h)
2971 {
2972         struct ice_flow_entry *e = NULL;
2973         struct ice_flow_prof *prof;
2974         enum ice_status status = ICE_SUCCESS;
2975
2976         /* ACL entries must indicate an action */
2977         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2978                 return ICE_ERR_PARAM;
2979
2980         /* No flow entry data is expected for RSS */
2981         if (!entry_h || (!data && blk != ICE_BLK_RSS))
2982                 return ICE_ERR_BAD_PTR;
2983
2984         if (!ice_is_vsi_valid(hw, vsi_handle))
2985                 return ICE_ERR_PARAM;
2986
2987         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2988
2989         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2990         if (!prof) {
2991                 status = ICE_ERR_DOES_NOT_EXIST;
2992         } else {
2993                 /* Allocate memory for the entry being added and associate
2994                  * the VSI to the found flow profile
2995                  */
2996                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2997                 if (!e)
2998                         status = ICE_ERR_NO_MEMORY;
2999                 else
3000                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3001         }
3002
3003         ice_release_lock(&hw->fl_profs_locks[blk]);
3004         if (status)
3005                 goto out;
3006
3007         e->id = entry_id;
3008         e->vsi_handle = vsi_handle;
3009         e->prof = prof;
3010         e->priority = prio;
3011
3012         switch (blk) {
3013         case ICE_BLK_FD:
3014         case ICE_BLK_RSS:
3015                 break;
3016         case ICE_BLK_ACL:
3017                 /* ACL will handle the entry management */
3018                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3019                                                  acts_cnt);
3020                 if (status)
3021                         goto out;
3022
3023                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3024                 if (status)
3025                         goto out;
3026
3027                 break;
3028         default:
3029                 status = ICE_ERR_NOT_IMPL;
3030                 goto out;
3031         }
3032
3033         if (blk != ICE_BLK_ACL) {
3034                 /* ACL will handle the entry management */
3035                 ice_acquire_lock(&prof->entries_lock);
3036                 LIST_ADD(&e->l_entry, &prof->entries);
3037                 ice_release_lock(&prof->entries_lock);
3038         }
3039
3040         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3041
3042 out:
3043         if (status && e) {
3044                 if (e->entry)
3045                         ice_free(hw, e->entry);
3046                 ice_free(hw, e);
3047         }
3048
3049         return status;
3050 }
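
/* Illustrative usage sketch (hw, prof_id, entry_id, vsi_handle and match_buf
 * are hypothetical caller-side values): adding a Flow Director entry to an
 * existing profile. ICE_BLK_FD accepts a NULL action array here, whereas
 * ICE_BLK_ACL requires at least one action.
 *
 *	u64 entry_h = ICE_FLOW_ENTRY_HANDLE_INVAL;
 *	enum ice_status status;
 *
 *	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, entry_id,
 *				    vsi_handle, ICE_FLOW_PRIO_NORMAL,
 *				    match_buf, NULL, 0, &entry_h);
 *
 * On success, entry_h identifies the entry and can later be passed to
 * ice_flow_rem_entry(hw, ICE_BLK_FD, entry_h).
 */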
3051
3052 /**
3053  * ice_flow_rem_entry - Remove a flow entry
3054  * @hw: pointer to the HW struct
3055  * @blk: classification stage
3056  * @entry_h: handle to the flow entry to be removed
3057  */
3058 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3059                                    u64 entry_h)
3060 {
3061         struct ice_flow_entry *entry;
3062         struct ice_flow_prof *prof;
3063         enum ice_status status = ICE_SUCCESS;
3064
3065         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3066                 return ICE_ERR_PARAM;
3067
3068         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
3069
3070         /* Retain the pointer to the flow profile as the entry will be freed */
3071         prof = entry->prof;
3072
3073         if (prof) {
3074                 ice_acquire_lock(&prof->entries_lock);
3075                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3076                 ice_release_lock(&prof->entries_lock);
3077         }
3078
3079         return status;
3080 }
3081
3082 /**
3083  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3084  * @seg: packet segment the field being set belongs to
3085  * @fld: field to be set
3086  * @field_type: type of the field
3087  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3088  *           entry's input buffer
3089  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3090  *            input buffer
3091  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3092  *            entry's input buffer
3093  *
3094  * This helper function stores information of a field being matched, including
3095  * the type of the field and the locations of the value to match, the mask,
3096  * and the upper-bound value in the start of the input buffer for a flow entry.
3097  * This function should only be used for fixed-size data structures.
3098  *
3099  * This function also opportunistically determines the protocol headers to be
3100  * present based on the fields being set. Some fields cannot be used alone to
3101  * determine the protocol headers present. Sometimes, fields for particular
3102  * protocol headers are not matched. In those cases, the protocol headers
3103  * must be explicitly set.
3104  */
3105 static void
3106 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3107                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3108                      u16 mask_loc, u16 last_loc)
3109 {
3110         u64 bit = BIT_ULL(fld);
3111
3112         seg->match |= bit;
3113         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3114                 seg->range |= bit;
3115
3116         seg->fields[fld].type = field_type;
3117         seg->fields[fld].src.val = val_loc;
3118         seg->fields[fld].src.mask = mask_loc;
3119         seg->fields[fld].src.last = last_loc;
3120
3121         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3122 }
3123
3124 /**
3125  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3126  * @seg: packet segment the field being set belongs to
3127  * @fld: field to be set
3128  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3129  *           entry's input buffer
3130  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3131  *            input buffer
3132  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3133  *            entry's input buffer
3134  * @range: indicate if field being matched is to be in a range
3135  *
3136  * This function specifies the locations, in the form of byte offsets from the
3137  * start of the input buffer for a flow entry, from where the value to match,
3138  * the mask value, and upper value can be extracted. These locations are then
3139  * stored in the flow profile. When adding a flow entry associated with the
3140  * flow profile, these locations will be used to quickly extract the values and
3141  * create the content of a match entry. This function should only be used for
3142  * fixed-size data structures.
3143  */
3144 void
3145 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3146                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3147 {
3148         enum ice_flow_fld_match_type t = range ?
3149                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3150
3151         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3152 }
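
/* Illustrative usage sketch (struct example_fd_buf and its layout are
 * hypothetical; callers define their own match-buffer layout and pass byte
 * offsets into it). Matching the IPv4 source address with an explicit mask
 * and the TCP destination port without one might look like:
 *
 *	struct example_fd_buf {
 *		u32 ip4_src;
 *		u32 ip4_src_mask;
 *		u16 tcp_dst;
 *	};
 *
 *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *			 offsetof(struct example_fd_buf, ip4_src),
 *			 offsetof(struct example_fd_buf, ip4_src_mask),
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
 *			 offsetof(struct example_fd_buf, tcp_dst),
 *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 */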
3153
3154 /**
3155  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3156  * @seg: packet segment the field being set belongs to
3157  * @fld: field to be set
3158  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3159  *           entry's input buffer
3160  * @pref_loc: location of prefix value from entry's input buffer
3161  * @pref_sz: size of the location holding the prefix value
3162  *
3163  * This function specifies the locations, in the form of byte offsets from the
3164  * start of the input buffer for a flow entry, from where the value to match
3165  * and the IPv4 prefix value can be extracted. These locations are then stored
3166  * in the flow profile. When adding flow entries to the associated flow profile,
3167  * these locations can be used to quickly extract the values to create the
3168  * content of a match entry. This function should only be used for fixed-size
3169  * data structures.
3170  */
3171 void
3172 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3173                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3174 {
3175         /* For this type of field, the "mask" location stores the location of
3176          * the prefix value, and the "last" location stores the size of the
3177          * location holding the prefix value.
3178          */
3179         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3180                              pref_loc, (u16)pref_sz);
3181 }
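
/* Illustrative usage sketch (the dst_ip_off and pref_len_off buffer offsets
 * and the one-byte prefix-length location are hypothetical): matching a
 * destination IPv4 address by prefix, where the prefix length is stored as
 * a single byte in the caller's input buffer:
 *
 *	ice_flow_set_fld_prefix(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
 *				dst_ip_off, pref_len_off, 1);
 */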
3182
3183 /**
3184  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3185  * @seg: packet segment the field being set belongs to
3186  * @off: offset of the raw field from the beginning of the segment in bytes
3187  * @len: length of the raw pattern to be matched
3188  * @val_loc: location of the value to match from entry's input buffer
3189  * @mask_loc: location of mask value from entry's input buffer
3190  *
3191  * This function specifies the offset of the raw field to be matched from the
3192  * beginning of the specified packet segment, and the locations, in the form of
3193  * byte offsets from the start of the input buffer for a flow entry, from where
3194  * the value to match and the mask value can be extracted. These locations are
3195  * then stored in the flow profile. When adding flow entries to the associated
3196  * flow profile, these locations can be used to quickly extract the values to
3197  * create the content of a match entry. This function should only be used for
3198  * fixed-size data structures.
3199  */
3200 void
3201 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3202                      u16 val_loc, u16 mask_loc)
3203 {
3204         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3205                 seg->raws[seg->raws_cnt].off = off;
3206                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3207                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3208                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3209                 /* The "last" field is used to store the length of the field */
3210                 seg->raws[seg->raws_cnt].info.src.last = len;
3211         }
3212
3213         /* Overflows of "raws" will be handled as an error condition later in
3214          * the flow when this information is processed.
3215          */
3216         seg->raws_cnt++;
3217 }
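
/* Illustrative usage sketch (the 16-byte segment offset, 4-byte pattern
 * length and the raw_val_off/raw_msk_off buffer offsets are hypothetical):
 * matching a raw 4-byte pattern that starts 16 bytes into the segment, with
 * the pattern and its mask read from the caller's input buffer:
 *
 *	ice_flow_add_fld_raw(seg, 16, 4, raw_val_off, raw_msk_off);
 */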
3218
3219 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3220         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3221
3222 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3223         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3224
3225 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3226         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3227          ICE_FLOW_SEG_HDR_SCTP)
3228
3229 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3230         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3231          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3232          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3233
3234 /**
3235  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3236  * @segs: pointer to the flow field segment(s)
3237  * @hash_fields: fields to be hashed on for the segment(s)
3238  * @flow_hdr: protocol header fields within a packet segment
3239  *
3240  * Helper function to extract fields from the hash bitmap and use the flow
3241  * header value to set the flow field segment for further use in flow
3242  * profile entry or removal.
3243  */
3244 static enum ice_status
3245 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3246                           u32 flow_hdr)
3247 {
3248         u64 val;
3249         u8 i;
3250
3251         ice_for_each_set_bit(i, (ice_bitmap_t *)&hash_fields,
3252                              ICE_FLOW_FIELD_IDX_MAX)
3253                 ice_flow_set_fld(segs, (enum ice_flow_field)i,
3254                                  ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3255                                  ICE_FLOW_FLD_OFF_INVAL, false);
3256
3257         ICE_FLOW_SET_HDRS(segs, flow_hdr);
3258
3259         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3260             ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
3261                 return ICE_ERR_PARAM;
3262
3263         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3264         if (val && !ice_is_pow2(val))
3265                 return ICE_ERR_CFG;
3266
3267         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3268         if (val && !ice_is_pow2(val))
3269                 return ICE_ERR_CFG;
3270
3271         return ICE_SUCCESS;
3272 }
3273
3274 /**
3275  * ice_rem_vsi_rss_list - remove VSI from RSS list
3276  * @hw: pointer to the hardware structure
3277  * @vsi_handle: software VSI handle
3278  *
3279  * Remove the VSI from all RSS configurations in the list.
3280  */
3281 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3282 {
3283         struct ice_rss_cfg *r, *tmp;
3284
3285         if (LIST_EMPTY(&hw->rss_list_head))
3286                 return;
3287
3288         ice_acquire_lock(&hw->rss_locks);
3289         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3290                                  ice_rss_cfg, l_entry)
3291                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3292                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3293                                 LIST_DEL(&r->l_entry);
3294                                 ice_free(hw, r);
3295                         }
3296         ice_release_lock(&hw->rss_locks);
3297 }
3298
3299 /**
3300  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3301  * @hw: pointer to the hardware structure
3302  * @vsi_handle: software VSI handle
3303  *
3304  * This function will iterate through all flow profiles and disassociate
3305  * the VSI from each profile. If a flow profile then has no VSIs, it will
3306  * be removed.
3307  */
3308 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3309 {
3310         const enum ice_block blk = ICE_BLK_RSS;
3311         struct ice_flow_prof *p, *t;
3312         enum ice_status status = ICE_SUCCESS;
3313
3314         if (!ice_is_vsi_valid(hw, vsi_handle))
3315                 return ICE_ERR_PARAM;
3316
3317         if (LIST_EMPTY(&hw->fl_profs[blk]))
3318                 return ICE_SUCCESS;
3319
3320         ice_acquire_lock(&hw->rss_locks);
3321         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3322                                  l_entry)
3323                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3324                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3325                         if (status)
3326                                 break;
3327
3328                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3329                                 status = ice_flow_rem_prof(hw, blk, p->id);
3330                                 if (status)
3331                                         break;
3332                         }
3333                 }
3334         ice_release_lock(&hw->rss_locks);
3335
3336         return status;
3337 }
3338
3339 /**
3340  * ice_rem_rss_list - remove RSS configuration from list
3341  * @hw: pointer to the hardware structure
3342  * @vsi_handle: software VSI handle
3343  * @prof: pointer to flow profile
3344  *
3345  * Assumption: lock has already been acquired for RSS list
3346  */
3347 static void
3348 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3349 {
3350         struct ice_rss_cfg *r, *tmp;
3351
3352         /* Search for RSS hash fields associated with the VSI that match the
3353          * hash configurations associated with the flow profile. If found,
3354          * remove it from the VSI context's RSS entry list and delete the entry.
3355          */
3356         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3357                                  ice_rss_cfg, l_entry)
3358                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3359                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3360                         ice_clear_bit(vsi_handle, r->vsis);
3361                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3362                                 LIST_DEL(&r->l_entry);
3363                                 ice_free(hw, r);
3364                         }
3365                         return;
3366                 }
3367 }
3368
3369 /**
3370  * ice_add_rss_list - add RSS configuration to list
3371  * @hw: pointer to the hardware structure
3372  * @vsi_handle: software VSI handle
3373  * @prof: pointer to flow profile
3374  *
3375  * Assumption: lock has already been acquired for RSS list
3376  */
3377 static enum ice_status
3378 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3379 {
3380         struct ice_rss_cfg *r, *rss_cfg;
3381
3382         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3383                             ice_rss_cfg, l_entry)
3384                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3385                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3386                         ice_set_bit(vsi_handle, r->vsis);
3387                         return ICE_SUCCESS;
3388                 }
3389
3390         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3391         if (!rss_cfg)
3392                 return ICE_ERR_NO_MEMORY;
3393
3394         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3395         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3396         rss_cfg->symm = prof->cfg.symm;
3397         ice_set_bit(vsi_handle, rss_cfg->vsis);
3398
3399         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3400
3401         return ICE_SUCCESS;
3402 }
3403
3404 #define ICE_FLOW_PROF_HASH_S    0
3405 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3406 #define ICE_FLOW_PROF_HDR_S     32
3407 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3408 #define ICE_FLOW_PROF_ENCAP_S   63
3409 #define ICE_FLOW_PROF_ENCAP_M   (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
3410
3411 #define ICE_RSS_OUTER_HEADERS   1
3412 #define ICE_RSS_INNER_HEADERS   2
3413
3414 /* Flow profile ID format:
3415  * [0:31] - Packet match fields
3416  * [32:62] - Protocol header
3417  * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
3418  */
3419 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3420         (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3421               (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3422               ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
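
/* Worked example (hypothetical inputs): ICE_FLOW_GEN_PROFID(0x123, 0x4, 2)
 * yields 0x8000000400000123ULL: the hash bitmap in bits [0:31], the header
 * bitmap in bits [32:62], and bit 63 set because segs_cnt > 1 marks the
 * configuration as tunneled.
 */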
3423
3424 static void
3425 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3426 {
3427         u32 s = ((src % 4) << 3); /* byte shift */
3428         u32 v = dst | 0x80; /* value to program */
3429         u8 i = src / 4; /* register index */
3430         u32 reg;
3431
3432         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3433         reg = (reg & ~(0xff << s)) | (v << s);
3434         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3435 }
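
/* Worked example (hypothetical field-vector indices): for src == 6 and
 * dst == 9, the word is programmed into GLQF_HSYMM register i = 6 / 4 = 1
 * at byte lane s = (6 % 4) * 8 = 16, and the byte written is 0x89
 * (destination index 9 with the 0x80 flag ORed in above).
 */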
3436
3437 static void
3438 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3439 {
3440         int fv_last_word =
3441                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3442         int i;
3443
3444         for (i = 0; i < len; i++) {
3445                 ice_rss_config_xor_word(hw, prof_id,
3446                                         /* Yes, the field vector order in
3447                                          * GLQF_HSYMM and GLQF_HINSET is reversed!
3448                                          */
3449                                         fv_last_word - (src + i),
3450                                         fv_last_word - (dst + i));
3451                 ice_rss_config_xor_word(hw, prof_id,
3452                                         fv_last_word - (dst + i),
3453                                         fv_last_word - (src + i));
3454         }
3455 }
3456
3457 static void
3458 ice_rss_update_symm(struct ice_hw *hw,
3459                     struct ice_flow_prof *prof)
3460 {
3461         struct ice_prof_map *map;
3462         u8 prof_id, m;
3463
3464         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3465         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3466         if (map)
3467                 prof_id = map->prof_id;
3468         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3469         if (!map)
3470                 return;
3471         /* clear to default */
3472         for (m = 0; m < 6; m++)
3473                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3474         if (prof->cfg.symm) {
3475                 struct ice_flow_seg_info *seg =
3476                         &prof->segs[prof->segs_cnt - 1];
3477
3478                 struct ice_flow_seg_xtrct *ipv4_src =
3479                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3480                 struct ice_flow_seg_xtrct *ipv4_dst =
3481                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3482                 struct ice_flow_seg_xtrct *ipv6_src =
3483                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3484                 struct ice_flow_seg_xtrct *ipv6_dst =
3485                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3486
3487                 struct ice_flow_seg_xtrct *tcp_src =
3488                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3489                 struct ice_flow_seg_xtrct *tcp_dst =
3490                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3491
3492                 struct ice_flow_seg_xtrct *udp_src =
3493                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3494                 struct ice_flow_seg_xtrct *udp_dst =
3495                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3496
3497                 struct ice_flow_seg_xtrct *sctp_src =
3498                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3499                 struct ice_flow_seg_xtrct *sctp_dst =
3500                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3501
3502                 /* xor IPv4 */
3503                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3504                         ice_rss_config_xor(hw, prof_id,
3505                                            ipv4_src->idx, ipv4_dst->idx, 2);
3506
3507                 /* xor IPv6 */
3508                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3509                         ice_rss_config_xor(hw, prof_id,
3510                                            ipv6_src->idx, ipv6_dst->idx, 8);
3511
3512                 /* xor TCP */
3513                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3514                         ice_rss_config_xor(hw, prof_id,
3515                                            tcp_src->idx, tcp_dst->idx, 1);
3516
3517                 /* xor UDP */
3518                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3519                         ice_rss_config_xor(hw, prof_id,
3520                                            udp_src->idx, udp_dst->idx, 1);
3521
3522                 /* xor SCTP */
3523                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3524                         ice_rss_config_xor(hw, prof_id,
3525                                            sctp_src->idx, sctp_dst->idx, 1);
3526         }
3527 }
3528
3529 /**
3530  * ice_add_rss_cfg_sync - add an RSS configuration
3531  * @hw: pointer to the hardware structure
3532  * @vsi_handle: software VSI handle
3533  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3534  * @addl_hdrs: protocol header fields
3535  * @segs_cnt: packet segment count
3536  * @symm: symmetric hash enable/disable
3537  *
3538  * Assumption: lock has already been acquired for RSS list
3539  */
3540 static enum ice_status
3541 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3542                      u32 addl_hdrs, u8 segs_cnt, bool symm)
3543 {
3544         const enum ice_block blk = ICE_BLK_RSS;
3545         struct ice_flow_prof *prof = NULL;
3546         struct ice_flow_seg_info *segs;
3547         enum ice_status status;
3548
3549         if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3550                 return ICE_ERR_PARAM;
3551
3552         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3553                                                       sizeof(*segs));
3554         if (!segs)
3555                 return ICE_ERR_NO_MEMORY;
3556
3557         /* Construct the packet segment info from the hashed fields */
3558         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3559                                            addl_hdrs);
3560         if (status)
3561                 goto exit;
3562
3563         /* don't do RSS for GTPU outer */
3564         if (segs_cnt == ICE_RSS_OUTER_HEADERS &&
3565             segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3566                 status = ICE_SUCCESS;
3567                 goto exit;
3568         }
3569
3570         /* Search for a flow profile that has matching headers, hash fields
3571          * and has the input VSI associated with it. If found, no further
3572          * operations are required, so exit.
3573          */
3574         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3575                                         vsi_handle,
3576                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
3577                                         ICE_FLOW_FIND_PROF_CHK_VSI);
3578         if (prof) {
3579                 if (prof->cfg.symm == symm)
3580                         goto exit;
3581                 prof->cfg.symm = symm;
3582                 goto update_symm;
3583         }
3584
3585         /* Check if a flow profile exists with the same protocol headers and
3586          * associated with the input VSI. If so, disassociate the VSI from
3587          * this profile. The VSI will be added to a new profile created with
3588          * the protocol header and new hash field configuration.
3589          */
3590         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3591                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3592         if (prof) {
3593                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3594                 if (!status)
3595                         ice_rem_rss_list(hw, vsi_handle, prof);
3596                 else
3597                         goto exit;
3598
3599                 /* Remove profile if it has no VSIs associated */
3600                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3601                         status = ice_flow_rem_prof(hw, blk, prof->id);
3602                         if (status)
3603                                 goto exit;
3604                 }
3605         }
3606
3607         /* Search for a profile that has the same match fields only. If one
3608          * exists, then associate the VSI with this profile.
3609          */
3610         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3611                                         vsi_handle,
3612                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3613         if (prof) {
3614                 if (prof->cfg.symm == symm) {
3615                         status = ice_flow_assoc_prof(hw, blk, prof,
3616                                                      vsi_handle);
3617                         if (!status)
3618                                 status = ice_add_rss_list(hw, vsi_handle,
3619                                                           prof);
3620                 } else {
3621                         /* if a profile exists but with a different symmetric
3622                          * requirement, just return an error.
3623                          */
3624                         status = ICE_ERR_NOT_SUPPORTED;
3625                 }
3626                 goto exit;
3627         }
3628
3629         /* Create a new flow profile with generated profile and packet
3630          * segment information.
3631          */
3632         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3633                                    ICE_FLOW_GEN_PROFID(hashed_flds,
3634                                                        segs[segs_cnt - 1].hdrs,
3635                                                        segs_cnt),
3636                                    segs, segs_cnt, NULL, 0, &prof);
3637         if (status)
3638                 goto exit;
3639
3640         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3641         /* If association to a new flow profile failed then this profile can
3642          * be removed.
3643          */
3644         if (status) {
3645                 ice_flow_rem_prof(hw, blk, prof->id);
3646                 goto exit;
3647         }
3648
3649         status = ice_add_rss_list(hw, vsi_handle, prof);
3650
3651         prof->cfg.symm = symm;
3652
3653 update_symm:
3654         ice_rss_update_symm(hw, prof);
3655
3656 exit:
3657         ice_free(hw, segs);
3658         return status;
3659 }
3660
3661 /**
3662  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3663  * @hw: pointer to the hardware structure
3664  * @vsi_handle: software VSI handle
3665  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3666  * @addl_hdrs: protocol header fields
3667  * @symm: symmetric hash enable/disable
3668  *
3669  * This function will generate a flow profile based on fields associated with
3670  * the input fields to hash on, the flow type and use the VSI number to add
3671  * a flow entry to the profile.
3672  */
3673 enum ice_status
3674 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3675                 u32 addl_hdrs, bool symm)
3676 {
3677         enum ice_status status;
3678
3679         if (hashed_flds == ICE_HASH_INVALID ||
3680             !ice_is_vsi_valid(hw, vsi_handle))
3681                 return ICE_ERR_PARAM;
3682
3683         ice_acquire_lock(&hw->rss_locks);
3684         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3685                                       ICE_RSS_OUTER_HEADERS, symm);
3686
3687         if (!status)
3688                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3689                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3690                                               symm);
3691         ice_release_lock(&hw->rss_locks);
3692
3693         return status;
3694 }
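
/* Illustrative usage sketch (hw and vsi_handle are hypothetical): enable
 * symmetric RSS hashing on the IPv4 source/destination addresses. The
 * hashed-field bitmap is built here from ICE_FLOW_FIELD_IDX_* bits; the
 * ICE_FLOW_HASH_* convenience macros could equally be used.
 *
 *	u64 flds = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *		   BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA);
 *	enum ice_status status;
 *
 *	status = ice_add_rss_cfg(hw, vsi_handle, flds,
 *				 ICE_FLOW_SEG_HDR_IPV4, true);
 */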
3695
3696 /**
3697  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3698  * @hw: pointer to the hardware structure
3699  * @vsi_handle: software VSI handle
3700  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3701  * @addl_hdrs: Protocol header fields within a packet segment
3702  * @segs_cnt: packet segment count
3703  *
3704  * Assumption: lock has already been acquired for RSS list
3705  */
3706 static enum ice_status
3707 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3708                      u32 addl_hdrs, u8 segs_cnt)
3709 {
3710         const enum ice_block blk = ICE_BLK_RSS;
3711         struct ice_flow_seg_info *segs;
3712         struct ice_flow_prof *prof;
3713         enum ice_status status;
3714
3715         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3716                                                       sizeof(*segs));
3717         if (!segs)
3718                 return ICE_ERR_NO_MEMORY;
3719
3720         /* Construct the packet segment info from the hashed fields */
3721         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3722                                            addl_hdrs);
3723         if (status)
3724                 goto out;
3725
3726         if (segs_cnt == ICE_RSS_OUTER_HEADERS &&
3727             segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3728                 status = ICE_SUCCESS;
3729                 goto out;
3730         }
3731
3732         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3733                                         vsi_handle,
3734                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3735         if (!prof) {
3736                 status = ICE_ERR_DOES_NOT_EXIST;
3737                 goto out;
3738         }
3739
3740         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3741         if (status)
3742                 goto out;
3743
3744         /* Remove RSS configuration from VSI context before deleting
3745          * the flow profile.
3746          */
3747         ice_rem_rss_list(hw, vsi_handle, prof);
3748
3749         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3750                 status = ice_flow_rem_prof(hw, blk, prof->id);
3751
3752 out:
3753         ice_free(hw, segs);
3754         return status;
3755 }
3756
3757 /**
3758  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3759  * @hw: pointer to the hardware structure
3760  * @vsi_handle: software VSI handle
3761  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3762  * @addl_hdrs: Protocol header fields within a packet segment
3763  *
3764  * This function will look up the flow profile based on the input
3765  * hash field bitmap, iterate through the profile entry list of
3766  * that profile and find the entry associated with the input VSI to be
3767  * removed. Calls are made to underlying flow APIs which will in
3768  * turn build or update buffers for the RSS XLT1 section.
3769  */
3770 enum ice_status
3771 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3772                 u32 addl_hdrs)
3773 {
3774         enum ice_status status;
3775
3776         if (hashed_flds == ICE_HASH_INVALID ||
3777             !ice_is_vsi_valid(hw, vsi_handle))
3778                 return ICE_ERR_PARAM;
3779
3780         ice_acquire_lock(&hw->rss_locks);
3781         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3782                                       ICE_RSS_OUTER_HEADERS);
3783         if (!status)
3784                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3785                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3786         ice_release_lock(&hw->rss_locks);
3787
3788         return status;
3789 }
3790
3791 /**
3792  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3793  * @hw: pointer to the hardware structure
3794  * @vsi_handle: software VSI handle
3795  */
3796 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3797 {
3798         enum ice_status status = ICE_SUCCESS;
3799         struct ice_rss_cfg *r;
3800
3801         if (!ice_is_vsi_valid(hw, vsi_handle))
3802                 return ICE_ERR_PARAM;
3803
3804         ice_acquire_lock(&hw->rss_locks);
3805         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3806                             ice_rss_cfg, l_entry) {
3807                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3808                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3809                                                       r->hashed_flds,
3810                                                       r->packet_hdr,
3811                                                       ICE_RSS_OUTER_HEADERS,
3812                                                       r->symm);
3813                         if (status)
3814                                 break;
3815                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3816                                                       r->hashed_flds,
3817                                                       r->packet_hdr,
3818                                                       ICE_RSS_INNER_HEADERS,
3819                                                       r->symm);
3820                         if (status)
3821                                 break;
3822                 }
3823         }
3824         ice_release_lock(&hw->rss_locks);
3825
3826         return status;
3827 }
3828
3829 /**
3830  * ice_get_rss_cfg - returns hashed fields for the given header types
3831  * @hw: pointer to the hardware structure
3832  * @vsi_handle: software VSI handle
3833  * @hdrs: protocol header type
3834  *
3835  * This function will return the match fields of the first instance of a flow
3836  * profile having the given header types and containing the input VSI
3837  */
3838 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3839 {
3840         u64 rss_hash = ICE_HASH_INVALID;
3841         struct ice_rss_cfg *r;
3842
3843         /* verify that the protocol header is non-zero and the VSI is valid */
3844         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3845                 return ICE_HASH_INVALID;
3846
3847         ice_acquire_lock(&hw->rss_locks);
3848         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3849                             ice_rss_cfg, l_entry)
3850                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3851                     r->packet_hdr == hdrs) {
3852                         rss_hash = r->hashed_flds;
3853                         break;
3854                 }
3855         ice_release_lock(&hw->rss_locks);
3856
3857         return rss_hash;
3858 }
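
/* Illustrative usage sketch (hw and vsi_handle are hypothetical): query the
 * fields currently hashed for IPv4+TCP traffic on a VSI; ICE_HASH_INVALID is
 * returned when no matching RSS configuration exists.
 *
 *	u64 flds = ice_get_rss_cfg(hw, vsi_handle,
 *				   ICE_FLOW_SEG_HDR_IPV4 |
 *				   ICE_FLOW_SEG_HDR_TCP);
 */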