net/ice/base: rename function
dpdk.git: drivers/net/ice/base/ice_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
17 #define ICE_FLOW_FLD_SZ_IP_TTL          1
18 #define ICE_FLOW_FLD_SZ_IP_PROT         1
19 #define ICE_FLOW_FLD_SZ_PORT            2
20 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
21 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
22 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
23 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
24 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
25 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
26 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
27 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
28 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
29 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
30 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
31 #define ICE_FLOW_FLD_SZ_AH_SPI  4
32 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
33
34 /* Describe properties of a protocol header field */
35 struct ice_flow_field_info {
36         enum ice_flow_seg_hdr hdr;
37         s16 off;        /* Offset from start of a protocol header, in bits */
38         u16 size;       /* Size of the field, in bits */
39         u16 mask;       /* 16-bit mask for field */
40 };
41
42 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
43         .hdr = _hdr, \
44         .off = (_offset_bytes) * BITS_PER_BYTE, \
45         .size = (_size_bytes) * BITS_PER_BYTE, \
46         .mask = 0, \
47 }
48
49 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
50         .hdr = _hdr, \
51         .off = (_offset_bytes) * BITS_PER_BYTE, \
52         .size = (_size_bytes) * BITS_PER_BYTE, \
53         .mask = _mask, \
54 }
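
/*
 * Illustrative expansion (not an additional table entry): the macros above
 * take byte offsets and sizes and store them in bits, so, assuming
 * BITS_PER_BYTE is 8, the IPv4 source address entry used below in
 * ice_flds_info[] expands to:
 *
 *   ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR)
 *     == { .hdr = ICE_FLOW_SEG_HDR_IPV4, .off = 96, .size = 32, .mask = 0 }
 */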
55
56 /* Table containing properties of supported protocol header fields */
57 static const
58 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
59         /* Ether */
60         /* ICE_FLOW_FIELD_IDX_ETH_DA */
61         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
62         /* ICE_FLOW_FIELD_IDX_ETH_SA */
63         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
64         /* ICE_FLOW_FIELD_IDX_S_VLAN */
65         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
66         /* ICE_FLOW_FIELD_IDX_C_VLAN */
67         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
68         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
69         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
70         /* IPv4 / IPv6 */
71         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
72         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
73                               0x00fc),
74         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
75         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
76                               0x0ff0),
77         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
78         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
80         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
81         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
82                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
83         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
84         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
86         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
87         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
88                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
89         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
90         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
91         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
92         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
94         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
95         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
96         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
99                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
100         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
101         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
102                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
103         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
104         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
105                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
106         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
107         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
108                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
109         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
110         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
111                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
112         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
113         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
114                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
115         /* Transport */
116         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
117         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
118         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
119         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
120         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
121         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
122         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
123         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
124         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
125         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
126         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
127         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
128         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
129         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
130         /* ARP */
131         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
132         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
133         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
134         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
136         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
137         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
138         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
139         /* ICE_FLOW_FIELD_IDX_ARP_OP */
140         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
141         /* ICMP */
142         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
143         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
144         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
145         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
146         /* GRE */
147         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
148         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
149         /* GTP */
150         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
151         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
152                           ICE_FLOW_FLD_SZ_GTP_TEID),
153         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
154         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
155                           ICE_FLOW_FLD_SZ_GTP_TEID),
156         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
157         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
158                           ICE_FLOW_FLD_SZ_GTP_TEID),
159         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
160         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
161                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
162         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
163         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
164                           ICE_FLOW_FLD_SZ_GTP_TEID),
165         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
166         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
167                           ICE_FLOW_FLD_SZ_GTP_TEID),
168         /* PPPOE */
169         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
170         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
171                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
172         /* PFCP */
173         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
174         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
175                           ICE_FLOW_FLD_SZ_PFCP_SEID),
176         /* L2TPV3 */
177         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
178         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
179                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
180         /* ESP */
181         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
182         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
183                           ICE_FLOW_FLD_SZ_ESP_SPI),
184         /* AH */
185         /* ICE_FLOW_FIELD_IDX_AH_SPI */
186         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
187                           ICE_FLOW_FLD_SZ_AH_SPI),
188         /* NAT_T_ESP */
189         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
190         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
191                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
192 };
193
194 /* Bitmaps indicating relevant packet types for a particular protocol header
195  *
196  * Packet types for packets with an Outer/First/Single MAC header
197  */
198 static const u32 ice_ptypes_mac_ofos[] = {
199         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
200         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
201         0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
202         0x00000000, 0x00000000, 0x00000000, 0x00000000,
203         0x00000000, 0x00000000, 0x00000000, 0x00000000,
204         0x00000000, 0x00000000, 0x00000000, 0x00000000,
205         0x00000000, 0x00000000, 0x00000000, 0x00000000,
206         0x00000000, 0x00000000, 0x00000000, 0x00000000,
207 };
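
/*
 * Reader's sketch (not driver code): each of these tables is a bitmap over
 * packet types.  Assuming the conventional layout in which packet type N
 * corresponds to bit (N % 32) of 32-bit word (N / 32), membership can be
 * tested as below; the helper name is made up for illustration.
 */
static inline bool example_ptype_is_set(const u32 *ptypes, u16 ptype)
{
        return (ptypes[ptype / 32] >> (ptype % 32)) & 0x1;
}
/* e.g. example_ptype_is_set(ice_ptypes_mac_ofos, 1) is true, since bit 1 of
 * word 0 (0xFDC00846) is set.
 */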
208
209 /* Packet types for packets with an Innermost/Last MAC VLAN header */
210 static const u32 ice_ptypes_macvlan_il[] = {
211         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
212         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
213         0x00000000, 0x00000000, 0x00000000, 0x00000000,
214         0x00000000, 0x00000000, 0x00000000, 0x00000000,
215         0x00000000, 0x00000000, 0x00000000, 0x00000000,
216         0x00000000, 0x00000000, 0x00000000, 0x00000000,
217         0x00000000, 0x00000000, 0x00000000, 0x00000000,
218         0x00000000, 0x00000000, 0x00000000, 0x00000000,
219 };
220
221 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
222  * include IPV4 other PTYPEs
223  */
224 static const u32 ice_ptypes_ipv4_ofos[] = {
225         0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
226         0x00000000, 0x00000155, 0x00000000, 0x00000000,
227         0x00000000, 0x000FC000, 0x000002A0, 0x00000000,
228         0x00000000, 0x00000000, 0x00000000, 0x00000000,
229         0x00000000, 0x00000000, 0x00000000, 0x00000000,
230         0x00000000, 0x00000000, 0x00000000, 0x00000000,
231         0x00000000, 0x00000000, 0x00000000, 0x00000000,
232         0x00000000, 0x00000000, 0x00000000, 0x00000000,
233 };
234
235 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
236  * IPV4 other PTYPEs
237  */
238 static const u32 ice_ptypes_ipv4_ofos_all[] = {
239         0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
240         0x00000000, 0x00000155, 0x00000000, 0x00000000,
241         0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
242         0x00000000, 0x00000000, 0x00000000, 0x00000000,
243         0x00000000, 0x00000000, 0x00000000, 0x00000000,
244         0x00000000, 0x00000000, 0x00000000, 0x00000000,
245         0x00000000, 0x00000000, 0x00000000, 0x00000000,
246         0x00000000, 0x00000000, 0x00000000, 0x00000000,
247 };
248
249 /* Packet types for packets with an Innermost/Last IPv4 header */
250 static const u32 ice_ptypes_ipv4_il[] = {
251         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
252         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
253         0x00000000, 0x00000000, 0x001FF800, 0x00000000,
254         0x00000000, 0x00000000, 0x00000000, 0x00000000,
255         0x00000000, 0x00000000, 0x00000000, 0x00000000,
256         0x00000000, 0x00000000, 0x00000000, 0x00000000,
257         0x00000000, 0x00000000, 0x00000000, 0x00000000,
258         0x00000000, 0x00000000, 0x00000000, 0x00000000,
259 };
260
261 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
262  * include IPV6 other PTYPEs
263  */
264 static const u32 ice_ptypes_ipv6_ofos[] = {
265         0x00000000, 0x00000000, 0x77000000, 0x10002000,
266         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
267         0x00000000, 0x03F00000, 0x00000540, 0x00000000,
268         0x00000000, 0x00000000, 0x00000000, 0x00000000,
269         0x00000000, 0x00000000, 0x00000000, 0x00000000,
270         0x00000000, 0x00000000, 0x00000000, 0x00000000,
271         0x00000000, 0x00000000, 0x00000000, 0x00000000,
272         0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 };
274
275 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
276  * IPV6 other PTYPEs
277  */
278 static const u32 ice_ptypes_ipv6_ofos_all[] = {
279         0x00000000, 0x00000000, 0x77000000, 0x10002000,
280         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
281         0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
282         0x00000000, 0x00000000, 0x00000000, 0x00000000,
283         0x00000000, 0x00000000, 0x00000000, 0x00000000,
284         0x00000000, 0x00000000, 0x00000000, 0x00000000,
285         0x00000000, 0x00000000, 0x00000000, 0x00000000,
286         0x00000000, 0x00000000, 0x00000000, 0x00000000,
287 };
288
289 /* Packet types for packets with an Innermost/Last IPv6 header */
290 static const u32 ice_ptypes_ipv6_il[] = {
291         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
292         0x00000770, 0x00000000, 0x00000000, 0x00000000,
293         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
294         0x00000000, 0x00000000, 0x00000000, 0x00000000,
295         0x00000000, 0x00000000, 0x00000000, 0x00000000,
296         0x00000000, 0x00000000, 0x00000000, 0x00000000,
297         0x00000000, 0x00000000, 0x00000000, 0x00000000,
298         0x00000000, 0x00000000, 0x00000000, 0x00000000,
299 };
300
301 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
302 static const u32 ice_ipv4_ofos_no_l4[] = {
303         0x10C00000, 0x04000800, 0x00000000, 0x00000000,
304         0x00000000, 0x00000000, 0x00000000, 0x00000000,
305         0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
306         0x00000000, 0x00000000, 0x00000000, 0x00000000,
307         0x00000000, 0x00000000, 0x00000000, 0x00000000,
308         0x00000000, 0x00000000, 0x00000000, 0x00000000,
309         0x00000000, 0x00000000, 0x00000000, 0x00000000,
310         0x00000000, 0x00000000, 0x00000000, 0x00000000,
311 };
312
313 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
314 static const u32 ice_ipv4_il_no_l4[] = {
315         0x60000000, 0x18043008, 0x80000002, 0x6010c021,
316         0x00000008, 0x00000000, 0x00000000, 0x00000000,
317         0x00000000, 0x00000000, 0x00139800, 0x00000000,
318         0x00000000, 0x00000000, 0x00000000, 0x00000000,
319         0x00000000, 0x00000000, 0x00000000, 0x00000000,
320         0x00000000, 0x00000000, 0x00000000, 0x00000000,
321         0x00000000, 0x00000000, 0x00000000, 0x00000000,
322         0x00000000, 0x00000000, 0x00000000, 0x00000000,
323 };
324
325 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
326 static const u32 ice_ipv6_ofos_no_l4[] = {
327         0x00000000, 0x00000000, 0x43000000, 0x10002000,
328         0x00000000, 0x00000000, 0x00000000, 0x00000000,
329         0x00000000, 0x02300000, 0x00000540, 0x00000000,
330         0x00000000, 0x00000000, 0x00000000, 0x00000000,
331         0x00000000, 0x00000000, 0x00000000, 0x00000000,
332         0x00000000, 0x00000000, 0x00000000, 0x00000000,
333         0x00000000, 0x00000000, 0x00000000, 0x00000000,
334         0x00000000, 0x00000000, 0x00000000, 0x00000000,
335 };
336
337 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
338 static const u32 ice_ipv6_il_no_l4[] = {
339         0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
340         0x00000430, 0x00000000, 0x00000000, 0x00000000,
341         0x00000000, 0x00000000, 0x4e600000, 0x00000000,
342         0x00000000, 0x00000000, 0x00000000, 0x00000000,
343         0x00000000, 0x00000000, 0x00000000, 0x00000000,
344         0x00000000, 0x00000000, 0x00000000, 0x00000000,
345         0x00000000, 0x00000000, 0x00000000, 0x00000000,
346         0x00000000, 0x00000000, 0x00000000, 0x00000000,
347 };
348
349 /* Packet types for packets with an Outermost/First ARP header */
350 static const u32 ice_ptypes_arp_of[] = {
351         0x00000800, 0x00000000, 0x00000000, 0x00000000,
352         0x00000000, 0x00000000, 0x00000000, 0x00000000,
353         0x00000000, 0x00000000, 0x00000000, 0x00000000,
354         0x00000000, 0x00000000, 0x00000000, 0x00000000,
355         0x00000000, 0x00000000, 0x00000000, 0x00000000,
356         0x00000000, 0x00000000, 0x00000000, 0x00000000,
357         0x00000000, 0x00000000, 0x00000000, 0x00000000,
358         0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 };
360
361 /* UDP Packet types for non-tunneled packets or tunneled
362  * packets with inner UDP.
363  */
364 static const u32 ice_ptypes_udp_il[] = {
365         0x81000000, 0x20204040, 0x04000010, 0x80810102,
366         0x00000040, 0x00000000, 0x00000000, 0x00000000,
367         0x00000000, 0x00410000, 0x90842000, 0x00000007,
368         0x00000000, 0x00000000, 0x00000000, 0x00000000,
369         0x00000000, 0x00000000, 0x00000000, 0x00000000,
370         0x00000000, 0x00000000, 0x00000000, 0x00000000,
371         0x00000000, 0x00000000, 0x00000000, 0x00000000,
372         0x00000000, 0x00000000, 0x00000000, 0x00000000,
373 };
374
375 /* Packet types for packets with an Innermost/Last TCP header */
376 static const u32 ice_ptypes_tcp_il[] = {
377         0x04000000, 0x80810102, 0x10000040, 0x02040408,
378         0x00000102, 0x00000000, 0x00000000, 0x00000000,
379         0x00000000, 0x00820000, 0x21084000, 0x00000000,
380         0x00000000, 0x00000000, 0x00000000, 0x00000000,
381         0x00000000, 0x00000000, 0x00000000, 0x00000000,
382         0x00000000, 0x00000000, 0x00000000, 0x00000000,
383         0x00000000, 0x00000000, 0x00000000, 0x00000000,
384         0x00000000, 0x00000000, 0x00000000, 0x00000000,
385 };
386
387 /* Packet types for packets with an Innermost/Last SCTP header */
388 static const u32 ice_ptypes_sctp_il[] = {
389         0x08000000, 0x01020204, 0x20000081, 0x04080810,
390         0x00000204, 0x00000000, 0x00000000, 0x00000000,
391         0x00000000, 0x01040000, 0x00000000, 0x00000000,
392         0x00000000, 0x00000000, 0x00000000, 0x00000000,
393         0x00000000, 0x00000000, 0x00000000, 0x00000000,
394         0x00000000, 0x00000000, 0x00000000, 0x00000000,
395         0x00000000, 0x00000000, 0x00000000, 0x00000000,
396         0x00000000, 0x00000000, 0x00000000, 0x00000000,
397 };
398
399 /* Packet types for packets with an Outermost/First ICMP header */
400 static const u32 ice_ptypes_icmp_of[] = {
401         0x10000000, 0x00000000, 0x00000000, 0x00000000,
402         0x00000000, 0x00000000, 0x00000000, 0x00000000,
403         0x00000000, 0x00000000, 0x00000000, 0x00000000,
404         0x00000000, 0x00000000, 0x00000000, 0x00000000,
405         0x00000000, 0x00000000, 0x00000000, 0x00000000,
406         0x00000000, 0x00000000, 0x00000000, 0x00000000,
407         0x00000000, 0x00000000, 0x00000000, 0x00000000,
408         0x00000000, 0x00000000, 0x00000000, 0x00000000,
409 };
410
411 /* Packet types for packets with an Innermost/Last ICMP header */
412 static const u32 ice_ptypes_icmp_il[] = {
413         0x00000000, 0x02040408, 0x40000102, 0x08101020,
414         0x00000408, 0x00000000, 0x00000000, 0x00000000,
415         0x00000000, 0x00000000, 0x42108000, 0x00000000,
416         0x00000000, 0x00000000, 0x00000000, 0x00000000,
417         0x00000000, 0x00000000, 0x00000000, 0x00000000,
418         0x00000000, 0x00000000, 0x00000000, 0x00000000,
419         0x00000000, 0x00000000, 0x00000000, 0x00000000,
420         0x00000000, 0x00000000, 0x00000000, 0x00000000,
421 };
422
423 /* Packet types for packets with an Outermost/First GRE header */
424 static const u32 ice_ptypes_gre_of[] = {
425         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
426         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
427         0x00000000, 0x00000000, 0x00000000, 0x00000000,
428         0x00000000, 0x00000000, 0x00000000, 0x00000000,
429         0x00000000, 0x00000000, 0x00000000, 0x00000000,
430         0x00000000, 0x00000000, 0x00000000, 0x00000000,
431         0x00000000, 0x00000000, 0x00000000, 0x00000000,
432         0x00000000, 0x00000000, 0x00000000, 0x00000000,
433 };
434
435 /* Packet types for packets with an Innermost/Last MAC header */
436 static const u32 ice_ptypes_mac_il[] = {
437         0x00000000, 0x00000000, 0x00000000, 0x00000000,
438         0x00000000, 0x00000000, 0x00000000, 0x00000000,
439         0x00000000, 0x00000000, 0x00000000, 0x00000000,
440         0x00000000, 0x00000000, 0x00000000, 0x00000000,
441         0x00000000, 0x00000000, 0x00000000, 0x00000000,
442         0x00000000, 0x00000000, 0x00000000, 0x00000000,
443         0x00000000, 0x00000000, 0x00000000, 0x00000000,
444         0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 };
446
447 /* Packet types for GTPC */
448 static const u32 ice_ptypes_gtpc[] = {
449         0x00000000, 0x00000000, 0x00000000, 0x00000000,
450         0x00000000, 0x00000000, 0x00000000, 0x00000000,
451         0x00000000, 0x00000000, 0x000001E0, 0x00000000,
452         0x00000000, 0x00000000, 0x00000000, 0x00000000,
453         0x00000000, 0x00000000, 0x00000000, 0x00000000,
454         0x00000000, 0x00000000, 0x00000000, 0x00000000,
455         0x00000000, 0x00000000, 0x00000000, 0x00000000,
456         0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 };
458
459 /* Packet types for GTPC with TEID */
460 static const u32 ice_ptypes_gtpc_tid[] = {
461         0x00000000, 0x00000000, 0x00000000, 0x00000000,
462         0x00000000, 0x00000000, 0x00000000, 0x00000000,
463         0x00000000, 0x00000000, 0x00000060, 0x00000000,
464         0x00000000, 0x00000000, 0x00000000, 0x00000000,
465         0x00000000, 0x00000000, 0x00000000, 0x00000000,
466         0x00000000, 0x00000000, 0x00000000, 0x00000000,
467         0x00000000, 0x00000000, 0x00000000, 0x00000000,
468         0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 };
470
471 /* Packet types for GTPU */
472 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
473         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
474         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
475         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
476         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
477         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
478         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
479         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
480         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
481         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
482         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
483         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
484         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
485         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
486         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
487         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
488         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
489         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
490         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
491         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
492         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
493 };
494
495 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
496         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
497         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
498         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
499         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
500         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
501         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
502         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
503         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
504         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
505         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
506         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
507         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
508         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
509         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
510         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
511         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
512         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
513         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
514         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
515         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
516 };
517
518 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
519         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
520         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
521         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
522         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
523         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
524         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
525         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
526         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
527         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
528         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
529         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
530         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
531         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
532         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
533         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
534         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
535         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
536         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
537         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
538         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
539 };
540
541 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
542         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
543         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
544         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
545         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
546         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
547         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
548         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
549         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
550         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
551         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
552         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
553         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
554         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
555         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
556         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
557         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
558         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
559         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
560         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
561         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
562 };
563
564 static const u32 ice_ptypes_gtpu[] = {
565         0x00000000, 0x00000000, 0x00000000, 0x00000000,
566         0x00000000, 0x00000000, 0x00000000, 0x00000000,
567         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
568         0x00000000, 0x00000000, 0x00000000, 0x00000000,
569         0x00000000, 0x00000000, 0x00000000, 0x00000000,
570         0x00000000, 0x00000000, 0x00000000, 0x00000000,
571         0x00000000, 0x00000000, 0x00000000, 0x00000000,
572         0x00000000, 0x00000000, 0x00000000, 0x00000000,
573 };
574
575 /* Packet types for PPPoE */
576 static const u32 ice_ptypes_pppoe[] = {
577         0x00000000, 0x00000000, 0x00000000, 0x00000000,
578         0x00000000, 0x00000000, 0x00000000, 0x00000000,
579         0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
580         0x00000000, 0x00000000, 0x00000000, 0x00000000,
581         0x00000000, 0x00000000, 0x00000000, 0x00000000,
582         0x00000000, 0x00000000, 0x00000000, 0x00000000,
583         0x00000000, 0x00000000, 0x00000000, 0x00000000,
584         0x00000000, 0x00000000, 0x00000000, 0x00000000,
585 };
586
587 /* Packet types for packets with PFCP NODE header */
588 static const u32 ice_ptypes_pfcp_node[] = {
589         0x00000000, 0x00000000, 0x00000000, 0x00000000,
590         0x00000000, 0x00000000, 0x00000000, 0x00000000,
591         0x00000000, 0x00000000, 0x80000000, 0x00000002,
592         0x00000000, 0x00000000, 0x00000000, 0x00000000,
593         0x00000000, 0x00000000, 0x00000000, 0x00000000,
594         0x00000000, 0x00000000, 0x00000000, 0x00000000,
595         0x00000000, 0x00000000, 0x00000000, 0x00000000,
596         0x00000000, 0x00000000, 0x00000000, 0x00000000,
597 };
598
599 /* Packet types for packets with PFCP SESSION header */
600 static const u32 ice_ptypes_pfcp_session[] = {
601         0x00000000, 0x00000000, 0x00000000, 0x00000000,
602         0x00000000, 0x00000000, 0x00000000, 0x00000000,
603         0x00000000, 0x00000000, 0x00000000, 0x00000005,
604         0x00000000, 0x00000000, 0x00000000, 0x00000000,
605         0x00000000, 0x00000000, 0x00000000, 0x00000000,
606         0x00000000, 0x00000000, 0x00000000, 0x00000000,
607         0x00000000, 0x00000000, 0x00000000, 0x00000000,
608         0x00000000, 0x00000000, 0x00000000, 0x00000000,
609 };
610
611 /* Packet types for L2TPv3 */
612 static const u32 ice_ptypes_l2tpv3[] = {
613         0x00000000, 0x00000000, 0x00000000, 0x00000000,
614         0x00000000, 0x00000000, 0x00000000, 0x00000000,
615         0x00000000, 0x00000000, 0x00000000, 0x00000300,
616         0x00000000, 0x00000000, 0x00000000, 0x00000000,
617         0x00000000, 0x00000000, 0x00000000, 0x00000000,
618         0x00000000, 0x00000000, 0x00000000, 0x00000000,
619         0x00000000, 0x00000000, 0x00000000, 0x00000000,
620         0x00000000, 0x00000000, 0x00000000, 0x00000000,
621 };
622
623 /* Packet types for ESP */
624 static const u32 ice_ptypes_esp[] = {
625         0x00000000, 0x00000000, 0x00000000, 0x00000000,
626         0x00000000, 0x00000003, 0x00000000, 0x00000000,
627         0x00000000, 0x00000000, 0x00000000, 0x00000000,
628         0x00000000, 0x00000000, 0x00000000, 0x00000000,
629         0x00000000, 0x00000000, 0x00000000, 0x00000000,
630         0x00000000, 0x00000000, 0x00000000, 0x00000000,
631         0x00000000, 0x00000000, 0x00000000, 0x00000000,
632         0x00000000, 0x00000000, 0x00000000, 0x00000000,
633 };
634
635 /* Packet types for AH */
636 static const u32 ice_ptypes_ah[] = {
637         0x00000000, 0x00000000, 0x00000000, 0x00000000,
638         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
639         0x00000000, 0x00000000, 0x00000000, 0x00000000,
640         0x00000000, 0x00000000, 0x00000000, 0x00000000,
641         0x00000000, 0x00000000, 0x00000000, 0x00000000,
642         0x00000000, 0x00000000, 0x00000000, 0x00000000,
643         0x00000000, 0x00000000, 0x00000000, 0x00000000,
644         0x00000000, 0x00000000, 0x00000000, 0x00000000,
645 };
646
647 /* Packet types for packets with NAT_T ESP header */
648 static const u32 ice_ptypes_nat_t_esp[] = {
649         0x00000000, 0x00000000, 0x00000000, 0x00000000,
650         0x00000000, 0x00000030, 0x00000000, 0x00000000,
651         0x00000000, 0x00000000, 0x00000000, 0x00000000,
652         0x00000000, 0x00000000, 0x00000000, 0x00000000,
653         0x00000000, 0x00000000, 0x00000000, 0x00000000,
654         0x00000000, 0x00000000, 0x00000000, 0x00000000,
655         0x00000000, 0x00000000, 0x00000000, 0x00000000,
656         0x00000000, 0x00000000, 0x00000000, 0x00000000,
657 };
658
659 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
660         0x00000846, 0x00000000, 0x00000000, 0x00000000,
661         0x00000000, 0x00000000, 0x00000000, 0x00000000,
662         0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
663         0x00000000, 0x00000000, 0x00000000, 0x00000000,
664         0x00000000, 0x00000000, 0x00000000, 0x00000000,
665         0x00000000, 0x00000000, 0x00000000, 0x00000000,
666         0x00000000, 0x00000000, 0x00000000, 0x00000000,
667         0x00000000, 0x00000000, 0x00000000, 0x00000000,
668 };
669
670 static const u32 ice_ptypes_gtpu_no_ip[] = {
671         0x00000000, 0x00000000, 0x00000000, 0x00000000,
672         0x00000000, 0x00000000, 0x00000000, 0x00000000,
673         0x00000000, 0x00000000, 0x00000600, 0x00000000,
674         0x00000000, 0x00000000, 0x00000000, 0x00000000,
675         0x00000000, 0x00000000, 0x00000000, 0x00000000,
676         0x00000000, 0x00000000, 0x00000000, 0x00000000,
677         0x00000000, 0x00000000, 0x00000000, 0x00000000,
678         0x00000000, 0x00000000, 0x00000000, 0x00000000,
679 };
680
681 /* Parameters and info used during the creation of a flow profile */
682 struct ice_flow_prof_params {
683         enum ice_block blk;
684         u16 entry_length; /* # of bytes a formatted entry will require */
685         u8 es_cnt;
686         struct ice_flow_prof *prof;
687
688         /* For ACL, es[0] will hold the data of ICE_RX_MDID_PKT_FLAGS_15_0,
689          * which provides the direction flags.
690          */
691         struct ice_fv_word es[ICE_MAX_FV_WORDS];
692         /* attributes to be added to particular PTYPEs */
693         const struct ice_ptype_attributes *attr;
694         u16 attr_cnt;
695
696         u16 mask[ICE_MAX_FV_WORDS];
697         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
698 };
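
/*
 * Example of how this structure is used by the helpers below:
 * ice_flow_proc_seg_hdrs() narrows the ptypes bitmap to the packet types
 * implied by the segments, while ice_flow_xtract_pkt_flags() consumes one
 * es[] slot, setting prot_id to ICE_PROT_META_ID and off to the requested
 * packet-flags MDID.
 */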
699
700 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
701         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
702         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
703         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
704         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
705         ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP)
706
707 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
708         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
709 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
710         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
711          ICE_FLOW_SEG_HDR_ARP)
712 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
713         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
714          ICE_FLOW_SEG_HDR_SCTP)
715 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
716 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
717         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
718
719 /**
720  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
721  * @segs: array of one or more packet segments that describe the flow
722  * @segs_cnt: number of packet segments provided
723  */
724 static enum ice_status
725 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
726 {
727         u8 i;
728
729         for (i = 0; i < segs_cnt; i++) {
730                 /* Multiple L3 headers */
731                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
732                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
733                         return ICE_ERR_PARAM;
734
735                 /* Multiple L4 headers */
736                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
737                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
738                         return ICE_ERR_PARAM;
739         }
740
741         return ICE_SUCCESS;
742 }
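
/*
 * Reader's sketch (not driver code) of the check above: a segment may
 * request at most one L3 and at most one L4 header, which holds exactly when
 * the masked header bitmap is zero or a power of two (a single bit set).
 * The helper below is illustrative only.
 */
static inline bool example_at_most_one_hdr(u32 hdrs, u32 mask)
{
        u32 v = hdrs & mask;

        return !v || !(v & (v - 1)); /* power-of-two test, cf. ice_is_pow2() */
}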
743
744 /* Sizes of fixed known protocol headers without header options */
745 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
746 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
747 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
748 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
749 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
750 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
751 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
752 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
753 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
754
755 /**
756  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
757  * @params: information about the flow to be processed
758  * @seg: index of packet segment whose header size is to be determined
759  */
760 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
761 {
762         u16 sz;
763
764         /* L2 headers */
765         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
766                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
767
768         /* L3 headers */
769         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
770                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
771         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
772                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
773         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
774                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
775         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
776                 /* An L3 header is required if an L4 header is specified */
777                 return 0;
778
779         /* L4 headers */
780         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
781                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
782         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
783                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
784         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
785                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
786         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
787                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
788
789         return sz;
790 }
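
/*
 * Worked example of the calculation above: a segment with
 * ICE_FLOW_SEG_HDR_VLAN, ICE_FLOW_SEG_HDR_IPV4 and ICE_FLOW_SEG_HDR_TCP set
 * yields ICE_FLOW_PROT_HDR_SZ_MAC_VLAN + ICE_FLOW_PROT_HDR_SZ_IPV4 +
 * ICE_FLOW_PROT_HDR_SZ_TCP = 16 + 20 + 20 = 56 bytes.
 */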
791
792 /**
793  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
794  * @params: information about the flow to be processed
795  *
796  * This function identifies the packet types associated with the protocol
797  * headers being present in packet segments of the specified flow profile.
798  * headers present in the packet segments of the specified flow profile.
799 static enum ice_status
800 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
801 {
802         struct ice_flow_prof *prof;
803         u8 i;
804
805         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
806                    ICE_NONDMA_MEM);
807
808         prof = params->prof;
809
810         for (i = 0; i < params->prof->segs_cnt; i++) {
811                 const ice_bitmap_t *src;
812                 u32 hdrs;
813
814                 hdrs = prof->segs[i].hdrs;
815
816                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
817                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
818                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
819                         ice_and_bitmap(params->ptypes, params->ptypes, src,
820                                        ICE_FLOW_PTYPE_MAX);
821                 }
822
823                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
824                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
825                         ice_and_bitmap(params->ptypes, params->ptypes, src,
826                                        ICE_FLOW_PTYPE_MAX);
827                 }
828
829                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
830                         ice_and_bitmap(params->ptypes, params->ptypes,
831                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
832                                        ICE_FLOW_PTYPE_MAX);
833                 }
834
835                 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
836                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
837                         ice_and_bitmap(params->ptypes, params->ptypes, src,
838                                        ICE_FLOW_PTYPE_MAX);
839                 }
840                 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
841                     (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
842                         src = i ?
843                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
844                                 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
845                         ice_and_bitmap(params->ptypes, params->ptypes, src,
846                                        ICE_FLOW_PTYPE_MAX);
847                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
848                            (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
849                         src = i ?
850                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
851                                 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
852                         ice_and_bitmap(params->ptypes, params->ptypes, src,
853                                        ICE_FLOW_PTYPE_MAX);
854                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
855                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
856                         src = !i ? (const ice_bitmap_t *)ice_ipv4_ofos_no_l4 :
857                                 (const ice_bitmap_t *)ice_ipv4_il_no_l4;
858                         ice_and_bitmap(params->ptypes, params->ptypes, src,
859                                        ICE_FLOW_PTYPE_MAX);
860                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
861                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
862                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
863                         ice_and_bitmap(params->ptypes, params->ptypes, src,
864                                        ICE_FLOW_PTYPE_MAX);
865                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
866                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
867                         src = !i ? (const ice_bitmap_t *)ice_ipv6_ofos_no_l4 :
868                                 (const ice_bitmap_t *)ice_ipv6_il_no_l4;
869                         ice_and_bitmap(params->ptypes, params->ptypes, src,
870                                        ICE_FLOW_PTYPE_MAX);
871                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
872                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
873                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
874                         ice_and_bitmap(params->ptypes, params->ptypes, src,
875                                        ICE_FLOW_PTYPE_MAX);
876                 }
877
878                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
879                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
880                         ice_and_bitmap(params->ptypes, params->ptypes,
881                                        src, ICE_FLOW_PTYPE_MAX);
882                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
883                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
884                         ice_and_bitmap(params->ptypes, params->ptypes, src,
885                                        ICE_FLOW_PTYPE_MAX);
886                 } else {
887                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
888                         ice_andnot_bitmap(params->ptypes, params->ptypes, src,
889                                           ICE_FLOW_PTYPE_MAX);
890                 }
891
892                 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
893                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
894                         ice_and_bitmap(params->ptypes, params->ptypes, src,
895                                        ICE_FLOW_PTYPE_MAX);
896                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
897                         ice_and_bitmap(params->ptypes, params->ptypes,
898                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
899                                        ICE_FLOW_PTYPE_MAX);
900                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
901                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
902                         ice_and_bitmap(params->ptypes, params->ptypes, src,
903                                        ICE_FLOW_PTYPE_MAX);
904                 }
905
906                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
907                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
908                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
909                         ice_and_bitmap(params->ptypes, params->ptypes, src,
910                                        ICE_FLOW_PTYPE_MAX);
911                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
912                         if (!i) {
913                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
914                                 ice_and_bitmap(params->ptypes, params->ptypes,
915                                                src, ICE_FLOW_PTYPE_MAX);
916                         }
917                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
918                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
919                         ice_and_bitmap(params->ptypes, params->ptypes,
920                                        src, ICE_FLOW_PTYPE_MAX);
921                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
922                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
923                         ice_and_bitmap(params->ptypes, params->ptypes,
924                                        src, ICE_FLOW_PTYPE_MAX);
925                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
926                         src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
927                         ice_and_bitmap(params->ptypes, params->ptypes,
928                                        src, ICE_FLOW_PTYPE_MAX);
929                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
930                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
931                         ice_and_bitmap(params->ptypes, params->ptypes,
932                                        src, ICE_FLOW_PTYPE_MAX);
933
934                         /* Attributes for GTP packet with downlink */
935                         params->attr = ice_attr_gtpu_down;
936                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
937                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
938                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
939                         ice_and_bitmap(params->ptypes, params->ptypes,
940                                        src, ICE_FLOW_PTYPE_MAX);
941
942                         /* Attributes for GTP packet with uplink */
943                         params->attr = ice_attr_gtpu_up;
944                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
945                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
946                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
947                         ice_and_bitmap(params->ptypes, params->ptypes,
948                                        src, ICE_FLOW_PTYPE_MAX);
949
950                         /* Attributes for GTP packet with Extension Header */
951                         params->attr = ice_attr_gtpu_eh;
952                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
953                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
954                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
955                         ice_and_bitmap(params->ptypes, params->ptypes,
956                                        src, ICE_FLOW_PTYPE_MAX);
957
958                         /* Attributes for GTP packet without Extension Header */
959                         params->attr = ice_attr_gtpu_session;
960                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
961                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
962                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
963                         ice_and_bitmap(params->ptypes, params->ptypes,
964                                        src, ICE_FLOW_PTYPE_MAX);
965                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
966                         src = (const ice_bitmap_t *)ice_ptypes_esp;
967                         ice_and_bitmap(params->ptypes, params->ptypes,
968                                        src, ICE_FLOW_PTYPE_MAX);
969                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
970                         src = (const ice_bitmap_t *)ice_ptypes_ah;
971                         ice_and_bitmap(params->ptypes, params->ptypes,
972                                        src, ICE_FLOW_PTYPE_MAX);
973                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
974                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
975                         ice_and_bitmap(params->ptypes, params->ptypes,
976                                        src, ICE_FLOW_PTYPE_MAX);
977                 }
978
979                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
980                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
981                                 src =
982                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
983                         else
984                                 src =
985                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
986
987                         ice_and_bitmap(params->ptypes, params->ptypes,
988                                        src, ICE_FLOW_PTYPE_MAX);
989                 } else {
990                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
991                         ice_andnot_bitmap(params->ptypes, params->ptypes,
992                                           src, ICE_FLOW_PTYPE_MAX);
993
994                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
995                         ice_andnot_bitmap(params->ptypes, params->ptypes,
996                                           src, ICE_FLOW_PTYPE_MAX);
997                 }
998         }
999
1000         return ICE_SUCCESS;
1001 }
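
/*
 * Reader's sketch (not driver code) of the narrowing performed above: the
 * PTYPE bitmap starts as all ones and is ANDed with the table for every
 * header a segment requests, so only packet types compatible with all of
 * them remain.  The helper is illustrative and reuses two tables defined
 * earlier in this file.
 */
static void example_narrow_ptypes(u32 *ptypes)
{
        u16 w;

        for (w = 0; w < ARRAY_SIZE(ice_ptypes_ipv4_ofos); w++) {
                ptypes[w] = 0xFFFFFFFF;               /* all packet types */
                ptypes[w] &= ice_ptypes_ipv4_ofos[w]; /* outer IPv4 only */
                ptypes[w] &= ice_ptypes_udp_il[w];    /* ... carrying UDP */
        }
}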
1002
1003 /**
1004  * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
1005  * @hw: pointer to the HW struct
1006  * @params: information about the flow to be processed
1007  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1008  *
1009  * This function allocates an extraction sequence entry for a DWORD-sized
1010  * chunk of the packet flags.
1011  */
1012 static enum ice_status
1013 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1014                           struct ice_flow_prof_params *params,
1015                           enum ice_flex_mdid_pkt_flags flags)
1016 {
1017         u8 fv_words = hw->blk[params->blk].es.fvw;
1018         u8 idx;
1019
1020         /* Make sure the number of extraction sequence entries required does not
1021          * exceed the block's capacity.
1022          */
1023         if (params->es_cnt >= fv_words)
1024                 return ICE_ERR_MAX_LIMIT;
1025
1026         /* some blocks require a reversed field vector layout */
1027         if (hw->blk[params->blk].es.reverse)
1028                 idx = fv_words - params->es_cnt - 1;
1029         else
1030                 idx = params->es_cnt;
1031
1032         params->es[idx].prot_id = ICE_PROT_META_ID;
1033         params->es[idx].off = flags;
1034         params->es_cnt++;
1035
1036         return ICE_SUCCESS;
1037 }
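
/*
 * Example of the index selection above, for a hypothetical block whose field
 * vector holds fvw = 48 words: with a reversed layout the first three entries
 * (es_cnt = 0, 1, 2) are written at indices 47, 46 and 45, while a normal
 * layout writes them at indices 0, 1 and 2.
 */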
1038
1039 /**
1040  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1041  * @hw: pointer to the HW struct
1042  * @params: information about the flow to be processed
1043  * @seg: packet segment index of the field to be extracted
1044  * @fld: ID of field to be extracted
1045  * @match: bitfield of all fields
1046  *
1047  * This function determines the protocol ID, offset, and size of the given
1048  * field. It then allocates one or more extraction sequence entries for the
1049  * given field, and fills the entries with protocol ID and offset information.
1050  */
1051 static enum ice_status
1052 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1053                     u8 seg, enum ice_flow_field fld, u64 match)
1054 {
1055         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1056         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1057         u8 fv_words = hw->blk[params->blk].es.fvw;
1058         struct ice_flow_fld_info *flds;
1059         u16 cnt, ese_bits, i;
1060         u16 sib_mask = 0;
1061         u16 mask;
1062         u16 off;
1063
1064         flds = params->prof->segs[seg].fields;
1065
1066         switch (fld) {
1067         case ICE_FLOW_FIELD_IDX_ETH_DA:
1068         case ICE_FLOW_FIELD_IDX_ETH_SA:
1069         case ICE_FLOW_FIELD_IDX_S_VLAN:
1070         case ICE_FLOW_FIELD_IDX_C_VLAN:
1071                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1072                 break;
1073         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1074                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1075                 break;
1076         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1077                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1078                 break;
1079         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1080                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1081                 break;
1082         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1083         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1084                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1085
1086                 /* TTL and PROT share the same extraction seq. entry.
1087                  * Each is considered a sibling to the other in terms of sharing
1088                  * the same extraction sequence entry.
1089                  */
1090                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1091                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1092                 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
1093                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1094
1095                 /* If the sibling field is also included, that field's
1096                  * mask needs to be included.
1097                  */
1098                 if (match & BIT(sib))
1099                         sib_mask = ice_flds_info[sib].mask;
1100                 break;
1101         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1102         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1103                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1104
1105                 /* TTL and PROT share the same extraction seq. entry.
1106                  * Each is considered a sibling to the other in terms of sharing
1107                  * the same extraction sequence entry.
1108                  */
1109                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1110                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1111                 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
1112                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1113
1114                 /* If the sibling field is also included, that field's
1115                  * mask needs to be included.
1116                  */
1117                 if (match & BIT(sib))
1118                         sib_mask = ice_flds_info[sib].mask;
1119                 break;
1120         case ICE_FLOW_FIELD_IDX_IPV4_SA:
1121         case ICE_FLOW_FIELD_IDX_IPV4_DA:
1122                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1123                 break;
1124         case ICE_FLOW_FIELD_IDX_IPV6_SA:
1125         case ICE_FLOW_FIELD_IDX_IPV6_DA:
1126         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1127         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1128         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1129         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1130         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1131         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1132                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1133                 break;
1134         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1135         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1136         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1137                 prot_id = ICE_PROT_TCP_IL;
1138                 break;
1139         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1140         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1141                 prot_id = ICE_PROT_UDP_IL_OR_S;
1142                 break;
1143         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1144         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1145                 prot_id = ICE_PROT_SCTP_IL;
1146                 break;
1147         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1148         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1149         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1150         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1151         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1152         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1153                 /* GTP is accessed through UDP OF protocol */
1154                 prot_id = ICE_PROT_UDP_OF;
1155                 break;
1156         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1157                 prot_id = ICE_PROT_PPPOE;
1158                 break;
1159         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1160                 prot_id = ICE_PROT_UDP_IL_OR_S;
1161                 break;
1162         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1163                 prot_id = ICE_PROT_L2TPV3;
1164                 break;
1165         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1166                 prot_id = ICE_PROT_ESP_F;
1167                 break;
1168         case ICE_FLOW_FIELD_IDX_AH_SPI:
1169                 prot_id = ICE_PROT_ESP_2;
1170                 break;
1171         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1172                 prot_id = ICE_PROT_UDP_IL_OR_S;
1173                 break;
1174         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1175         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1176         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1177         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1178         case ICE_FLOW_FIELD_IDX_ARP_OP:
1179                 prot_id = ICE_PROT_ARP_OF;
1180                 break;
1181         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1182         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1183                 /* ICMP type and code share the same extraction seq. entry */
1184                 prot_id = (params->prof->segs[seg].hdrs &
1185                            ICE_FLOW_SEG_HDR_IPV4) ?
1186                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1187                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1188                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1189                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1190                 break;
1191         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1192                 prot_id = ICE_PROT_GRE_OF;
1193                 break;
1194         default:
1195                 return ICE_ERR_NOT_IMPL;
1196         }
1197
1198         /* Each extraction sequence entry is a word in size, and extracts a
1199          * word-aligned offset from a protocol header.
1200          */
1201         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1202
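        /* Illustrative example (assuming 2-byte extraction words, i.e.
         * ese_bits == 16): a field at bit offset 72 of its header yields
         * xtrct.off = (72 / 16) * 2 = 8 bytes and xtrct.disp = 72 % 16 = 8
         * bits, i.e. the word at byte offset 8 with the field displaced
         * 8 bits into it.
         */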
1203         flds[fld].xtrct.prot_id = prot_id;
1204         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1205                 ICE_FLOW_FV_EXTRACT_SZ;
1206         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1207         flds[fld].xtrct.idx = params->es_cnt;
1208         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1209
1210         /* Determine the number of extraction sequence entries this field
1211          * consumes, accounting for its displacement within the first word
1212          */
1213         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1214                                   ice_flds_info[fld].size, ese_bits);
1215
1216         /* Fill in the extraction sequence entries needed for this field */
1217         off = flds[fld].xtrct.off;
1218         mask = flds[fld].xtrct.mask;
1219         for (i = 0; i < cnt; i++) {
1220                 /* Only consume an extraction sequence entry if there is no
1221                  * sibling field associated with this field or the sibling entry
1222                  * already extracts the word shared with this field.
1223                  */
1224                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1225                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1226                     flds[sib].xtrct.off != off) {
1227                         u8 idx;
1228
1229                         /* Make sure the number of extraction sequence entries
1230                          * required does not exceed the block's capacity
1231                          */
1232                         if (params->es_cnt >= fv_words)
1233                                 return ICE_ERR_MAX_LIMIT;
1234
1235                         /* some blocks require a reversed field vector layout */
1236                         if (hw->blk[params->blk].es.reverse)
1237                                 idx = fv_words - params->es_cnt - 1;
1238                         else
1239                                 idx = params->es_cnt;
1240
1241                         params->es[idx].prot_id = prot_id;
1242                         params->es[idx].off = off;
1243                         params->mask[idx] = mask | sib_mask;
1244                         params->es_cnt++;
1245                 }
1246
1247                 off += ICE_FLOW_FV_EXTRACT_SZ;
1248         }
1249
1250         return ICE_SUCCESS;
1251 }
1252
1253 /**
1254  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1255  * @hw: pointer to the HW struct
1256  * @params: information about the flow to be processed
1257  * @seg: index of packet segment whose raw fields are to be extracted
1258  */
1259 static enum ice_status
1260 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1261                      u8 seg)
1262 {
1263         u16 fv_words;
1264         u16 hdrs_sz;
1265         u8 i;
1266
1267         if (!params->prof->segs[seg].raws_cnt)
1268                 return ICE_SUCCESS;
1269
1270         if (params->prof->segs[seg].raws_cnt >
1271             ARRAY_SIZE(params->prof->segs[seg].raws))
1272                 return ICE_ERR_MAX_LIMIT;
1273
1274         /* Offsets within the segment headers are not supported */
1275         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1276         if (!hdrs_sz)
1277                 return ICE_ERR_PARAM;
1278
1279         fv_words = hw->blk[params->blk].es.fvw;
1280
1281         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1282                 struct ice_flow_seg_fld_raw *raw;
1283                 u16 off, cnt, j;
1284
1285                 raw = &params->prof->segs[seg].raws[i];
1286
1287                 /* Storing extraction information */
1288                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1289                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1290                         ICE_FLOW_FV_EXTRACT_SZ;
1291                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1292                         BITS_PER_BYTE;
1293                 raw->info.xtrct.idx = params->es_cnt;
1294
1295                 /* Determine the number of field vector entries this raw field
1296                  * consumes.
1297                  */
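                /* For example, with 2-byte extraction words, a 3-byte raw
                 * match starting at an odd byte offset has disp = 8 bits and
                 * needs DIVIDE_AND_ROUND_UP(8 + 3 * 8, 16) = 2 entries.
                 */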
1298                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1299                                           (raw->info.src.last * BITS_PER_BYTE),
1300                                           (ICE_FLOW_FV_EXTRACT_SZ *
1301                                            BITS_PER_BYTE));
1302                 off = raw->info.xtrct.off;
1303                 for (j = 0; j < cnt; j++) {
1304                         u16 idx;
1305
1306                         /* Make sure the number of extraction sequence entries
1307                          * required does not exceed the block's capacity
1308                          */
1309                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1310                             params->es_cnt >= ICE_MAX_FV_WORDS)
1311                                 return ICE_ERR_MAX_LIMIT;
1312
1313                         /* some blocks require a reversed field vector layout */
1314                         if (hw->blk[params->blk].es.reverse)
1315                                 idx = fv_words - params->es_cnt - 1;
1316                         else
1317                                 idx = params->es_cnt;
1318
1319                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1320                         params->es[idx].off = off;
1321                         params->es_cnt++;
1322                         off += ICE_FLOW_FV_EXTRACT_SZ;
1323                 }
1324         }
1325
1326         return ICE_SUCCESS;
1327 }
1328
1329 /**
1330  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1331  * @hw: pointer to the HW struct
1332  * @params: information about the flow to be processed
1333  *
1334  * This function iterates through all matched fields in the given segments, and
1335  * creates an extraction sequence for the fields.
1336  */
1337 static enum ice_status
1338 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1339                           struct ice_flow_prof_params *params)
1340 {
1341         enum ice_status status = ICE_SUCCESS;
1342         u8 i;
1343
1344         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1345          * packet flags
1346          */
1347         if (params->blk == ICE_BLK_ACL) {
1348                 status = ice_flow_xtract_pkt_flags(hw, params,
1349                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1350                 if (status)
1351                         return status;
1352         }
1353
1354         for (i = 0; i < params->prof->segs_cnt; i++) {
1355                 u64 match = params->prof->segs[i].match;
1356                 enum ice_flow_field j;
1357
1358                 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1359                                      ICE_FLOW_FIELD_IDX_MAX) {
1360                         status = ice_flow_xtract_fld(hw, params, i, j, match);
1361                         if (status)
1362                                 return status;
1363                         ice_clear_bit(j, (ice_bitmap_t *)&match);
1364                 }
1365
1366                 /* Process raw matching bytes */
1367                 status = ice_flow_xtract_raws(hw, params, i);
1368                 if (status)
1369                         return status;
1370         }
1371
1372         return status;
1373 }
1374
1375 /**
1376  * ice_flow_sel_acl_scen - select an ACL scenario for the flow profile
1377  * @hw: pointer to the hardware structure
1378  * @params: information about the flow to be processed
1379  *
1380  * Select the narrowest ACL scenario whose effective width can accommodate
1381  * the entry length computed for this profile.
1382  */
1383 static enum ice_status
1384 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1385 {
1386         /* Find the best-fit scenario for the provided match width */
1387         struct ice_acl_scen *cand_scen = NULL, *scen;
1388
1389         if (!hw->acl_tbl)
1390                 return ICE_ERR_DOES_NOT_EXIST;
1391
1392         /* Loop through each scenario and match against the scenario width
1393          * to select the specific scenario
1394          */
1395         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1396                 if (scen->eff_width >= params->entry_length &&
1397                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1398                         cand_scen = scen;
1399         if (!cand_scen)
1400                 return ICE_ERR_DOES_NOT_EXIST;
1401
1402         params->prof->cfg.scen = cand_scen;
1403
1404         return ICE_SUCCESS;
1405 }
1406
1407 /**
1408  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1409  * @params: information about the flow to be processed
1410  */
1411 static enum ice_status
1412 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1413 {
1414         u16 index, i, range_idx = 0;
1415
1416         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1417
1418         for (i = 0; i < params->prof->segs_cnt; i++) {
1419                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1420                 u8 j;
1421
1422                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1423                                      ICE_FLOW_FIELD_IDX_MAX) {
1424                         struct ice_flow_fld_info *fld = &seg->fields[j];
1425
1426                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1427
1428                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1429                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1430
1431                                 /* Range checking is only supported for fields
1432                                  * that fit, with displacement, in a single word
1433                                  */
1434                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1435                                                         fld->xtrct.disp,
1436                                                         BITS_PER_BYTE * 2) > 1)
1437                                         return ICE_ERR_PARAM;
1438
1439                                 /* Ranges must define low and high values */
1440                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1441                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1442                                         return ICE_ERR_PARAM;
1443
1444                                 fld->entry.val = range_idx++;
1445                         } else {
1446                                 /* Store adjusted byte-length of field for later
1447                                  * use, taking into account potential
1448                                  * non-byte-aligned displacement
1449                                  */
1450                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1451                                         (ice_flds_info[j].size +
1452                                          (fld->xtrct.disp % BITS_PER_BYTE),
1453                                          BITS_PER_BYTE);
1454                                 fld->entry.val = index;
1455                                 index += fld->entry.last;
1456                         }
1457                 }
1458
1459                 for (j = 0; j < seg->raws_cnt; j++) {
1460                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1461
1462                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1463                         raw->info.entry.val = index;
1464                         raw->info.entry.last = raw->info.src.last;
1465                         index += raw->info.entry.last;
1466                 }
1467         }
1468
1469         /* Currently, only the byte selection base is supported, which only
1470          * allows for an effective entry size of 30 bytes. Reject anything
1471          * larger.
1472          */
1473         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1474                 return ICE_ERR_PARAM;
1475
1476         /* Only 8 range checkers per profile, reject anything trying to use
1477          * more
1478          */
1479         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1480                 return ICE_ERR_PARAM;
1481
1482         /* Store # bytes required for entry for later use */
1483         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1484
1485         return ICE_SUCCESS;
1486 }
1487
1488 /**
1489  * ice_flow_proc_segs - process all packet segments associated with a profile
1490  * @hw: pointer to the HW struct
1491  * @params: information about the flow to be processed
1492  */
1493 static enum ice_status
1494 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1495 {
1496         enum ice_status status;
1497
1498         status = ice_flow_proc_seg_hdrs(params);
1499         if (status)
1500                 return status;
1501
1502         status = ice_flow_create_xtrct_seq(hw, params);
1503         if (status)
1504                 return status;
1505
1506         switch (params->blk) {
1507         case ICE_BLK_FD:
1508         case ICE_BLK_RSS:
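                /* No additional processing is needed for the FD and RSS
                 * blocks at this stage
                 */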
1509                 status = ICE_SUCCESS;
1510                 break;
1511         case ICE_BLK_ACL:
1512                 status = ice_flow_acl_def_entry_frmt(params);
1513                 if (status)
1514                         return status;
1515                 status = ice_flow_sel_acl_scen(hw, params);
1516                 if (status)
1517                         return status;
1518                 break;
1519         default:
1520                 return ICE_ERR_NOT_IMPL;
1521         }
1522
1523         return status;
1524 }
1525
1526 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1527 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1528 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1529
1530 /**
1531  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1532  * @hw: pointer to the HW struct
1533  * @blk: classification stage
1534  * @dir: flow direction
1535  * @segs: array of one or more packet segments that describe the flow
1536  * @segs_cnt: number of packet segments provided
1537  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1538  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1539  */
1540 static struct ice_flow_prof *
1541 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1542                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1543                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1544 {
1545         struct ice_flow_prof *p, *prof = NULL;
1546
1547         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1548         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1549                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1550                     segs_cnt && segs_cnt == p->segs_cnt) {
1551                         u8 i;
1552
1553                         /* Check for profile-VSI association if specified */
1554                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1555                             ice_is_vsi_valid(hw, vsi_handle) &&
1556                             !ice_is_bit_set(p->vsis, vsi_handle))
1557                                 continue;
1558
1559                         /* Protocol headers must be checked. Matched fields are
1560                          * checked if specified.
1561                          */
1562                         for (i = 0; i < segs_cnt; i++)
1563                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1564                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1565                                      segs[i].match != p->segs[i].match))
1566                                         break;
1567
1568                         /* A match is found if all segments are matched */
1569                         if (i == segs_cnt) {
1570                                 prof = p;
1571                                 break;
1572                         }
1573                 }
1574         ice_release_lock(&hw->fl_profs_locks[blk]);
1575
1576         return prof;
1577 }
1578
1579 /**
1580  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1581  * @hw: pointer to the HW struct
1582  * @blk: classification stage
1583  * @dir: flow direction
1584  * @segs: array of one or more packet segments that describe the flow
1585  * @segs_cnt: number of packet segments provided
1586  */
1587 u64
1588 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1589                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1590 {
1591         struct ice_flow_prof *p;
1592
1593         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1594                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1595
1596         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1597 }
1598
1599 /**
1600  * ice_flow_find_prof_id - Look up a profile with given profile ID
1601  * @hw: pointer to the HW struct
1602  * @blk: classification stage
1603  * @prof_id: unique ID to identify this flow profile
1604  */
1605 static struct ice_flow_prof *
1606 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1607 {
1608         struct ice_flow_prof *p;
1609
1610         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1611                 if (p->id == prof_id)
1612                         return p;
1613
1614         return NULL;
1615 }
1616
1617 /**
1618  * ice_dealloc_flow_entry - Deallocate flow entry memory
1619  * @hw: pointer to the HW struct
1620  * @entry: flow entry to be removed
1621  */
1622 static void
1623 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1624 {
1625         if (!entry)
1626                 return;
1627
1628         if (entry->entry)
1629                 ice_free(hw, entry->entry);
1630
1631         if (entry->range_buf) {
1632                 ice_free(hw, entry->range_buf);
1633                 entry->range_buf = NULL;
1634         }
1635
1636         if (entry->acts) {
1637                 ice_free(hw, entry->acts);
1638                 entry->acts = NULL;
1639                 entry->acts_cnt = 0;
1640         }
1641
1642         ice_free(hw, entry);
1643 }
1644
1645 /**
1646  * ice_flow_get_hw_prof - return the HW profile ID for a specific profile ID handle
1647  * @hw: pointer to the HW struct
1648  * @blk: classification stage
1649  * @prof_id: the profile ID handle
1650  * @hw_prof_id: pointer to variable to receive the HW profile ID
1651  */
1652 enum ice_status
1653 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1654                      u8 *hw_prof_id)
1655 {
1656         enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1657         struct ice_prof_map *map;
1658
1659         ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1660         map = ice_search_prof_id(hw, blk, prof_id);
1661         if (map) {
1662                 *hw_prof_id = map->prof_id;
1663                 status = ICE_SUCCESS;
1664         }
1665         ice_release_lock(&hw->blk[blk].es.prof_map_lock);
1666         return status;
1667 }
1668
1669 #define ICE_ACL_INVALID_SCEN    0x3f
1670
1671 /**
1672  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
1673  * @hw: pointer to the hardware structure
1674  * @prof: pointer to flow profile
1675  * @buf: destination buffer to which the function writes the partial extraction sequence
1676  *
1677  * Returns ICE_SUCCESS if no PF is associated with the given profile,
1678  * ICE_ERR_IN_USE if at least one PF is associated with the given profile,
1679  * and another error code on a real error.
1680  */
1681 static enum ice_status
1682 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1683                             struct ice_aqc_acl_prof_generic_frmt *buf)
1684 {
1685         enum ice_status status;
1686         u8 prof_id = 0;
1687
1688         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1689         if (status)
1690                 return status;
1691
1692         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1693         if (status)
1694                 return status;
1695
1696         /* If the PF scenario numbers for the given profile are all 0 or all
1697          * ICE_ACL_INVALID_SCEN (63), the profile has not been configured by
1698          * any PF yet.
1699          */
1700         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1701             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1702             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1703             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1704                 return ICE_SUCCESS;
1705
1706         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1707             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1708             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1709             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1710             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1711             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1712             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1713             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1714                 return ICE_SUCCESS;
1715
1716         return ICE_ERR_IN_USE;
1717 }
1718
1719 /**
1720  * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1721  * @hw: pointer to the hardware structure
1722  * @acts: array of actions to be performed on a match
1723  * @acts_cnt: number of actions
1724  */
1725 static enum ice_status
1726 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1727                            u8 acts_cnt)
1728 {
1729         int i;
1730
1731         for (i = 0; i < acts_cnt; i++) {
1732                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1733                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1734                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1735                         struct ice_acl_cntrs cntrs;
1736                         enum ice_status status;
1737
1738                         cntrs.bank = 0; /* Only bank0 for the moment */
1739                         cntrs.first_cntr =
1740                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1741                         cntrs.last_cntr =
1742                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1743
1744                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1745                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1746                         else
1747                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1748
1749                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1750                         if (status)
1751                                 return status;
1752                 }
1753         }
1754         return ICE_SUCCESS;
1755 }
1756
1757 /**
1758  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1759  * @hw: pointer to the hardware structure
1760  * @prof: pointer to flow profile
1761  *
1762  * Disassociate the scenario from the profile for the current PF.
1763  */
1764 static enum ice_status
1765 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1766 {
1767         struct ice_aqc_acl_prof_generic_frmt buf;
1768         enum ice_status status = ICE_SUCCESS;
1769         u8 prof_id = 0;
1770
1771         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1772
1773         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1774         if (status)
1775                 return status;
1776
1777         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1778         if (status)
1779                 return status;
1780
1781         /* Clear scenario for this PF */
1782         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1783         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
1784
1785         return status;
1786 }
1787
1788 /**
1789  * ice_flow_rem_entry_sync - Remove a flow entry
1790  * @hw: pointer to the HW struct
1791  * @blk: classification stage
1792  * @entry: flow entry to be removed
1793  */
1794 static enum ice_status
1795 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1796                         struct ice_flow_entry *entry)
1797 {
1798         if (!entry)
1799                 return ICE_ERR_BAD_PTR;
1800
1801         if (blk == ICE_BLK_ACL) {
1802                 enum ice_status status;
1803
1804                 if (!entry->prof)
1805                         return ICE_ERR_BAD_PTR;
1806
1807                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1808                                            entry->scen_entry_idx);
1809                 if (status)
1810                         return status;
1811
1812                 /* Check whether we need to release any ACL counters. */
1813                 if (entry->acts_cnt && entry->acts)
1814                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1815                                                    entry->acts_cnt);
1816         }
1817
1818         LIST_DEL(&entry->l_entry);
1819
1820         ice_dealloc_flow_entry(hw, entry);
1821
1822         return ICE_SUCCESS;
1823 }
1824
1825 /**
1826  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1827  * @hw: pointer to the HW struct
1828  * @blk: classification stage
1829  * @dir: flow direction
1830  * @prof_id: unique ID to identify this flow profile
1831  * @segs: array of one or more packet segments that describe the flow
1832  * @segs_cnt: number of packet segments provided
1833  * @acts: array of default actions
1834  * @acts_cnt: number of default actions
1835  * @prof: stores the returned flow profile added
1836  *
1837  * Assumption: the caller has acquired the lock to the profile list
1838  */
1839 static enum ice_status
1840 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1841                        enum ice_flow_dir dir, u64 prof_id,
1842                        struct ice_flow_seg_info *segs, u8 segs_cnt,
1843                        struct ice_flow_action *acts, u8 acts_cnt,
1844                        struct ice_flow_prof **prof)
1845 {
1846         struct ice_flow_prof_params *params;
1847         enum ice_status status;
1848         u8 i;
1849
1850         if (!prof || (acts_cnt && !acts))
1851                 return ICE_ERR_BAD_PTR;
1852
1853         params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
1854         if (!params)
1855                 return ICE_ERR_NO_MEMORY;
1856
1857         params->prof = (struct ice_flow_prof *)
1858                 ice_malloc(hw, sizeof(*params->prof));
1859         if (!params->prof) {
1860                 status = ICE_ERR_NO_MEMORY;
1861                 goto free_params;
1862         }
1863
1864         /* initialize extraction sequence to all invalid (0xff) */
1865         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1866                 params->es[i].prot_id = ICE_PROT_INVALID;
1867                 params->es[i].off = ICE_FV_OFFSET_INVAL;
1868         }
1869
1870         params->blk = blk;
1871         params->prof->id = prof_id;
1872         params->prof->dir = dir;
1873         params->prof->segs_cnt = segs_cnt;
1874
1875         /* Make a copy of the segments that need to be persistent in the flow
1876          * profile instance
1877          */
1878         for (i = 0; i < segs_cnt; i++)
1879                 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
1880                            ICE_NONDMA_TO_NONDMA);
1881
1882         /* Make a copy of the actions that need to be persistent in the flow
1883          * profile instance.
1884          */
1885         if (acts_cnt) {
1886                 params->prof->acts = (struct ice_flow_action *)
1887                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1888                                    ICE_NONDMA_TO_NONDMA);
1889
1890                 if (!params->prof->acts) {
1891                         status = ICE_ERR_NO_MEMORY;
1892                         goto out;
1893                 }
1894         }
1895
1896         status = ice_flow_proc_segs(hw, params);
1897         if (status) {
1898                 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
1899                 goto out;
1900         }
1901
1902         /* Add a HW profile for this flow profile */
1903         status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
1904                               params->attr, params->attr_cnt, params->es,
1905                               params->mask);
1906         if (status) {
1907                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1908                 goto out;
1909         }
1910
1911         INIT_LIST_HEAD(&params->prof->entries);
1912         ice_init_lock(&params->prof->entries_lock);
1913         *prof = params->prof;
1914
1915 out:
1916         if (status) {
1917                 if (params->prof->acts)
1918                         ice_free(hw, params->prof->acts);
1919                 ice_free(hw, params->prof);
1920         }
1921 free_params:
1922         ice_free(hw, params);
1923
1924         return status;
1925 }
1926
1927 /**
1928  * ice_flow_rem_prof_sync - remove a flow profile
1929  * @hw: pointer to the hardware structure
1930  * @blk: classification stage
1931  * @prof: pointer to flow profile to remove
1932  *
1933  * Assumption: the caller has acquired the lock to the profile list
1934  */
1935 static enum ice_status
1936 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1937                        struct ice_flow_prof *prof)
1938 {
1939         enum ice_status status;
1940
1941         /* Remove all remaining flow entries before removing the flow profile */
1942         if (!LIST_EMPTY(&prof->entries)) {
1943                 struct ice_flow_entry *e, *t;
1944
1945                 ice_acquire_lock(&prof->entries_lock);
1946
1947                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1948                                          l_entry) {
1949                         status = ice_flow_rem_entry_sync(hw, blk, e);
1950                         if (status)
1951                                 break;
1952                 }
1953
1954                 ice_release_lock(&prof->entries_lock);
1955         }
1956
1957         if (blk == ICE_BLK_ACL) {
1958                 struct ice_aqc_acl_profile_ranges query_rng_buf;
1959                 struct ice_aqc_acl_prof_generic_frmt buf;
1960                 u8 prof_id = 0;
1961
1962                 /* Disassociate the scenario from the profile for the PF */
1963                 status = ice_flow_acl_disassoc_scen(hw, prof);
1964                 if (status)
1965                         return status;
1966
1967                 /* Clear the range-checker if the profile ID is no longer
1968                  * used by any PF
1969                  */
1970                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1971                 if (status && status != ICE_ERR_IN_USE) {
1972                         return status;
1973                 } else if (!status) {
1974                         /* Clear the range-checker value for profile ID */
1975                         ice_memset(&query_rng_buf, 0,
1976                                    sizeof(struct ice_aqc_acl_profile_ranges),
1977                                    ICE_NONDMA_MEM);
1978
1979                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
1980                                                       &prof_id);
1981                         if (status)
1982                                 return status;
1983
1984                         status = ice_prog_acl_prof_ranges(hw, prof_id,
1985                                                           &query_rng_buf, NULL);
1986                         if (status)
1987                                 return status;
1988                 }
1989         }
1990
1991         /* Remove all hardware profiles associated with this flow profile */
1992         status = ice_rem_prof(hw, blk, prof->id);
1993         if (!status) {
1994                 LIST_DEL(&prof->l_entry);
1995                 ice_destroy_lock(&prof->entries_lock);
1996                 if (prof->acts)
1997                         ice_free(hw, prof->acts);
1998                 ice_free(hw, prof);
1999         }
2000
2001         return status;
2002 }
2003
2004 /**
2005  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2006  * @buf: destination buffer to which the function writes the partial xtrct sequence
2007  * @info: Info about field
2008  */
2009 static void
2010 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2011                                struct ice_flow_fld_info *info)
2012 {
2013         u16 dst, i;
2014         u8 src;
2015
2016         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2017                 info->xtrct.disp / BITS_PER_BYTE;
2018         dst = info->entry.val;
2019         for (i = 0; i < info->entry.last; i++)
2020                 /* HW stores field vector words in LE, convert words back to BE
2021                  * so constructed entries will end up in network order
2022                  */
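                /* XOR-ing the byte index with 1 swaps the two bytes within
                 * each 16-bit word (0 <-> 1, 2 <-> 3, ...), which performs
                 * that conversion.
                 */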
2023                 buf->byte_selection[dst++] = src++ ^ 1;
2024 }
2025
2026 /**
2027  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2028  * @hw: pointer to the hardware structure
2029  * @prof: pointer to flow profile
2030  */
2031 static enum ice_status
2032 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2033 {
2034         struct ice_aqc_acl_prof_generic_frmt buf;
2035         struct ice_flow_fld_info *info;
2036         enum ice_status status;
2037         u8 prof_id = 0;
2038         u16 i;
2039
2040         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2041
2042         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2043         if (status)
2044                 return status;
2045
2046         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2047         if (status && status != ICE_ERR_IN_USE)
2048                 return status;
2049
2050         if (!status) {
2051                 /* Program the profile dependent configuration. This is done
2052                  * only once regardless of the number of PFs using that profile
2053                  */
2054                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2055
2056                 for (i = 0; i < prof->segs_cnt; i++) {
2057                         struct ice_flow_seg_info *seg = &prof->segs[i];
2058                         u16 j;
2059
2060                         ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2061                                              ICE_FLOW_FIELD_IDX_MAX) {
2062                                 info = &seg->fields[j];
2063
2064                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2065                                         buf.word_selection[info->entry.val] =
2066                                                 info->xtrct.idx;
2067                                 else
2068                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
2069                                                                        info);
2070                         }
2071
2072                         for (j = 0; j < seg->raws_cnt; j++) {
2073                                 info = &seg->raws[j].info;
2074                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2075                         }
2076                 }
2077
2078                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2079                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2080                            ICE_NONDMA_MEM);
2081         }
2082
2083         /* Update the current PF */
2084         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2085         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2086
2087         return status;
2088 }
2089
2090 /**
2091  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2092  * @hw: pointer to the hardware structure
2093  * @blk: classification stage
2094  * @vsi_handle: software VSI handle
2095  * @vsig: target VSI group
2096  *
2097  * Assumption: the caller has already verified that the VSI to
2098  * be added has the same characteristics as the VSIG and will
2099  * thereby have access to all resources added to that VSIG.
2100  */
2101 enum ice_status
2102 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2103                         u16 vsig)
2104 {
2105         enum ice_status status;
2106
2107         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2108                 return ICE_ERR_PARAM;
2109
2110         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2111         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2112                                   vsig);
2113         ice_release_lock(&hw->fl_profs_locks[blk]);
2114
2115         return status;
2116 }
2117
2118 /**
2119  * ice_flow_assoc_prof - associate a VSI with a flow profile
2120  * @hw: pointer to the hardware structure
2121  * @blk: classification stage
2122  * @prof: pointer to flow profile
2123  * @vsi_handle: software VSI handle
2124  *
2125  * Assumption: the caller has acquired the lock to the profile list
2126  * and the software VSI handle has been validated
2127  */
2128 static enum ice_status
2129 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2130                     struct ice_flow_prof *prof, u16 vsi_handle)
2131 {
2132         enum ice_status status = ICE_SUCCESS;
2133
2134         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2135                 if (blk == ICE_BLK_ACL) {
2136                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2137                         if (status)
2138                                 return status;
2139                 }
2140                 status = ice_add_prof_id_flow(hw, blk,
2141                                               ice_get_hw_vsi_num(hw,
2142                                                                  vsi_handle),
2143                                               prof->id);
2144                 if (!status)
2145                         ice_set_bit(vsi_handle, prof->vsis);
2146                 else
2147                         ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2148                                   status);
2149         }
2150
2151         return status;
2152 }
2153
2154 /**
2155  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2156  * @hw: pointer to the hardware structure
2157  * @blk: classification stage
2158  * @prof: pointer to flow profile
2159  * @vsi_handle: software VSI handle
2160  *
2161  * Assumption: the caller has acquired the lock to the profile list
2162  * and the software VSI handle has been validated
2163  */
2164 static enum ice_status
2165 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2166                        struct ice_flow_prof *prof, u16 vsi_handle)
2167 {
2168         enum ice_status status = ICE_SUCCESS;
2169
2170         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2171                 status = ice_rem_prof_id_flow(hw, blk,
2172                                               ice_get_hw_vsi_num(hw,
2173                                                                  vsi_handle),
2174                                               prof->id);
2175                 if (!status)
2176                         ice_clear_bit(vsi_handle, prof->vsis);
2177                 else
2178                         ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2179                                   status);
2180         }
2181
2182         return status;
2183 }
2184
2185 /**
2186  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2187  * @hw: pointer to the HW struct
2188  * @blk: classification stage
2189  * @dir: flow direction
2190  * @prof_id: unique ID to identify this flow profile
2191  * @segs: array of one or more packet segments that describe the flow
2192  * @segs_cnt: number of packet segments provided
2193  * @acts: array of default actions
2194  * @acts_cnt: number of default actions
2195  * @prof: stores the returned flow profile added
2196  */
2197 enum ice_status
2198 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2199                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2200                   struct ice_flow_action *acts, u8 acts_cnt,
2201                   struct ice_flow_prof **prof)
2202 {
2203         enum ice_status status;
2204
2205         if (segs_cnt > ICE_FLOW_SEG_MAX)
2206                 return ICE_ERR_MAX_LIMIT;
2207
2208         if (!segs_cnt)
2209                 return ICE_ERR_PARAM;
2210
2211         if (!segs)
2212                 return ICE_ERR_BAD_PTR;
2213
2214         status = ice_flow_val_hdrs(segs, segs_cnt);
2215         if (status)
2216                 return status;
2217
2218         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2219
2220         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2221                                         acts, acts_cnt, prof);
2222         if (!status)
2223                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2224
2225         ice_release_lock(&hw->fl_profs_locks[blk]);
2226
2227         return status;
2228 }
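
/* Usage sketch (illustrative only): the profile ID, field selection, and
 * surrounding declarations below are placeholders, and the segment's protocol
 * headers are assumed to be set via ICE_FLOW_SET_HDRS before the profile is
 * added.
 *
 *      struct ice_flow_seg_info seg = { 0 };
 *      struct ice_flow_prof *prof;
 *
 *      ICE_FLOW_SET_HDRS(&seg, ICE_FLOW_SEG_HDR_IPV4);
 *      ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *                       ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *                       ICE_FLOW_FLD_OFF_INVAL, false);
 *      status = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, prof_id,
 *                                 &seg, 1, NULL, 0, &prof);
 */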
2229
2230 /**
2231  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2232  * @hw: pointer to the HW struct
2233  * @blk: the block for which the flow profile is to be removed
2234  * @prof_id: unique ID of the flow profile to be removed
2235  */
2236 enum ice_status
2237 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2238 {
2239         struct ice_flow_prof *prof;
2240         enum ice_status status;
2241
2242         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2243
2244         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2245         if (!prof) {
2246                 status = ICE_ERR_DOES_NOT_EXIST;
2247                 goto out;
2248         }
2249
2250         /* prof becomes invalid after the call */
2251         status = ice_flow_rem_prof_sync(hw, blk, prof);
2252
2253 out:
2254         ice_release_lock(&hw->fl_profs_locks[blk]);
2255
2256         return status;
2257 }
2258
2259 /**
2260  * ice_flow_find_entry - look for a flow entry using its unique ID
2261  * @hw: pointer to the HW struct
2262  * @blk: classification stage
2263  * @entry_id: unique ID to identify this flow entry
2264  *
2265  * This function looks for the flow entry with the specified unique ID in all
2266  * flow profiles of the specified classification stage. If the entry is found,
2267  * it returns the handle to the flow entry. Otherwise, it returns
2268  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2269  */
2270 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2271 {
2272         struct ice_flow_entry *found = NULL;
2273         struct ice_flow_prof *p;
2274
2275         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2276
2277         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2278                 struct ice_flow_entry *e;
2279
2280                 ice_acquire_lock(&p->entries_lock);
2281                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2282                         if (e->id == entry_id) {
2283                                 found = e;
2284                                 break;
2285                         }
2286                 ice_release_lock(&p->entries_lock);
2287
2288                 if (found)
2289                         break;
2290         }
2291
2292         ice_release_lock(&hw->fl_profs_locks[blk]);
2293
2294         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2295 }
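
/* Usage sketch (illustrative only; entry_id is a caller-chosen identifier):
 *
 *      u64 hndl = ice_flow_find_entry(hw, ICE_BLK_ACL, entry_id);
 *
 *      if (hndl == ICE_FLOW_ENTRY_HANDLE_INVAL)
 *              return ICE_ERR_DOES_NOT_EXIST;
 */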
2296
2297 /**
2298  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2299  * @hw: pointer to the hardware structure
2300  * @acts: array of actions to be performed on a match
2301  * @acts_cnt: number of actions
2302  * @cnt_alloc: indicates if an ACL counter has been allocated.
2303  */
2304 static enum ice_status
2305 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2306                            u8 acts_cnt, bool *cnt_alloc)
2307 {
2308         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2309         int i;
2310
2311         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2312         *cnt_alloc = false;
2313
2314         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2315                 return ICE_ERR_OUT_OF_RANGE;
2316
2317         for (i = 0; i < acts_cnt; i++) {
2318                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2319                     acts[i].type != ICE_FLOW_ACT_DROP &&
2320                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2321                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2322                         return ICE_ERR_CFG;
2323
2324                 /* If the caller wants to add two actions of the same type,
2325                  * it is considered an invalid configuration.
2326                  */
2327                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2328                         return ICE_ERR_PARAM;
2329         }
2330
2331         /* Check whether ACL counters are needed. */
2332         for (i = 0; i < acts_cnt; i++) {
2333                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2334                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2335                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2336                         struct ice_acl_cntrs cntrs;
2337                         enum ice_status status;
2338
2339                         cntrs.amount = 1;
2340                         cntrs.bank = 0; /* Only bank0 for the moment */
2341
2342                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2343                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2344                         else
2345                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2346
2347                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2348                         if (status)
2349                                 return status;
2350                         /* Counter index within the bank */
2351                         acts[i].data.acl_act.value =
2352                                                 CPU_TO_LE16(cntrs.first_cntr);
2353                         *cnt_alloc = true;
2354                 }
2355         }
2356
2357         return ICE_SUCCESS;
2358 }
2359
2360 /**
2361  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2362  * @fld: number of the given field
2363  * @info: info about field
2364  * @range_buf: range checker configuration buffer
2365  * @data: pointer to a data buffer containing flow entry's match values/masks
2366  * @range: Input/output param indicating which range checkers are being used
2367  */
2368 static void
2369 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2370                               struct ice_aqc_acl_profile_ranges *range_buf,
2371                               u8 *data, u8 *range)
2372 {
2373         u16 new_mask;
2374
2375         /* If not specified, default mask is all bits in field */
2376         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2377                     BIT(ice_flds_info[fld].size) - 1 :
2378                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2379
2380         /* If the mask is 0, then we don't need to worry about this input
2381          * range checker value.
2382          */
2383         if (new_mask) {
2384                 u16 new_high =
2385                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2386                 u16 new_low =
2387                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2388                 u8 range_idx = info->entry.val;
2389
2390                 range_buf->checker_cfg[range_idx].low_boundary =
2391                         CPU_TO_BE16(new_low);
2392                 range_buf->checker_cfg[range_idx].high_boundary =
2393                         CPU_TO_BE16(new_high);
2394                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2395
2396                 /* Indicate which range checker is being used */
2397                 *range |= BIT(range_idx);
2398         }
2399 }
2400
2401 /**
2402  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2403  * @fld: number of the given field
2404  * @info: info about the field
2405  * @buf: buffer containing the entry
2406  * @dontcare: buffer containing don't care mask for entry
2407  * @data: pointer to a data buffer containing flow entry's match values/masks
2408  */
2409 static void
2410 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2411                             u8 *dontcare, u8 *data)
2412 {
2413         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2414         bool use_mask = false;
2415         u8 disp;
2416
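        /* entry.val holds an absolute byte-selection index; subtract the base
         * index to get an offset into the scenario-sized buffers
         */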
2417         src = info->src.val;
2418         mask = info->src.mask;
2419         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2420         disp = info->xtrct.disp % BITS_PER_BYTE;
2421
2422         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2423                 use_mask = true;
2424
2425         for (k = 0; k < info->entry.last; k++, dst++) {
2426                 /* Add overflow bits from previous byte */
2427                 buf[dst] = (tmp_s & 0xff00) >> 8;
2428
2429                 /* If the mask is not valid, tmp_m is always zero, so dontcare
2430                  * is simply set to 0 (no masked bits). If the mask is valid,
2431                  * this pulls in the mask's overflow bits from the previous byte
2432                  */
2433                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2434
2435                 /* If there is displacement, the last byte contains only
2436                  * displaced data and there is no more data to read from the
2437                  * user buffer, so skip it to avoid reading beyond the end of
2438                  * the user buffer
2439                  */
2440                 if (!disp || k < info->entry.last - 1) {
2441                         /* Store shifted data to use in next byte */
2442                         tmp_s = data[src++] << disp;
2443
2444                         /* Add current (shifted) byte */
2445                         buf[dst] |= tmp_s & 0xff;
2446
2447                         /* Handle mask if valid */
2448                         if (use_mask) {
2449                                 tmp_m = (~data[mask++] & 0xff) << disp;
2450                                 dontcare[dst] |= tmp_m & 0xff;
2451                         }
2452                 }
2453         }
2454
2455         /* Fill in don't care bits at beginning of field */
2456         if (disp) {
2457                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2458                 for (k = 0; k < disp; k++)
2459                         dontcare[dst] |= BIT(k);
2460         }
2461
2462         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2463
2464         /* Fill in don't care bits at end of field */
2465         if (end_disp) {
2466                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2467                       info->entry.last - 1;
2468                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2469                         dontcare[dst] |= BIT(k);
2470         }
2471 }
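
/* Worked example (illustrative only): assume a hypothetical 16-bit field whose
 * extracted data starts 4 bits into a byte (disp = 4), spans three output
 * bytes (info->entry.last = 3), and whose match value in the user buffer is
 * data[] = { 0xAB, 0xCD }.  Walking the loop in ice_flow_acl_frmt_entry_fld():
 *
 *        k = 0: tmp_s = 0xAB << 4 = 0x0AB0, so buf[dst + 0] = 0xB0
 *        k = 1: carry 0x0A, tmp_s = 0xCD << 4 = 0x0CD0, so buf[dst + 1] = 0xDA
 *        k = 2: carry only (last byte with displacement), so buf[dst + 2] = 0x0C
 *
 * The fix-ups at the end then mark bits 0-3 of the first byte and, since
 * end_disp = (4 + 16) % 8 = 4, bits 4-7 of the last byte as don't care.
 */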
2472
2473 /**
2474  * ice_flow_acl_frmt_entry - Format ACL entry
2475  * @hw: pointer to the hardware structure
2476  * @prof: pointer to flow profile
2477  * @e: pointer to the flow entry
2478  * @data: pointer to a data buffer containing flow entry's match values/masks
2479  * @acts: array of actions to be performed on a match
2480  * @acts_cnt: number of actions
2481  *
2482  * Formats the key (and key_inverse) to be matched from the data passed in,
2483  * along with data from the flow profile. This key/key_inverse pair makes up
2484  * the 'entry' for an ACL flow entry.
2485  */
2486 static enum ice_status
2487 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2488                         struct ice_flow_entry *e, u8 *data,
2489                         struct ice_flow_action *acts, u8 acts_cnt)
2490 {
2491         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2492         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2493         enum ice_status status;
2494         bool cnt_alloc;
2495         u8 prof_id = 0;
2496         u16 i, buf_sz;
2497
2498         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2499         if (status)
2500                 return status;
2501
2502         /* Format the result action */
2503
2504         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2505         if (status)
2506                 return status;
2507
2508         status = ICE_ERR_NO_MEMORY;
2509
2510         e->acts = (struct ice_flow_action *)
2511                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2512                            ICE_NONDMA_TO_NONDMA);
2513         if (!e->acts)
2514                 goto out;
2515
2516         e->acts_cnt = acts_cnt;
2517
2518         /* Format the matching data */
2519         buf_sz = prof->cfg.scen->width;
2520         buf = (u8 *)ice_malloc(hw, buf_sz);
2521         if (!buf)
2522                 goto out;
2523
2524         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2525         if (!dontcare)
2526                 goto out;
2527
2528         /* The 'key' buffer will store both key and key_inverse, so it must
2529          * be twice the size of buf.
2530          */
2531         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2532         if (!key)
2533                 goto out;
2534
2535         range_buf = (struct ice_aqc_acl_profile_ranges *)
2536                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2537         if (!range_buf)
2538                 goto out;
2539
2540         /* Set don't care mask to all 1's to start, will zero out used bytes */
2541         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2542
2543         for (i = 0; i < prof->segs_cnt; i++) {
2544                 struct ice_flow_seg_info *seg = &prof->segs[i];
2545                 u8 j;
2546
2547                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2548                                      ICE_FLOW_FIELD_IDX_MAX) {
2549                         struct ice_flow_fld_info *info = &seg->fields[j];
2550
2551                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2552                                 ice_flow_acl_frmt_entry_range(j, info,
2553                                                               range_buf, data,
2554                                                               &range);
2555                         else
2556                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2557                                                             dontcare, data);
2558                 }
2559
2560                 for (j = 0; j < seg->raws_cnt; j++) {
2561                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2562                         u16 dst, src, mask, k;
2563                         bool use_mask = false;
2564
2565                         src = info->src.val;
2566                         dst = info->entry.val -
2567                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2568                         mask = info->src.mask;
2569
2570                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2571                                 use_mask = true;
2572
2573                         for (k = 0; k < info->entry.last; k++, dst++) {
2574                                 buf[dst] = data[src++];
2575                                 if (use_mask)
2576                                         dontcare[dst] = ~data[mask++];
2577                                 else
2578                                         dontcare[dst] = 0;
2579                         }
2580                 }
2581         }
2582
2583         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2584         dontcare[prof->cfg.scen->pid_idx] = 0;
2585
2586         /* Format the buffer for direction flags */
2587         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2588
2589         if (prof->dir == ICE_FLOW_RX)
2590                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2591
2592         if (range) {
2593                 buf[prof->cfg.scen->rng_chk_idx] = range;
2594                 /* Mark any unused range checkers as don't care */
2595                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2596                 e->range_buf = range_buf;
2597         } else {
2598                 ice_free(hw, range_buf);
2599         }
2600
2601         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2602                              buf_sz);
2603         if (status)
2604                 goto out;
2605
2606         e->entry = key;
2607         e->entry_sz = buf_sz * 2;
2608
2609 out:
2610         if (buf)
2611                 ice_free(hw, buf);
2612
2613         if (dontcare)
2614                 ice_free(hw, dontcare);
2615
2616         if (status && key)
2617                 ice_free(hw, key);
2618
2619         if (status && range_buf) {
2620                 ice_free(hw, range_buf);
2621                 e->range_buf = NULL;
2622         }
2623
2624         if (status && e->acts) {
2625                 ice_free(hw, e->acts);
2626                 e->acts = NULL;
2627                 e->acts_cnt = 0;
2628         }
2629
2630         if (status && cnt_alloc)
2631                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2632
2633         return status;
2634 }
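
/* Layout sketch (illustrative): for a scenario width of W bytes, the e->entry
 * buffer produced above holds the key and its inverse back to back:
 *
 *        +---------------------+-----------------------+
 *        |    key (W bytes)    | key_inverse (W bytes) |
 *        +---------------------+-----------------------+
 *        0                     W                     2*W
 *
 * ice_set_key() derives both halves from 'buf' (the assembled match values)
 * and 'dontcare' (the bits to ignore); e->entry_sz records the 2 * W total.
 */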
2635
2636 /**
2637  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2638  *                                     the compared data.
2639  * @prof: pointer to flow profile
2640  * @e: pointer to the comparing flow entry
2641  * @do_chg_action: decide if we want to change the ACL action
2642  * @do_add_entry: decide if we want to add the new ACL entry
2643  * @do_rem_entry: decide if we want to remove the current ACL entry
2644  *
2645  * Find an ACL scenario entry that matches the compared data. At the same
2646  * time, this function also figures out:
2647  * a/ If we want to change the ACL action
2648  * b/ If we want to add the new ACL entry
2649  * c/ If we want to remove the current ACL entry
2650  */
2651 static struct ice_flow_entry *
2652 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2653                                   struct ice_flow_entry *e, bool *do_chg_action,
2654                                   bool *do_add_entry, bool *do_rem_entry)
2655 {
2656         struct ice_flow_entry *p, *return_entry = NULL;
2657         u8 i, j;
2658
2659         /* Check if:
2660          * a/ An entry exists with the same matching data but a different
2661          *    priority; then remove the existing ACL entry and add the new
2662          *    entry to the ACL scenario.
2663          * b/ An entry exists with the same matching data, priority, and
2664          *    result action; then do nothing.
2665          * c/ An entry exists with the same matching data and priority but a
2666          *    different action; then only change that entry's action.
2667          * d/ Otherwise, add this new entry to the ACL scenario.
2668          */
2669         *do_chg_action = false;
2670         *do_add_entry = true;
2671         *do_rem_entry = false;
2672         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2673                 if (memcmp(p->entry, e->entry, p->entry_sz))
2674                         continue;
2675
2676                 /* From this point, we have the same matching_data. */
2677                 *do_add_entry = false;
2678                 return_entry = p;
2679
2680                 if (p->priority != e->priority) {
2681                         /* matching data && !priority */
2682                         *do_add_entry = true;
2683                         *do_rem_entry = true;
2684                         break;
2685                 }
2686
2687                 /* From this point, we will have matching_data && priority */
2688                 if (p->acts_cnt != e->acts_cnt)
2689                         *do_chg_action = true;
2690                 for (i = 0; i < p->acts_cnt; i++) {
2691                         bool found_not_match = false;
2692
2693                         for (j = 0; j < e->acts_cnt; j++)
2694                                 if (memcmp(&p->acts[i], &e->acts[j],
2695                                            sizeof(struct ice_flow_action))) {
2696                                         found_not_match = true;
2697                                         break;
2698                                 }
2699
2700                         if (found_not_match) {
2701                                 *do_chg_action = true;
2702                                 break;
2703                         }
2704                 }
2705
2706                 /* (do_chg_action = true) means:
2707                  *    matching_data && priority && !result_action
2708                  * (do_chg_action = false) means:
2709                  *    matching_data && priority && result_action
2710                  */
2711                 break;
2712         }
2713
2714         return return_entry;
2715 }
2716
2717 /**
2718  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2719  * @p: flow priority
2720  */
2721 static enum ice_acl_entry_prior
2722 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2723 {
2724         enum ice_acl_entry_prior acl_prior;
2725
2726         switch (p) {
2727         case ICE_FLOW_PRIO_LOW:
2728                 acl_prior = ICE_LOW;
2729                 break;
2730         case ICE_FLOW_PRIO_NORMAL:
2731                 acl_prior = ICE_NORMAL;
2732                 break;
2733         case ICE_FLOW_PRIO_HIGH:
2734                 acl_prior = ICE_HIGH;
2735                 break;
2736         default:
2737                 acl_prior = ICE_NORMAL;
2738                 break;
2739         }
2740
2741         return acl_prior;
2742 }
2743
2744 /**
2745  * ice_flow_acl_union_rng_chk - Perform union operation between two
2746  *                              range checker buffers
2747  * @dst_buf: pointer to destination range checker buffer
2748  * @src_buf: pointer to source range checker buffer
2749  *
2750  * This function performs the union of the dst_buf and src_buf range
2751  * checker buffers and saves the result back into dst_buf.
2752  */
2753 static enum ice_status
2754 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2755                            struct ice_aqc_acl_profile_ranges *src_buf)
2756 {
2757         u8 i, j;
2758
2759         if (!dst_buf || !src_buf)
2760                 return ICE_ERR_BAD_PTR;
2761
2762         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2763                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2764                 bool will_populate = false;
2765
2766                 in_data = &src_buf->checker_cfg[i];
2767
2768                 if (!in_data->mask)
2769                         break;
2770
2771                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2772                         cfg_data = &dst_buf->checker_cfg[j];
2773
2774                         if (!cfg_data->mask ||
2775                             !memcmp(cfg_data, in_data,
2776                                     sizeof(struct ice_acl_rng_data))) {
2777                                 will_populate = true;
2778                                 break;
2779                         }
2780                 }
2781
2782                 if (will_populate) {
2783                         ice_memcpy(cfg_data, in_data,
2784                                    sizeof(struct ice_acl_rng_data),
2785                                    ICE_NONDMA_TO_NONDMA);
2786                 } else {
2787                         /* No available slot left to program range checker */
2788                         return ICE_ERR_MAX_LIMIT;
2789                 }
2790         }
2791
2792         return ICE_SUCCESS;
2793 }
2794
2795 /**
2796  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2797  * @hw: pointer to the hardware structure
2798  * @prof: pointer to flow profile
2799  * @entry: double pointer to the flow entry
2800  *
2801  * Look at the entries already added to the corresponding ACL scenario,
2802  * then perform the matching logic to decide whether to add, modify, or
2803  * do nothing with this new entry.
2804  */
2805 static enum ice_status
2806 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2807                                  struct ice_flow_entry **entry)
2808 {
2809         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2810         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2811         struct ice_acl_act_entry *acts = NULL;
2812         struct ice_flow_entry *exist;
2813         enum ice_status status = ICE_SUCCESS;
2814         struct ice_flow_entry *e;
2815         u8 i;
2816
2817         if (!entry || !(*entry) || !prof)
2818                 return ICE_ERR_BAD_PTR;
2819
2820         e = *entry;
2821
2822         do_chg_rng_chk = false;
2823         if (e->range_buf) {
2824                 u8 prof_id = 0;
2825
2826                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2827                                               &prof_id);
2828                 if (status)
2829                         return status;
2830
2831                 /* Query the current range-checker value in FW */
2832                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2833                                                    NULL);
2834                 if (status)
2835                         return status;
2836                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2837                            sizeof(struct ice_aqc_acl_profile_ranges),
2838                            ICE_NONDMA_TO_NONDMA);
2839
2840                 /* Generate the new range-checker value */
2841                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2842                 if (status)
2843                         return status;
2844
2845                 /* Reconfigure the range check if the buffer is changed. */
2846                 do_chg_rng_chk = false;
2847                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2848                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2849                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2850                                                           &cfg_rng_buf, NULL);
2851                         if (status)
2852                                 return status;
2853
2854                         do_chg_rng_chk = true;
2855                 }
2856         }
2857
2858         /* Figure out whether to change the ACL action, add the new ACL
2859          * entry, and/or remove the current ACL entry.
2860          */
2861         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2862                                                   &do_add_entry, &do_rem_entry);
2863         if (do_rem_entry) {
2864                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2865                 if (status)
2866                         return status;
2867         }
2868
2869         /* Prepare the result action buffer */
2870         acts = (struct ice_acl_act_entry *)
2871                 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2872         if (!acts)
2873                 return ICE_ERR_NO_MEMORY;
2874
2875         for (i = 0; i < e->acts_cnt; i++)
2876                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2877                            sizeof(struct ice_acl_act_entry),
2878                            ICE_NONDMA_TO_NONDMA);
2879
2880         if (do_add_entry) {
2881                 enum ice_acl_entry_prior prior;
2882                 u8 *keys, *inverts;
2883                 u16 entry_idx;
2884
2885                 keys = (u8 *)e->entry;
2886                 inverts = keys + (e->entry_sz / 2);
2887                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2888
2889                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2890                                            inverts, acts, e->acts_cnt,
2891                                            &entry_idx);
2892                 if (status)
2893                         goto out;
2894
2895                 e->scen_entry_idx = entry_idx;
2896                 LIST_ADD(&e->l_entry, &prof->entries);
2897         } else {
2898                 if (do_chg_action) {
2899                         /* Update the SW's copy of the existing entry with
2900                          * e's action memory info.
2901                          */
2902                         ice_free(hw, exist->acts);
2903                         exist->acts_cnt = e->acts_cnt;
2904                         exist->acts = (struct ice_flow_action *)
2905                                 ice_calloc(hw, exist->acts_cnt,
2906                                            sizeof(struct ice_flow_action));
2907
2908                         if (!exist->acts) {
2909                                 status = ICE_ERR_NO_MEMORY;
2910                                 goto out;
2911                         }
2912
2913                         ice_memcpy(exist->acts, e->acts,
2914                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2915                                    ICE_NONDMA_TO_NONDMA);
2916
2917                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2918                                                   e->acts_cnt,
2919                                                   exist->scen_entry_idx);
2920                         if (status)
2921                                 goto out;
2922                 }
2923
2924                 if (do_chg_rng_chk) {
2925                         /* In this case, we want to update the range checker
2926                          * information of the existing entry.
2927                          */
2928                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2929                                                             e->range_buf);
2930                         if (status)
2931                                 goto out;
2932                 }
2933
2934                 /* As we don't add the new entry to our SW DB, deallocate its
2935                  * memory and return the existing entry to the caller.
2936                  */
2937                 ice_dealloc_flow_entry(hw, e);
2938                 *(entry) = exist;
2939         }
2940 out:
2941         ice_free(hw, acts);
2942
2943         return status;
2944 }
2945
2946 /**
2947  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2948  * @hw: pointer to the hardware structure
2949  * @prof: pointer to flow profile
2950  * @e: double pointer to the flow entry
2951  */
2952 static enum ice_status
2953 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2954                             struct ice_flow_entry **e)
2955 {
2956         enum ice_status status;
2957
2958         ice_acquire_lock(&prof->entries_lock);
2959         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2960         ice_release_lock(&prof->entries_lock);
2961
2962         return status;
2963 }
2964
2965 /**
2966  * ice_flow_add_entry - Add a flow entry
2967  * @hw: pointer to the HW struct
2968  * @blk: classification stage
2969  * @prof_id: ID of the profile to add a new flow entry to
2970  * @entry_id: unique ID to identify this flow entry
2971  * @vsi_handle: software VSI handle for the flow entry
2972  * @prio: priority of the flow entry
2973  * @data: pointer to a data buffer containing flow entry's match values/masks
2974  * @acts: arrays of actions to be performed on a match
2975  * @acts_cnt: number of actions
2976  * @entry_h: pointer to buffer that receives the new flow entry's handle
2977  */
2978 enum ice_status
2979 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2980                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2981                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
2982                    u64 *entry_h)
2983 {
2984         struct ice_flow_entry *e = NULL;
2985         struct ice_flow_prof *prof;
2986         enum ice_status status = ICE_SUCCESS;
2987
2988         /* ACL entries must indicate an action */
2989         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2990                 return ICE_ERR_PARAM;
2991
2992         /* No flow entry data is expected for RSS */
2993         if (!entry_h || (!data && blk != ICE_BLK_RSS))
2994                 return ICE_ERR_BAD_PTR;
2995
2996         if (!ice_is_vsi_valid(hw, vsi_handle))
2997                 return ICE_ERR_PARAM;
2998
2999         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3000
3001         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3002         if (!prof) {
3003                 status = ICE_ERR_DOES_NOT_EXIST;
3004         } else {
3005                 /* Allocate memory for the entry being added and associate
3006                  * the VSI with the found flow profile.
3007                  */
3008                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3009                 if (!e)
3010                         status = ICE_ERR_NO_MEMORY;
3011                 else
3012                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3013         }
3014
3015         ice_release_lock(&hw->fl_profs_locks[blk]);
3016         if (status)
3017                 goto out;
3018
3019         e->id = entry_id;
3020         e->vsi_handle = vsi_handle;
3021         e->prof = prof;
3022         e->priority = prio;
3023
3024         switch (blk) {
3025         case ICE_BLK_FD:
3026         case ICE_BLK_RSS:
3027                 break;
3028         case ICE_BLK_ACL:
3029                 /* ACL will handle the entry management */
3030                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3031                                                  acts_cnt);
3032                 if (status)
3033                         goto out;
3034
3035                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3036                 if (status)
3037                         goto out;
3038
3039                 break;
3040         default:
3041                 status = ICE_ERR_NOT_IMPL;
3042                 goto out;
3043         }
3044
3045         if (blk != ICE_BLK_ACL) {
3046                 /* ACL will handle the entry management */
3047                 ice_acquire_lock(&prof->entries_lock);
3048                 LIST_ADD(&e->l_entry, &prof->entries);
3049                 ice_release_lock(&prof->entries_lock);
3050         }
3051
3052         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3053
3054 out:
3055         if (status && e) {
3056                 if (e->entry)
3057                         ice_free(hw, e->entry);
3058                 ice_free(hw, e);
3059         }
3060
3061         return status;
3062 }
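
/* Usage sketch (illustrative only, not built as part of the driver): a
 * hypothetical caller adding a single-action ACL entry.  'hw', 'prof_id',
 * 'entry_id', 'vsi_handle' and 'match_buf' are assumed to come from the
 * caller, and ICE_FLOW_ACT_DROP is used purely as an example action type.
 *
 *        struct ice_flow_action act = { .type = ICE_FLOW_ACT_DROP };
 *        u64 entry_h = ICE_FLOW_ENTRY_HANDLE_INVAL;
 *        enum ice_status err;
 *
 *        err = ice_flow_add_entry(hw, ICE_BLK_ACL, prof_id, entry_id,
 *                                 vsi_handle, ICE_FLOW_PRIO_NORMAL,
 *                                 match_buf, &act, 1, &entry_h);
 *
 * On success, entry_h can later be passed to ice_flow_rem_entry().
 */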
3063
3064 /**
3065  * ice_flow_rem_entry - Remove a flow entry
3066  * @hw: pointer to the HW struct
3067  * @blk: classification stage
3068  * @entry_h: handle to the flow entry to be removed
3069  */
3070 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3071                                    u64 entry_h)
3072 {
3073         struct ice_flow_entry *entry;
3074         struct ice_flow_prof *prof;
3075         enum ice_status status = ICE_SUCCESS;
3076
3077         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3078                 return ICE_ERR_PARAM;
3079
3080         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
3081
3082         /* Retain the pointer to the flow profile as the entry will be freed */
3083         prof = entry->prof;
3084
3085         if (prof) {
3086                 ice_acquire_lock(&prof->entries_lock);
3087                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3088                 ice_release_lock(&prof->entries_lock);
3089         }
3090
3091         return status;
3092 }
3093
3094 /**
3095  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3096  * @seg: packet segment the field being set belongs to
3097  * @fld: field to be set
3098  * @field_type: type of the field
3099  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3100  *           entry's input buffer
3101  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3102  *            input buffer
3103  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3104  *            entry's input buffer
3105  *
3106  * This helper function stores information about a field being matched,
3107  * including the type of the field and the locations of the value to match,
3108  * the mask, and the upper-bound value within a flow entry's input buffer.
3109  * This function should only be used for fixed-size data structures.
3110  *
3111  * This function also opportunistically determines the protocol headers to be
3112  * present based on the fields being set. Some fields cannot be used alone to
3113  * determine the protocol headers present. Sometimes, fields for particular
3114  * protocol headers are not matched. In those cases, the protocol headers
3115  * must be explicitly set.
3116  */
3117 static void
3118 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3119                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3120                      u16 mask_loc, u16 last_loc)
3121 {
3122         u64 bit = BIT_ULL(fld);
3123
3124         seg->match |= bit;
3125         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3126                 seg->range |= bit;
3127
3128         seg->fields[fld].type = field_type;
3129         seg->fields[fld].src.val = val_loc;
3130         seg->fields[fld].src.mask = mask_loc;
3131         seg->fields[fld].src.last = last_loc;
3132
3133         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3134 }
3135
3136 /**
3137  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3138  * @seg: packet segment the field being set belongs to
3139  * @fld: field to be set
3140  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3141  *           entry's input buffer
3142  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3143  *            input buffer
3144  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3145  *            entry's input buffer
3146  * @range: indicate if field being matched is to be in a range
3147  *
3148  * This function specifies the locations, in the form of byte offsets from the
3149  * start of the input buffer for a flow entry, from where the value to match,
3150  * the mask value, and upper value can be extracted. These locations are then
3151  * stored in the flow profile. When adding a flow entry associated with the
3152  * flow profile, these locations will be used to quickly extract the values and
3153  * create the content of a match entry. This function should only be used for
3154  * fixed-size data structures.
3155  */
3156 void
3157 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3158                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3159 {
3160         enum ice_flow_fld_match_type t = range ?
3161                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3162
3163         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3164 }
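
/* Usage sketch (illustrative only): describe where, inside a caller-defined
 * input buffer, the IPv4 source address value and its mask will be placed.
 * The byte offsets 0 and 4 are example values only.
 *
 *        struct ice_flow_seg_info seg = { 0 };
 *
 *        ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *                         0, 4, ICE_FLOW_FLD_OFF_INVAL, false);
 *
 * Passing range = true instead would make the "last" location the upper bound
 * of a value range rather than leaving it unused.
 */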
3165
3166 /**
3167  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3168  * @seg: packet segment the field being set belongs to
3169  * @fld: field to be set
3170  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3171  *           entry's input buffer
3172  * @pref_loc: location of prefix value from entry's input buffer
3173  * @pref_sz: size of the location holding the prefix value
3174  *
3175  * This function specifies the locations, in the form of byte offsets from the
3176  * start of the input buffer for a flow entry, from where the value to match
3177  * and the IPv4 prefix value can be extracted. These locations are then stored
3178  * in the flow profile. When adding flow entries to the associated flow profile,
3179  * these locations can be used to quickly extract the values to create the
3180  * content of a match entry. This function should only be used for fixed-size
3181  * data structures.
3182  */
3183 void
3184 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3185                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3186 {
3187         /* For this type of field, the "mask" location carries the location of
3188          * the prefix value and the "last" location carries the size of the
3189          * location holding the prefix value.
3190          */
3191         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3192                              pref_loc, (u16)pref_sz);
3193 }
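
/* Usage sketch (illustrative only): continuing the sketch above, match on an
 * IPv4 destination prefix whose value sits at input-buffer offset 8 and whose
 * prefix is held in a one-byte location at offset 12 (offsets and size are
 * example values only).
 *
 *        ice_flow_set_fld_prefix(&seg, ICE_FLOW_FIELD_IDX_IPV4_DA, 8, 12, 1);
 */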
3194
3195 /**
3196  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3197  * @seg: packet segment the field being set belongs to
3198  * @off: offset of the raw field from the beginning of the segment in bytes
3199  * @len: length of the raw pattern to be matched
3200  * @val_loc: location of the value to match from entry's input buffer
3201  * @mask_loc: location of mask value from entry's input buffer
3202  *
3203  * This function specifies the offset of the raw field to be matched from the
3204  * beginning of the specified packet segment, and the locations, in the form of
3205  * byte offsets from the start of the input buffer for a flow entry, from where
3206  * the value to match and the mask value to be extracted. These locations are
3207  * the value to match and the mask value can be extracted. These locations are
3208  * flow profile, these locations can be used to quickly extract the values to
3209  * create the content of a match entry. This function should only be used for
3210  * fixed-size data structures.
3211  */
3212 void
3213 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3214                      u16 val_loc, u16 mask_loc)
3215 {
3216         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3217                 seg->raws[seg->raws_cnt].off = off;
3218                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3219                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3220                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3221                 /* The "last" field is used to store the length of the field */
3222                 seg->raws[seg->raws_cnt].info.src.last = len;
3223         }
3224
3225         /* Overflows of "raws" will be handled as an error condition later in
3226          * the flow when this information is processed.
3227          */
3228         seg->raws_cnt++;
3229 }
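
/* Usage sketch (illustrative only): match two raw bytes located 60 bytes into
 * the packet segment, with the value and mask stored at input-buffer offsets
 * 16 and 18 (all numbers are example values only).
 *
 *        ice_flow_add_fld_raw(&seg, 60, 2, 16, 18);
 *
 * Callers are expected to stay at or below ICE_FLOW_SEG_RAW_FLD_MAX raw
 * fields; an overflow is only reported later when the profile is processed.
 */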
3230
3231 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3232         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3233
3234 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3235         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3236
3237 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3238         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3239
3240 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3241         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3242          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3243          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3244
3245 /**
3246  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3247  * @segs: pointer to the flow field segment(s)
3248  * @hash_fields: fields to be hashed on for the segment(s)
3249  * @flow_hdr: protocol header fields within a packet segment
3250  *
3251  * Helper function to extract fields from the hash bitmap and use the flow
3252  * header value to set up the flow field segment, for later use when adding
3253  * or removing a flow profile entry.
3254  */
3255 static enum ice_status
3256 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3257                           u32 flow_hdr)
3258 {
3259         u64 val;
3260         u8 i;
3261
3262         ice_for_each_set_bit(i, (ice_bitmap_t *)&hash_fields,
3263                              ICE_FLOW_FIELD_IDX_MAX)
3264                 ice_flow_set_fld(segs, (enum ice_flow_field)i,
3265                                  ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3266                                  ICE_FLOW_FLD_OFF_INVAL, false);
3267
3268         ICE_FLOW_SET_HDRS(segs, flow_hdr);
3269
3270         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3271             ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
3272                 return ICE_ERR_PARAM;
3273
3274         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3275         if (val && !ice_is_pow2(val))
3276                 return ICE_ERR_CFG;
3277
3278         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3279         if (val && !ice_is_pow2(val))
3280                 return ICE_ERR_CFG;
3281
3282         return ICE_SUCCESS;
3283 }
3284
3285 /**
3286  * ice_rem_vsi_rss_list - remove VSI from RSS list
3287  * @hw: pointer to the hardware structure
3288  * @vsi_handle: software VSI handle
3289  *
3290  * Remove the VSI from all RSS configurations in the list.
3291  */
3292 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3293 {
3294         struct ice_rss_cfg *r, *tmp;
3295
3296         if (LIST_EMPTY(&hw->rss_list_head))
3297                 return;
3298
3299         ice_acquire_lock(&hw->rss_locks);
3300         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3301                                  ice_rss_cfg, l_entry)
3302                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3303                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3304                                 LIST_DEL(&r->l_entry);
3305                                 ice_free(hw, r);
3306                         }
3307         ice_release_lock(&hw->rss_locks);
3308 }
3309
3310 /**
3311  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3312  * @hw: pointer to the hardware structure
3313  * @vsi_handle: software VSI handle
3314  *
3315  * This function will iterate through all flow profiles and disassociate
3316  * the VSI from each of them. If a flow profile has no VSIs left, it will
3317  * be removed.
3318  */
3319 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3320 {
3321         const enum ice_block blk = ICE_BLK_RSS;
3322         struct ice_flow_prof *p, *t;
3323         enum ice_status status = ICE_SUCCESS;
3324
3325         if (!ice_is_vsi_valid(hw, vsi_handle))
3326                 return ICE_ERR_PARAM;
3327
3328         if (LIST_EMPTY(&hw->fl_profs[blk]))
3329                 return ICE_SUCCESS;
3330
3331         ice_acquire_lock(&hw->rss_locks);
3332         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3333                                  l_entry)
3334                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3335                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3336                         if (status)
3337                                 break;
3338
3339                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3340                                 status = ice_flow_rem_prof(hw, blk, p->id);
3341                                 if (status)
3342                                         break;
3343                         }
3344                 }
3345         ice_release_lock(&hw->rss_locks);
3346
3347         return status;
3348 }
3349
3350 /**
3351  * ice_rem_rss_list - remove RSS configuration from list
3352  * @hw: pointer to the hardware structure
3353  * @vsi_handle: software VSI handle
3354  * @prof: pointer to flow profile
3355  *
3356  * Assumption: lock has already been acquired for RSS list
3357  */
3358 static void
3359 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3360 {
3361         struct ice_rss_cfg *r, *tmp;
3362
3363         /* Search for RSS hash fields associated with the VSI that match the
3364          * hash configurations associated with the flow profile. If found,
3365          * remove it from the RSS entry list of the VSI context and delete it.
3366          */
3367         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3368                                  ice_rss_cfg, l_entry)
3369                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3370                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3371                         ice_clear_bit(vsi_handle, r->vsis);
3372                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3373                                 LIST_DEL(&r->l_entry);
3374                                 ice_free(hw, r);
3375                         }
3376                         return;
3377                 }
3378 }
3379
3380 /**
3381  * ice_add_rss_list - add RSS configuration to list
3382  * @hw: pointer to the hardware structure
3383  * @vsi_handle: software VSI handle
3384  * @prof: pointer to flow profile
3385  *
3386  * Assumption: lock has already been acquired for RSS list
3387  */
3388 static enum ice_status
3389 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3390 {
3391         struct ice_rss_cfg *r, *rss_cfg;
3392
3393         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3394                             ice_rss_cfg, l_entry)
3395                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3396                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3397                         ice_set_bit(vsi_handle, r->vsis);
3398                         return ICE_SUCCESS;
3399                 }
3400
3401         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3402         if (!rss_cfg)
3403                 return ICE_ERR_NO_MEMORY;
3404
3405         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3406         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3407         rss_cfg->symm = prof->cfg.symm;
3408         ice_set_bit(vsi_handle, rss_cfg->vsis);
3409
3410         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3411
3412         return ICE_SUCCESS;
3413 }
3414
3415 #define ICE_FLOW_PROF_HASH_S    0
3416 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3417 #define ICE_FLOW_PROF_HDR_S     32
3418 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3419 #define ICE_FLOW_PROF_ENCAP_S   63
3420 #define ICE_FLOW_PROF_ENCAP_M   (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
3421
3422 #define ICE_RSS_OUTER_HEADERS   1
3423 #define ICE_RSS_INNER_HEADERS   2
3424
3425 /* Flow profile ID format:
3426  * [0:31] - Packet match fields
3427  * [32:62] - Protocol header
3428  * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
3429  */
3430 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3431         (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3432               (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3433               ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
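
/* Worked example (illustrative values only): with hash = 0x0000000C,
 * hdr = 0x00000050 and segs_cnt = 2 (an inner/tunneled configuration),
 *
 *        ICE_FLOW_GEN_PROFID(0x0000000C, 0x00000050, 2)
 *                = 0x0000000C                    match fields, bits 0-31
 *                | (0x00000050ULL << 32)         protocol header bits
 *                | ICE_FLOW_PROF_ENCAP_M         bit 63, encapsulated
 *                = 0x800000500000000C
 */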
3434
3435 static void
3436 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3437 {
3438         u32 s = ((src % 4) << 3); /* byte shift */
3439         u32 v = dst | 0x80; /* value to program */
3440         u8 i = src / 4; /* register index */
3441         u32 reg;
3442
3443         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3444         reg = (reg & ~(0xff << s)) | (v << s);
3445         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3446 }
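
/* Worked example (illustrative only): each GLQF_HSYMM register holds four
 * one-byte entries, so field-vector word 'src' lands in register src / 4 at
 * byte src % 4.  For src = 5 and dst = 9, ice_rss_config_xor_word() reads
 * GLQF_HSYMM(prof_id, 1), clears byte 1 and writes 0x89 there (dst | 0x80).
 */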
3447
3448 static void
3449 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3450 {
3451         int fv_last_word =
3452                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3453         int i;
3454
3455         for (i = 0; i < len; i++) {
3456                 ice_rss_config_xor_word(hw, prof_id,
3457                                         /* Yes, the field vector in GLQF_HSYMM
3458                                          * and GLQF_HINSET is reversed!
3459                                          */
3460                                         fv_last_word - (src + i),
3461                                         fv_last_word - (dst + i));
3462                 ice_rss_config_xor_word(hw, prof_id,
3463                                         fv_last_word - (dst + i),
3464                                         fv_last_word - (src + i));
3465         }
3466 }
3467
3468 static void
3469 ice_rss_update_symm(struct ice_hw *hw,
3470                     struct ice_flow_prof *prof)
3471 {
3472         struct ice_prof_map *map;
3473         u8 prof_id, m;
3474
3475         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3476         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3477         if (map)
3478                 prof_id = map->prof_id;
3479         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3480         if (!map)
3481                 return;
3482         /* clear to default */
3483         for (m = 0; m < 6; m++)
3484                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3485         if (prof->cfg.symm) {
3486                 struct ice_flow_seg_info *seg =
3487                         &prof->segs[prof->segs_cnt - 1];
3488
3489                 struct ice_flow_seg_xtrct *ipv4_src =
3490                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3491                 struct ice_flow_seg_xtrct *ipv4_dst =
3492                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3493                 struct ice_flow_seg_xtrct *ipv6_src =
3494                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3495                 struct ice_flow_seg_xtrct *ipv6_dst =
3496                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3497
3498                 struct ice_flow_seg_xtrct *tcp_src =
3499                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3500                 struct ice_flow_seg_xtrct *tcp_dst =
3501                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3502
3503                 struct ice_flow_seg_xtrct *udp_src =
3504                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3505                 struct ice_flow_seg_xtrct *udp_dst =
3506                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3507
3508                 struct ice_flow_seg_xtrct *sctp_src =
3509                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3510                 struct ice_flow_seg_xtrct *sctp_dst =
3511                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3512
3513                 /* xor IPv4 */
3514                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3515                         ice_rss_config_xor(hw, prof_id,
3516                                            ipv4_src->idx, ipv4_dst->idx, 2);
3517
3518                 /* xor IPv6 */
3519                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3520                         ice_rss_config_xor(hw, prof_id,
3521                                            ipv6_src->idx, ipv6_dst->idx, 8);
3522
3523                 /* xor TCP */
3524                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3525                         ice_rss_config_xor(hw, prof_id,
3526                                            tcp_src->idx, tcp_dst->idx, 1);
3527
3528                 /* xor UDP */
3529                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3530                         ice_rss_config_xor(hw, prof_id,
3531                                            udp_src->idx, udp_dst->idx, 1);
3532
3533                 /* xor SCTP */
3534                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3535                         ice_rss_config_xor(hw, prof_id,
3536                                            sctp_src->idx, sctp_dst->idx, 1);
3537         }
3538 }
3539
3540 /**
3541  * ice_add_rss_cfg_sync - add an RSS configuration
3542  * @hw: pointer to the hardware structure
3543  * @vsi_handle: software VSI handle
3544  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3545  * @addl_hdrs: protocol header fields
3546  * @segs_cnt: packet segment count
3547  * @symm: symmetric hash enable/disable
3548  *
3549  * Assumption: lock has already been acquired for RSS list
3550  */
3551 static enum ice_status
3552 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3553                      u32 addl_hdrs, u8 segs_cnt, bool symm)
3554 {
3555         const enum ice_block blk = ICE_BLK_RSS;
3556         struct ice_flow_prof *prof = NULL;
3557         struct ice_flow_seg_info *segs;
3558         enum ice_status status;
3559
3560         if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3561                 return ICE_ERR_PARAM;
3562
3563         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3564                                                       sizeof(*segs));
3565         if (!segs)
3566                 return ICE_ERR_NO_MEMORY;
3567
3568         /* Construct the packet segment info from the hashed fields */
3569         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3570                                            addl_hdrs);
3571         if (status)
3572                 goto exit;
3573
3574         /* Don't do RSS for GTPU Outer */
3575         if (segs_cnt == ICE_RSS_OUTER_HEADERS &&
3576             segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3577                 status = ICE_SUCCESS;
3578                 goto exit;
3579         }
3580
3581         /* Search for a flow profile that has matching headers, hash fields,
3582          * and the input VSI associated with it. If found, no further
3583          * operations are required, so exit.
3584          */
3585         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3586                                         vsi_handle,
3587                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
3588                                         ICE_FLOW_FIND_PROF_CHK_VSI);
3589         if (prof) {
3590                 if (prof->cfg.symm == symm)
3591                         goto exit;
3592                 prof->cfg.symm = symm;
3593                 goto update_symm;
3594         }
3595
3596         /* Check if a flow profile exists with the same protocol headers and
3597          * is associated with the input VSI. If so, disassociate the VSI from
3598          * this profile. The VSI will be added to a new profile created with
3599          * the protocol header and the new hash field configuration.
3600          */
3601         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3602                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3603         if (prof) {
3604                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3605                 if (!status)
3606                         ice_rem_rss_list(hw, vsi_handle, prof);
3607                 else
3608                         goto exit;
3609
3610                 /* Remove profile if it has no VSIs associated */
3611                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3612                         status = ice_flow_rem_prof(hw, blk, prof->id);
3613                         if (status)
3614                                 goto exit;
3615                 }
3616         }
3617
3618         /* Search for a profile that has the same match fields only. If one
3619          * exists, then associate the VSI with this profile.
3620          */
3621         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3622                                         vsi_handle,
3623                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3624         if (prof) {
3625                 if (prof->cfg.symm == symm) {
3626                         status = ice_flow_assoc_prof(hw, blk, prof,
3627                                                      vsi_handle);
3628                         if (!status)
3629                                 status = ice_add_rss_list(hw, vsi_handle,
3630                                                           prof);
3631                 } else {
3632                         /* If a profile exists but with a different symmetric
3633                          * hash requirement, just return an error.
3634                          */
3635                         status = ICE_ERR_NOT_SUPPORTED;
3636                 }
3637                 goto exit;
3638         }
3639
3640         /* Create a new flow profile with the generated profile ID and packet
3641          * segment information.
3642          */
3643         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3644                                    ICE_FLOW_GEN_PROFID(hashed_flds,
3645                                                        segs[segs_cnt - 1].hdrs,
3646                                                        segs_cnt),
3647                                    segs, segs_cnt, NULL, 0, &prof);
3648         if (status)
3649                 goto exit;
3650
3651         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3652         /* If association with the new flow profile failed, then this profile
3653          * can be removed.
3654          */
3655         if (status) {
3656                 ice_flow_rem_prof(hw, blk, prof->id);
3657                 goto exit;
3658         }
3659
3660         status = ice_add_rss_list(hw, vsi_handle, prof);
3661
3662         prof->cfg.symm = symm;
3663
3664 update_symm:
3665         ice_rss_update_symm(hw, prof);
3666
3667 exit:
3668         ice_free(hw, segs);
3669         return status;
3670 }
3671
3672 /**
3673  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3674  * @hw: pointer to the hardware structure
3675  * @vsi_handle: software VSI handle
3676  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3677  * @addl_hdrs: protocol header fields
3678  * @symm: symmetric hash enable/disable
3679  *
3680  * This function will generate a flow profile based on the input fields to
3681  * hash on and the flow type, and will then use the VSI number to add a flow
3682  * entry to the profile.
3683  */
3684 enum ice_status
3685 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3686                 u32 addl_hdrs, bool symm)
3687 {
3688         enum ice_status status;
3689
3690         if (hashed_flds == ICE_HASH_INVALID ||
3691             !ice_is_vsi_valid(hw, vsi_handle))
3692                 return ICE_ERR_PARAM;
3693
3694         ice_acquire_lock(&hw->rss_locks);
3695         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3696                                       ICE_RSS_OUTER_HEADERS, symm);
3697         if (!status)
3698                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3699                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3700                                               symm);
3701         ice_release_lock(&hw->rss_locks);
3702
3703         return status;
3704 }
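
/* Usage sketch (illustrative only): enable symmetric RSS hashing on the IPv4
 * source and destination addresses of a VSI.  'hw' and 'vsi_handle' are
 * assumed to come from the caller, and the hash-field/header selections are
 * examples only.
 *
 *        enum ice_status err;
 *
 *        err = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
 *                              ICE_FLOW_SEG_HDR_IPV4, true);
 */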
3705
3706 /**
3707  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3708  * @hw: pointer to the hardware structure
3709  * @vsi_handle: software VSI handle
3710  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3711  * @addl_hdrs: Protocol header fields within a packet segment
3712  * @segs_cnt: packet segment count
3713  *
3714  * Assumption: lock has already been acquired for RSS list
3715  */
3716 static enum ice_status
3717 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3718                      u32 addl_hdrs, u8 segs_cnt)
3719 {
3720         const enum ice_block blk = ICE_BLK_RSS;
3721         struct ice_flow_seg_info *segs;
3722         struct ice_flow_prof *prof;
3723         enum ice_status status;
3724
3725         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3726                                                       sizeof(*segs));
3727         if (!segs)
3728                 return ICE_ERR_NO_MEMORY;
3729
3730         /* Construct the packet segment info from the hashed fields */
3731         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3732                                            addl_hdrs);
3733         if (status)
3734                 goto out;
3735
3736         /* Don't do RSS for GTPU Outer */
3737         if (segs_cnt == ICE_RSS_OUTER_HEADERS &&
3738             segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3739                 status = ICE_SUCCESS;
3740                 goto out;
3741         }
3742
3743         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3744                                         vsi_handle,
3745                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3746         if (!prof) {
3747                 status = ICE_ERR_DOES_NOT_EXIST;
3748                 goto out;
3749         }
3750
3751         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3752         if (status)
3753                 goto out;
3754
3755         /* Remove RSS configuration from VSI context before deleting
3756          * the flow profile.
3757          */
3758         ice_rem_rss_list(hw, vsi_handle, prof);
3759
3760         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3761                 status = ice_flow_rem_prof(hw, blk, prof->id);
3762
3763 out:
3764         ice_free(hw, segs);
3765         return status;
3766 }
3767
3768 /**
3769  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3770  * @hw: pointer to the hardware structure
3771  * @vsi_handle: software VSI handle
3772  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3773  * @addl_hdrs: Protocol header fields within a packet segment
3774  *
3775  * This function will look up the flow profile based on the input
3776  * hash field bitmap, iterate through that profile's entry list, and
3777  * find the entry associated with the input VSI to be removed. Calls
3778  * are made to the underlying flow APIs, which will in turn build or
3779  * update buffers for the RSS XLT1 section.
3780  */
3781 enum ice_status
3782 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3783                 u32 addl_hdrs)
3784 {
3785         enum ice_status status;
3786
3787         if (hashed_flds == ICE_HASH_INVALID ||
3788             !ice_is_vsi_valid(hw, vsi_handle))
3789                 return ICE_ERR_PARAM;
3790
3791         ice_acquire_lock(&hw->rss_locks);
3792         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3793                                       ICE_RSS_OUTER_HEADERS);
3794         if (!status)
3795                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3796                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3797         ice_release_lock(&hw->rss_locks);
3798
3799         return status;
3800 }
3801
3802 /**
3803  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3804  * @hw: pointer to the hardware structure
3805  * @vsi_handle: software VSI handle
3806  */
3807 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3808 {
3809         enum ice_status status = ICE_SUCCESS;
3810         struct ice_rss_cfg *r;
3811
3812         if (!ice_is_vsi_valid(hw, vsi_handle))
3813                 return ICE_ERR_PARAM;
3814
3815         ice_acquire_lock(&hw->rss_locks);
3816         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3817                             ice_rss_cfg, l_entry) {
3818                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3819                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3820                                                       r->hashed_flds,
3821                                                       r->packet_hdr,
3822                                                       ICE_RSS_OUTER_HEADERS,
3823                                                       r->symm);
3824                         if (status)
3825                                 break;
3826                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3827                                                       r->hashed_flds,
3828                                                       r->packet_hdr,
3829                                                       ICE_RSS_INNER_HEADERS,
3830                                                       r->symm);
3831                         if (status)
3832                                 break;
3833                 }
3834         }
3835         ice_release_lock(&hw->rss_locks);
3836
3837         return status;
3838 }
3839
3840 /**
3841  * ice_get_rss_cfg - returns hashed fields for the given header types
3842  * @hw: pointer to the hardware structure
3843  * @vsi_handle: software VSI handle
3844  * @hdrs: protocol header type
3845  *
3846  * This function will return the match fields of the first instance of a flow
3847  * profile having the given header types and containing the input VSI.
3848  */
3849 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3850 {
3851         u64 rss_hash = ICE_HASH_INVALID;
3852         struct ice_rss_cfg *r;
3853
3854         /* Verify that the protocol header is non-zero and the VSI is valid */
3855         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3856                 return ICE_HASH_INVALID;
3857
3858         ice_acquire_lock(&hw->rss_locks);
3859         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3860                             ice_rss_cfg, l_entry)
3861                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3862                     r->packet_hdr == hdrs) {
3863                         rss_hash = r->hashed_flds;
3864                         break;
3865                 }
3866         ice_release_lock(&hw->rss_locks);
3867
3868         return rss_hash;
3869 }
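
/* Usage sketch (illustrative only): query which fields are currently hashed
 * for plain IPv4 traffic on a VSI and, if a configuration exists, remove it.
 * 'hw' and 'vsi_handle' are assumed to come from the caller.
 *
 *        u64 flds = ice_get_rss_cfg(hw, vsi_handle, ICE_FLOW_SEG_HDR_IPV4);
 *
 *        if (flds != ICE_HASH_INVALID)
 *                ice_rem_rss_cfg(hw, vsi_handle, flds, ICE_FLOW_SEG_HDR_IPV4);
 */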