dpdk.git: drivers/net/ice/base/ice_flow.c (commit 3bc78f87bfe1a83d42bb980231a90d3818365a4e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
17 #define ICE_FLOW_FLD_SZ_IP_TTL          1
18 #define ICE_FLOW_FLD_SZ_IP_PROT         1
19 #define ICE_FLOW_FLD_SZ_PORT            2
20 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
21 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
22 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
23 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
24 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
25 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
26 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
27 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
28 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
29 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
30 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
31 #define ICE_FLOW_FLD_SZ_AH_SPI  4
32 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
33
34 /* Describe properties of a protocol header field */
35 struct ice_flow_field_info {
36         enum ice_flow_seg_hdr hdr;
37         s16 off;        /* Offset from start of a protocol header, in bits */
38         u16 size;       /* Size of field in bits */
39         u16 mask;       /* 16-bit mask for field */
40 };
41
42 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
43         .hdr = _hdr, \
44         .off = (_offset_bytes) * BITS_PER_BYTE, \
45         .size = (_size_bytes) * BITS_PER_BYTE, \
46         .mask = 0, \
47 }
48
49 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
50         .hdr = _hdr, \
51         .off = (_offset_bytes) * BITS_PER_BYTE, \
52         .size = (_size_bytes) * BITS_PER_BYTE, \
53         .mask = _mask, \
54 }
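
/* Illustrative note (not part of the upstream file): the two macros above
 * only convert byte-based offsets and sizes into the bit units stored in
 * struct ice_flow_field_info.  For example, the S-VLAN entry in the table
 * below,
 *
 *      ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN)
 *
 * expands to
 *
 *      { .hdr = ICE_FLOW_SEG_HDR_VLAN, .off = 12 * BITS_PER_BYTE,
 *        .size = 2 * BITS_PER_BYTE, .mask = 0 }
 *
 * i.e. a 16-bit field located 96 bits into the segment's headers.
 */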
55
56 /* Table containing properties of supported protocol header fields */
57 static const
58 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
59         /* Ether */
60         /* ICE_FLOW_FIELD_IDX_ETH_DA */
61         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
62         /* ICE_FLOW_FIELD_IDX_ETH_SA */
63         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
64         /* ICE_FLOW_FIELD_IDX_S_VLAN */
65         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
66         /* ICE_FLOW_FIELD_IDX_C_VLAN */
67         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
68         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
69         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
70         /* IPv4 / IPv6 */
71         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
72         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
73                               0x00fc),
74         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
75         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
76                               0x0ff0),
77         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
78         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
80         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
81         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
82                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
83         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
84         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
86         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
87         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
88                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
89         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
90         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
91         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
92         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
94         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
95         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
96         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
99                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
100         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
101         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
102                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
103         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
104         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
105                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
106         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
107         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
108                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
109         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
110         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
111                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
112         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
113         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
114                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
115         /* Transport */
116         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
117         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
118         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
119         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
120         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
121         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
122         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
123         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
124         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
125         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
126         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
127         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
128         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
129         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
130         /* ARP */
131         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
132         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
133         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
134         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
136         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
137         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
138         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
139         /* ICE_FLOW_FIELD_IDX_ARP_OP */
140         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
141         /* ICMP */
142         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
143         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
144         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
145         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
146         /* GRE */
147         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
148         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
149         /* GTP */
150         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
151         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
152                           ICE_FLOW_FLD_SZ_GTP_TEID),
153         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
154         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
155                           ICE_FLOW_FLD_SZ_GTP_TEID),
156         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
157         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
158                           ICE_FLOW_FLD_SZ_GTP_TEID),
159         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
160         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
161                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
162         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
163         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
164                           ICE_FLOW_FLD_SZ_GTP_TEID),
165         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
166         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
167                           ICE_FLOW_FLD_SZ_GTP_TEID),
168         /* PPPOE */
169         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
170         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
171                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
172         /* PFCP */
173         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
174         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
175                           ICE_FLOW_FLD_SZ_PFCP_SEID),
176         /* L2TPV3 */
177         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
178         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
179                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
180         /* ESP */
181         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
182         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
183                           ICE_FLOW_FLD_SZ_ESP_SPI),
184         /* AH */
185         /* ICE_FLOW_FIELD_IDX_AH_SPI */
186         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
187                           ICE_FLOW_FLD_SZ_AH_SPI),
188         /* NAT_T_ESP */
189         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
190         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
191                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
192 };
193
194 /* Bitmaps indicating relevant packet types for a particular protocol header
195  *
196  * Packet types for packets with an Outer/First/Single MAC header
197  */
198 static const u32 ice_ptypes_mac_ofos[] = {
199         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
200         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
201         0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
202         0x00000000, 0x00000000, 0x00000000, 0x00000000,
203         0x00000000, 0x00000000, 0x00000000, 0x00000000,
204         0x00000000, 0x00000000, 0x00000000, 0x00000000,
205         0x00000000, 0x00000000, 0x00000000, 0x00000000,
206         0x00000000, 0x00000000, 0x00000000, 0x00000000,
207 };
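
/* Illustrative note (not part of the upstream file): each of these tables is
 * a bitmap over the hardware packet type (PTYPE) space, stored as 32 32-bit
 * words (1024 bits); PTYPE n maps to bit (n % 32) of word (n / 32).  The
 * tables are consumed through ice_and_bitmap()/ice_andnot_bitmap() in
 * ice_flow_proc_seg_hdrs() below.  A standalone membership test could look
 * like this hypothetical helper:
 *
 *      static bool ice_ptype_test(const u32 *bm, u16 ptype)
 *      {
 *              return (bm[ptype / 32] >> (ptype % 32)) & 1;
 *      }
 *
 * e.g. a first word of 0x00000800 has only bit 11 set, so only PTYPE 11 of
 * that 32-PTYPE window is selected.
 */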
208
209 /* Packet types for packets with an Innermost/Last MAC VLAN header */
210 static const u32 ice_ptypes_macvlan_il[] = {
211         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
212         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
213         0x00000000, 0x00000000, 0x00000000, 0x00000000,
214         0x00000000, 0x00000000, 0x00000000, 0x00000000,
215         0x00000000, 0x00000000, 0x00000000, 0x00000000,
216         0x00000000, 0x00000000, 0x00000000, 0x00000000,
217         0x00000000, 0x00000000, 0x00000000, 0x00000000,
218         0x00000000, 0x00000000, 0x00000000, 0x00000000,
219 };
220
221 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
222  * include IPV4 other PTYPEs
223  */
224 static const u32 ice_ptypes_ipv4_ofos[] = {
225         0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
226         0x00000000, 0x00000155, 0x00000000, 0x00000000,
227         0x00000000, 0x000FC000, 0x00000000, 0x00000000,
228         0x00000000, 0x00000000, 0x00000000, 0x00000000,
229         0x00000000, 0x00000000, 0x00000000, 0x00000000,
230         0x00000000, 0x00000000, 0x00000000, 0x00000000,
231         0x00000000, 0x00000000, 0x00000000, 0x00000000,
232         0x00000000, 0x00000000, 0x00000000, 0x00000000,
233 };
234
235 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
236  * IPV4 other PTYPEs
237  */
238 static const u32 ice_ptypes_ipv4_ofos_all[] = {
239         0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
240         0x00000000, 0x00000155, 0x00000000, 0x00000000,
241         0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
242         0x00000000, 0x00000000, 0x00000000, 0x00000000,
243         0x00000000, 0x00000000, 0x00000000, 0x00000000,
244         0x00000000, 0x00000000, 0x00000000, 0x00000000,
245         0x00000000, 0x00000000, 0x00000000, 0x00000000,
246         0x00000000, 0x00000000, 0x00000000, 0x00000000,
247 };
248
249 /* Packet types for packets with an Innermost/Last IPv4 header */
250 static const u32 ice_ptypes_ipv4_il[] = {
251         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
252         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
253         0x00000000, 0x00000000, 0x001FF800, 0x00000000,
254         0x00000000, 0x00000000, 0x00000000, 0x00000000,
255         0x00000000, 0x00000000, 0x00000000, 0x00000000,
256         0x00000000, 0x00000000, 0x00000000, 0x00000000,
257         0x00000000, 0x00000000, 0x00000000, 0x00000000,
258         0x00000000, 0x00000000, 0x00000000, 0x00000000,
259 };
260
261 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
262  * include IPV6 other PTYPEs
263  */
264 static const u32 ice_ptypes_ipv6_ofos[] = {
265         0x00000000, 0x00000000, 0x77000000, 0x10002000,
266         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
267         0x00000000, 0x03F00000, 0x00000000, 0x00000000,
268         0x00000000, 0x00000000, 0x00000000, 0x00000000,
269         0x00000000, 0x00000000, 0x00000000, 0x00000000,
270         0x00000000, 0x00000000, 0x00000000, 0x00000000,
271         0x00000000, 0x00000000, 0x00000000, 0x00000000,
272         0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 };
274
275 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
276  * IPV6 other PTYPEs
277  */
278 static const u32 ice_ptypes_ipv6_ofos_all[] = {
279         0x00000000, 0x00000000, 0x77000000, 0x10002000,
280         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
281         0x00000000, 0x03F00000, 0x7C1F0000, 0x00000206,
282         0x00000000, 0x00000000, 0x00000000, 0x00000000,
283         0x00000000, 0x00000000, 0x00000000, 0x00000000,
284         0x00000000, 0x00000000, 0x00000000, 0x00000000,
285         0x00000000, 0x00000000, 0x00000000, 0x00000000,
286         0x00000000, 0x00000000, 0x00000000, 0x00000000,
287 };
288
289 /* Packet types for packets with an Innermost/Last IPv6 header */
290 static const u32 ice_ptypes_ipv6_il[] = {
291         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
292         0x00000770, 0x00000000, 0x00000000, 0x00000000,
293         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
294         0x00000000, 0x00000000, 0x00000000, 0x00000000,
295         0x00000000, 0x00000000, 0x00000000, 0x00000000,
296         0x00000000, 0x00000000, 0x00000000, 0x00000000,
297         0x00000000, 0x00000000, 0x00000000, 0x00000000,
298         0x00000000, 0x00000000, 0x00000000, 0x00000000,
299 };
300
301 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
302 static const u32 ice_ipv4_ofos_no_l4[] = {
303         0x10C00000, 0x04000800, 0x00000000, 0x00000000,
304         0x00000000, 0x00000000, 0x00000000, 0x00000000,
305         0x00000000, 0x000cc000, 0x00000000, 0x00000000,
306         0x00000000, 0x00000000, 0x00000000, 0x00000000,
307         0x00000000, 0x00000000, 0x00000000, 0x00000000,
308         0x00000000, 0x00000000, 0x00000000, 0x00000000,
309         0x00000000, 0x00000000, 0x00000000, 0x00000000,
310         0x00000000, 0x00000000, 0x00000000, 0x00000000,
311 };
312
313 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
314 static const u32 ice_ipv4_il_no_l4[] = {
315         0x60000000, 0x18043008, 0x80000002, 0x6010c021,
316         0x00000008, 0x00000000, 0x00000000, 0x00000000,
317         0x00000000, 0x00000000, 0x00139800, 0x00000000,
318         0x00000000, 0x00000000, 0x00000000, 0x00000000,
319         0x00000000, 0x00000000, 0x00000000, 0x00000000,
320         0x00000000, 0x00000000, 0x00000000, 0x00000000,
321         0x00000000, 0x00000000, 0x00000000, 0x00000000,
322         0x00000000, 0x00000000, 0x00000000, 0x00000000,
323 };
324
325 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
326 static const u32 ice_ipv6_ofos_no_l4[] = {
327         0x00000000, 0x00000000, 0x43000000, 0x10002000,
328         0x00000000, 0x00000000, 0x00000000, 0x00000000,
329         0x00000000, 0x02300000, 0x00000000, 0x00000000,
330         0x00000000, 0x00000000, 0x00000000, 0x00000000,
331         0x00000000, 0x00000000, 0x00000000, 0x00000000,
332         0x00000000, 0x00000000, 0x00000000, 0x00000000,
333         0x00000000, 0x00000000, 0x00000000, 0x00000000,
334         0x00000000, 0x00000000, 0x00000000, 0x00000000,
335 };
336
337 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
338 static const u32 ice_ipv6_il_no_l4[] = {
339         0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
340         0x00000430, 0x00000000, 0x00000000, 0x00000000,
341         0x00000000, 0x00000000, 0x4e600000, 0x00000000,
342         0x00000000, 0x00000000, 0x00000000, 0x00000000,
343         0x00000000, 0x00000000, 0x00000000, 0x00000000,
344         0x00000000, 0x00000000, 0x00000000, 0x00000000,
345         0x00000000, 0x00000000, 0x00000000, 0x00000000,
346         0x00000000, 0x00000000, 0x00000000, 0x00000000,
347 };
348
349 /* Packet types for packets with an Outermost/First ARP header */
350 static const u32 ice_ptypes_arp_of[] = {
351         0x00000800, 0x00000000, 0x00000000, 0x00000000,
352         0x00000000, 0x00000000, 0x00000000, 0x00000000,
353         0x00000000, 0x00000000, 0x00000000, 0x00000000,
354         0x00000000, 0x00000000, 0x00000000, 0x00000000,
355         0x00000000, 0x00000000, 0x00000000, 0x00000000,
356         0x00000000, 0x00000000, 0x00000000, 0x00000000,
357         0x00000000, 0x00000000, 0x00000000, 0x00000000,
358         0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 };
360
361 /* UDP Packet types for non-tunneled packets or tunneled
362  * packets with inner UDP.
363  */
364 static const u32 ice_ptypes_udp_il[] = {
365         0x81000000, 0x20204040, 0x04000010, 0x80810102,
366         0x00000040, 0x00000000, 0x00000000, 0x00000000,
367         0x00000000, 0x00410000, 0x90842000, 0x00000007,
368         0x00000000, 0x00000000, 0x00000000, 0x00000000,
369         0x00000000, 0x00000000, 0x00000000, 0x00000000,
370         0x00000000, 0x00000000, 0x00000000, 0x00000000,
371         0x00000000, 0x00000000, 0x00000000, 0x00000000,
372         0x00000000, 0x00000000, 0x00000000, 0x00000000,
373 };
374
375 /* Packet types for packets with an Innermost/Last TCP header */
376 static const u32 ice_ptypes_tcp_il[] = {
377         0x04000000, 0x80810102, 0x10000040, 0x02040408,
378         0x00000102, 0x00000000, 0x00000000, 0x00000000,
379         0x00000000, 0x00820000, 0x21084000, 0x00000000,
380         0x00000000, 0x00000000, 0x00000000, 0x00000000,
381         0x00000000, 0x00000000, 0x00000000, 0x00000000,
382         0x00000000, 0x00000000, 0x00000000, 0x00000000,
383         0x00000000, 0x00000000, 0x00000000, 0x00000000,
384         0x00000000, 0x00000000, 0x00000000, 0x00000000,
385 };
386
387 /* Packet types for packets with an Innermost/Last SCTP header */
388 static const u32 ice_ptypes_sctp_il[] = {
389         0x08000000, 0x01020204, 0x20000081, 0x04080810,
390         0x00000204, 0x00000000, 0x00000000, 0x00000000,
391         0x00000000, 0x01040000, 0x00000000, 0x00000000,
392         0x00000000, 0x00000000, 0x00000000, 0x00000000,
393         0x00000000, 0x00000000, 0x00000000, 0x00000000,
394         0x00000000, 0x00000000, 0x00000000, 0x00000000,
395         0x00000000, 0x00000000, 0x00000000, 0x00000000,
396         0x00000000, 0x00000000, 0x00000000, 0x00000000,
397 };
398
399 /* Packet types for packets with an Outermost/First ICMP header */
400 static const u32 ice_ptypes_icmp_of[] = {
401         0x10000000, 0x00000000, 0x00000000, 0x00000000,
402         0x00000000, 0x00000000, 0x00000000, 0x00000000,
403         0x00000000, 0x00000000, 0x00000000, 0x00000000,
404         0x00000000, 0x00000000, 0x00000000, 0x00000000,
405         0x00000000, 0x00000000, 0x00000000, 0x00000000,
406         0x00000000, 0x00000000, 0x00000000, 0x00000000,
407         0x00000000, 0x00000000, 0x00000000, 0x00000000,
408         0x00000000, 0x00000000, 0x00000000, 0x00000000,
409 };
410
411 /* Packet types for packets with an Innermost/Last ICMP header */
412 static const u32 ice_ptypes_icmp_il[] = {
413         0x00000000, 0x02040408, 0x40000102, 0x08101020,
414         0x00000408, 0x00000000, 0x00000000, 0x00000000,
415         0x00000000, 0x00000000, 0x42108000, 0x00000000,
416         0x00000000, 0x00000000, 0x00000000, 0x00000000,
417         0x00000000, 0x00000000, 0x00000000, 0x00000000,
418         0x00000000, 0x00000000, 0x00000000, 0x00000000,
419         0x00000000, 0x00000000, 0x00000000, 0x00000000,
420         0x00000000, 0x00000000, 0x00000000, 0x00000000,
421 };
422
423 /* Packet types for packets with an Outermost/First GRE header */
424 static const u32 ice_ptypes_gre_of[] = {
425         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
426         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
427         0x00000000, 0x00000000, 0x00000000, 0x00000000,
428         0x00000000, 0x00000000, 0x00000000, 0x00000000,
429         0x00000000, 0x00000000, 0x00000000, 0x00000000,
430         0x00000000, 0x00000000, 0x00000000, 0x00000000,
431         0x00000000, 0x00000000, 0x00000000, 0x00000000,
432         0x00000000, 0x00000000, 0x00000000, 0x00000000,
433 };
434
435 /* Packet types for packets with an Innermost/Last MAC header */
436 static const u32 ice_ptypes_mac_il[] = {
437         0x00000000, 0x00000000, 0x00000000, 0x00000000,
438         0x00000000, 0x00000000, 0x00000000, 0x00000000,
439         0x00000000, 0x00000000, 0x00000000, 0x00000000,
440         0x00000000, 0x00000000, 0x00000000, 0x00000000,
441         0x00000000, 0x00000000, 0x00000000, 0x00000000,
442         0x00000000, 0x00000000, 0x00000000, 0x00000000,
443         0x00000000, 0x00000000, 0x00000000, 0x00000000,
444         0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 };
446
447 /* Packet types for GTPC */
448 static const u32 ice_ptypes_gtpc[] = {
449         0x00000000, 0x00000000, 0x00000000, 0x00000000,
450         0x00000000, 0x00000000, 0x00000000, 0x00000000,
451         0x00000000, 0x00000000, 0x00000180, 0x00000000,
452         0x00000000, 0x00000000, 0x00000000, 0x00000000,
453         0x00000000, 0x00000000, 0x00000000, 0x00000000,
454         0x00000000, 0x00000000, 0x00000000, 0x00000000,
455         0x00000000, 0x00000000, 0x00000000, 0x00000000,
456         0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 };
458
459 /* Packet types for GTPC with TEID */
460 static const u32 ice_ptypes_gtpc_tid[] = {
461         0x00000000, 0x00000000, 0x00000000, 0x00000000,
462         0x00000000, 0x00000000, 0x00000000, 0x00000000,
463         0x00000000, 0x00000000, 0x00000060, 0x00000000,
464         0x00000000, 0x00000000, 0x00000000, 0x00000000,
465         0x00000000, 0x00000000, 0x00000000, 0x00000000,
466         0x00000000, 0x00000000, 0x00000000, 0x00000000,
467         0x00000000, 0x00000000, 0x00000000, 0x00000000,
468         0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 };
470
471 /* Packet types for GTPU */
472 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
473         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
474         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
475         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
476         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
477         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
478         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
479         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
480         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
481         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
482         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
483         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
484         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
485         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
486         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
487         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
488         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
489         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
490         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
491         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
492         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
493 };
494
495 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
496         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
497         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
498         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
499         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
500         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
501         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
502         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
503         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
504         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
505         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
506         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
507         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
508         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
509         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
510         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
511         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
512         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
513         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
514         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
515         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
516 };
517
518 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
519         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
520         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
521         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
522         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
523         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
524         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
525         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
526         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
527         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
528         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
529         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
530         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
531         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
532         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
533         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
534         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
535         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
536         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
537         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
538         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
539 };
540
541 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
542         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
543         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
544         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
545         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
546         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
547         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
548         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
549         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
550         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
551         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
552         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
553         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
554         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
555         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
556         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
557         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
558         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
559         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
560         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
561         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
562 };
563
564 static const u32 ice_ptypes_gtpu[] = {
565         0x00000000, 0x00000000, 0x00000000, 0x00000000,
566         0x00000000, 0x00000000, 0x00000000, 0x00000000,
567         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
568         0x00000000, 0x00000000, 0x00000000, 0x00000000,
569         0x00000000, 0x00000000, 0x00000000, 0x00000000,
570         0x00000000, 0x00000000, 0x00000000, 0x00000000,
571         0x00000000, 0x00000000, 0x00000000, 0x00000000,
572         0x00000000, 0x00000000, 0x00000000, 0x00000000,
573 };
574
575 /* Packet types for pppoe */
576 static const u32 ice_ptypes_pppoe[] = {
577         0x00000000, 0x00000000, 0x00000000, 0x00000000,
578         0x00000000, 0x00000000, 0x00000000, 0x00000000,
579         0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
580         0x00000000, 0x00000000, 0x00000000, 0x00000000,
581         0x00000000, 0x00000000, 0x00000000, 0x00000000,
582         0x00000000, 0x00000000, 0x00000000, 0x00000000,
583         0x00000000, 0x00000000, 0x00000000, 0x00000000,
584         0x00000000, 0x00000000, 0x00000000, 0x00000000,
585 };
586
587 /* Packet types for packets with PFCP NODE header */
588 static const u32 ice_ptypes_pfcp_node[] = {
589         0x00000000, 0x00000000, 0x00000000, 0x00000000,
590         0x00000000, 0x00000000, 0x00000000, 0x00000000,
591         0x00000000, 0x00000000, 0x80000000, 0x00000002,
592         0x00000000, 0x00000000, 0x00000000, 0x00000000,
593         0x00000000, 0x00000000, 0x00000000, 0x00000000,
594         0x00000000, 0x00000000, 0x00000000, 0x00000000,
595         0x00000000, 0x00000000, 0x00000000, 0x00000000,
596         0x00000000, 0x00000000, 0x00000000, 0x00000000,
597 };
598
599 /* Packet types for packets with PFCP SESSION header */
600 static const u32 ice_ptypes_pfcp_session[] = {
601         0x00000000, 0x00000000, 0x00000000, 0x00000000,
602         0x00000000, 0x00000000, 0x00000000, 0x00000000,
603         0x00000000, 0x00000000, 0x00000000, 0x00000005,
604         0x00000000, 0x00000000, 0x00000000, 0x00000000,
605         0x00000000, 0x00000000, 0x00000000, 0x00000000,
606         0x00000000, 0x00000000, 0x00000000, 0x00000000,
607         0x00000000, 0x00000000, 0x00000000, 0x00000000,
608         0x00000000, 0x00000000, 0x00000000, 0x00000000,
609 };
610
611 /* Packet types for l2tpv3 */
612 static const u32 ice_ptypes_l2tpv3[] = {
613         0x00000000, 0x00000000, 0x00000000, 0x00000000,
614         0x00000000, 0x00000000, 0x00000000, 0x00000000,
615         0x00000000, 0x00000000, 0x00000000, 0x00000300,
616         0x00000000, 0x00000000, 0x00000000, 0x00000000,
617         0x00000000, 0x00000000, 0x00000000, 0x00000000,
618         0x00000000, 0x00000000, 0x00000000, 0x00000000,
619         0x00000000, 0x00000000, 0x00000000, 0x00000000,
620         0x00000000, 0x00000000, 0x00000000, 0x00000000,
621 };
622
623 /* Packet types for esp */
624 static const u32 ice_ptypes_esp[] = {
625         0x00000000, 0x00000000, 0x00000000, 0x00000000,
626         0x00000000, 0x00000003, 0x00000000, 0x00000000,
627         0x00000000, 0x00000000, 0x00000000, 0x00000000,
628         0x00000000, 0x00000000, 0x00000000, 0x00000000,
629         0x00000000, 0x00000000, 0x00000000, 0x00000000,
630         0x00000000, 0x00000000, 0x00000000, 0x00000000,
631         0x00000000, 0x00000000, 0x00000000, 0x00000000,
632         0x00000000, 0x00000000, 0x00000000, 0x00000000,
633 };
634
635 /* Packet types for ah */
636 static const u32 ice_ptypes_ah[] = {
637         0x00000000, 0x00000000, 0x00000000, 0x00000000,
638         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
639         0x00000000, 0x00000000, 0x00000000, 0x00000000,
640         0x00000000, 0x00000000, 0x00000000, 0x00000000,
641         0x00000000, 0x00000000, 0x00000000, 0x00000000,
642         0x00000000, 0x00000000, 0x00000000, 0x00000000,
643         0x00000000, 0x00000000, 0x00000000, 0x00000000,
644         0x00000000, 0x00000000, 0x00000000, 0x00000000,
645 };
646
647 /* Packet types for packets with NAT_T ESP header */
648 static const u32 ice_ptypes_nat_t_esp[] = {
649         0x00000000, 0x00000000, 0x00000000, 0x00000000,
650         0x00000000, 0x00000030, 0x00000000, 0x00000000,
651         0x00000000, 0x00000000, 0x00000000, 0x00000000,
652         0x00000000, 0x00000000, 0x00000000, 0x00000000,
653         0x00000000, 0x00000000, 0x00000000, 0x00000000,
654         0x00000000, 0x00000000, 0x00000000, 0x00000000,
655         0x00000000, 0x00000000, 0x00000000, 0x00000000,
656         0x00000000, 0x00000000, 0x00000000, 0x00000000,
657 };
658
659 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
660         0x00000846, 0x00000000, 0x00000000, 0x00000000,
661         0x00000000, 0x00000000, 0x00000000, 0x00000000,
662         0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
663         0x00000000, 0x00000000, 0x00000000, 0x00000000,
664         0x00000000, 0x00000000, 0x00000000, 0x00000000,
665         0x00000000, 0x00000000, 0x00000000, 0x00000000,
666         0x00000000, 0x00000000, 0x00000000, 0x00000000,
667         0x00000000, 0x00000000, 0x00000000, 0x00000000,
668 };
669
670 /* Manage parameters and info used during the creation of a flow profile */
671 struct ice_flow_prof_params {
672         enum ice_block blk;
673         u16 entry_length; /* # of bytes formatted entry will require */
674         u8 es_cnt;
675         struct ice_flow_prof *prof;
676
677         /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0.
678          * This will give us the direction flags.
679          */
680         struct ice_fv_word es[ICE_MAX_FV_WORDS];
681         /* attributes can be used to add attributes to a particular PTYPE */
682         const struct ice_ptype_attributes *attr;
683         u16 attr_cnt;
684
685         u16 mask[ICE_MAX_FV_WORDS];
686         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
687 };
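
/* Illustrative summary (not part of the upstream file): an
 * ice_flow_prof_params instance is filled incrementally by the helpers below:
 * ice_flow_proc_seg_hdrs() narrows the 'ptypes' bitmap to the packet types
 * implied by the segment headers (and sets 'attr'/'attr_cnt' for GTPU
 * segments), while ice_flow_xtract_pkt_flags() and ice_flow_xtract_fld()
 * append protocol ID/offset entries to 'es[]' and advance 'es_cnt'.
 */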
688
689 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
690         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
691         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
692         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
693         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
694         ICE_FLOW_SEG_HDR_NAT_T_ESP)
695
696 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
697         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
698 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
699         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
700          ICE_FLOW_SEG_HDR_ARP)
701 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
702         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
703          ICE_FLOW_SEG_HDR_SCTP)
704 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
705 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
706         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
707
708 /**
709  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
710  * @segs: array of one or more packet segments that describe the flow
711  * @segs_cnt: number of packet segments provided
712  */
713 static enum ice_status
714 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
715 {
716         u8 i;
717
718         for (i = 0; i < segs_cnt; i++) {
719                 /* Multiple L3 headers */
720                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
721                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
722                         return ICE_ERR_PARAM;
723
724                 /* Multiple L4 headers */
725                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
726                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
727                         return ICE_ERR_PARAM;
728         }
729
730         return ICE_SUCCESS;
731 }
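
/* Example (illustrative, not part of the upstream file): the power-of-two
 * test above rejects a segment that sets more than one header of the same
 * layer.  A segment with hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6
 * leaves two bits set in ICE_FLOW_SEG_HDRS_L3_MASK, so ice_is_pow2() fails
 * and ICE_ERR_PARAM is returned, while ICE_FLOW_SEG_HDR_IPV4 |
 * ICE_FLOW_SEG_HDR_UDP (one L3 bit, one L4 bit) passes.
 */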
732
733 /* Sizes of fixed known protocol headers without header options */
734 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
735 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
736 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
737 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
738 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
739 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
740 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
741 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
742 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
743
744 /**
745  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
746  * @params: information about the flow to be processed
747  * @seg: index of packet segment whose header size is to be determined
748  */
749 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
750 {
751         u16 sz;
752
753         /* L2 headers */
754         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
755                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
756
757         /* L3 headers */
758         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
759                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
760         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
761                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
762         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
763                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
764         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
765                 /* An L3 header is required if L4 is specified */
766                 return 0;
767
768         /* L4 headers */
769         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
770                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
771         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
772                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
773         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
774                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
775         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
776                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
777
778         return sz;
779 }
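
/* Example (illustrative, not part of the upstream file): for a segment with
 * hdrs = ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
 * ice_flow_calc_seg_sz() returns
 *
 *      ICE_FLOW_PROT_HDR_SZ_MAC + ICE_FLOW_PROT_HDR_SZ_IPV4 +
 *      ICE_FLOW_PROT_HDR_SZ_TCP = 14 + 20 + 20 = 54 bytes,
 *
 * while a segment that requests an L4 header without any L3 header yields 0,
 * since no meaningful fixed size can be computed for it.
 */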
780
781 /**
782  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
783  * @params: information about the flow to be processed
784  *
785  * This function identifies the packet types associated with the protocol
786  * headers being present in packet segments of the specified flow profile.
787  */
788 static enum ice_status
789 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
790 {
791         struct ice_flow_prof *prof;
792         u8 i;
793
794         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
795                    ICE_NONDMA_MEM);
796
797         prof = params->prof;
798
799         for (i = 0; i < params->prof->segs_cnt; i++) {
800                 const ice_bitmap_t *src;
801                 u32 hdrs;
802
803                 hdrs = prof->segs[i].hdrs;
804
805                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
806                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
807                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
808                         ice_and_bitmap(params->ptypes, params->ptypes, src,
809                                        ICE_FLOW_PTYPE_MAX);
810                 }
811
812                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
813                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
814                         ice_and_bitmap(params->ptypes, params->ptypes, src,
815                                        ICE_FLOW_PTYPE_MAX);
816                 }
817
818                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
819                         ice_and_bitmap(params->ptypes, params->ptypes,
820                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
821                                        ICE_FLOW_PTYPE_MAX);
822                 }
823
824                 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
825                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
826                         ice_and_bitmap(params->ptypes, params->ptypes, src,
827                                        ICE_FLOW_PTYPE_MAX);
828                 }
829                 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
830                     (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
831                         src = i ?
832                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
833                                 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
834                         ice_and_bitmap(params->ptypes, params->ptypes, src,
835                                        ICE_FLOW_PTYPE_MAX);
836                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
837                            (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
838                         src = i ?
839                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
840                                 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
841                         ice_and_bitmap(params->ptypes, params->ptypes, src,
842                                        ICE_FLOW_PTYPE_MAX);
843                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
844                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
845                         src = !i ? (const ice_bitmap_t *)ice_ipv4_ofos_no_l4 :
846                                 (const ice_bitmap_t *)ice_ipv4_il_no_l4;
847                         ice_and_bitmap(params->ptypes, params->ptypes, src,
848                                        ICE_FLOW_PTYPE_MAX);
849                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
850                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
851                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
852                         ice_and_bitmap(params->ptypes, params->ptypes, src,
853                                        ICE_FLOW_PTYPE_MAX);
854                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
855                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
856                         src = !i ? (const ice_bitmap_t *)ice_ipv6_ofos_no_l4 :
857                                 (const ice_bitmap_t *)ice_ipv6_il_no_l4;
858                         ice_and_bitmap(params->ptypes, params->ptypes, src,
859                                        ICE_FLOW_PTYPE_MAX);
860                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
861                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
862                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
863                         ice_and_bitmap(params->ptypes, params->ptypes, src,
864                                        ICE_FLOW_PTYPE_MAX);
865                 }
866
867                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
868                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
869                         ice_and_bitmap(params->ptypes, params->ptypes,
870                                        src, ICE_FLOW_PTYPE_MAX);
871                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
872                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
873                         ice_and_bitmap(params->ptypes, params->ptypes, src,
874                                        ICE_FLOW_PTYPE_MAX);
875                 } else {
876                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
877                         ice_andnot_bitmap(params->ptypes, params->ptypes, src,
878                                           ICE_FLOW_PTYPE_MAX);
879                 }
880
881                 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
882                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
883                         ice_and_bitmap(params->ptypes, params->ptypes, src,
884                                        ICE_FLOW_PTYPE_MAX);
885                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
886                         ice_and_bitmap(params->ptypes, params->ptypes,
887                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
888                                        ICE_FLOW_PTYPE_MAX);
889                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
890                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
891                         ice_and_bitmap(params->ptypes, params->ptypes, src,
892                                        ICE_FLOW_PTYPE_MAX);
893                 }
894
895                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
896                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
897                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
898                         ice_and_bitmap(params->ptypes, params->ptypes, src,
899                                        ICE_FLOW_PTYPE_MAX);
900                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
901                         if (!i) {
902                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
903                                 ice_and_bitmap(params->ptypes, params->ptypes,
904                                                src, ICE_FLOW_PTYPE_MAX);
905                         }
906                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
907                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
908                         ice_and_bitmap(params->ptypes, params->ptypes,
909                                        src, ICE_FLOW_PTYPE_MAX);
910                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
911                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
912                         ice_and_bitmap(params->ptypes, params->ptypes,
913                                        src, ICE_FLOW_PTYPE_MAX);
914                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
915                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
916                         ice_and_bitmap(params->ptypes, params->ptypes,
917                                        src, ICE_FLOW_PTYPE_MAX);
918
919                         /* Attributes for GTP packet with downlink */
920                         params->attr = ice_attr_gtpu_down;
921                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
922                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
923                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
924                         ice_and_bitmap(params->ptypes, params->ptypes,
925                                        src, ICE_FLOW_PTYPE_MAX);
926
927                         /* Attributes for GTP packet with uplink */
928                         params->attr = ice_attr_gtpu_up;
929                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
930                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
931                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
932                         ice_and_bitmap(params->ptypes, params->ptypes,
933                                        src, ICE_FLOW_PTYPE_MAX);
934
935                         /* Attributes for GTP packet with Extension Header */
936                         params->attr = ice_attr_gtpu_eh;
937                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
938                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
939                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
940                         ice_and_bitmap(params->ptypes, params->ptypes,
941                                        src, ICE_FLOW_PTYPE_MAX);
942
943                         /* Attributes for GTP packet without Extension Header */
944                         params->attr = ice_attr_gtpu_session;
945                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
946                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
947                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
948                         ice_and_bitmap(params->ptypes, params->ptypes,
949                                        src, ICE_FLOW_PTYPE_MAX);
950                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
951                         src = (const ice_bitmap_t *)ice_ptypes_esp;
952                         ice_and_bitmap(params->ptypes, params->ptypes,
953                                        src, ICE_FLOW_PTYPE_MAX);
954                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
955                         src = (const ice_bitmap_t *)ice_ptypes_ah;
956                         ice_and_bitmap(params->ptypes, params->ptypes,
957                                        src, ICE_FLOW_PTYPE_MAX);
958                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
959                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
960                         ice_and_bitmap(params->ptypes, params->ptypes,
961                                        src, ICE_FLOW_PTYPE_MAX);
962                 }
963
964                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
965                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
966                                 src =
967                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
968                         else
969                                 src =
970                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
971
972                         ice_and_bitmap(params->ptypes, params->ptypes,
973                                        src, ICE_FLOW_PTYPE_MAX);
974                 } else {
975                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
976                         ice_andnot_bitmap(params->ptypes, params->ptypes,
977                                           src, ICE_FLOW_PTYPE_MAX);
978
979                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
980                         ice_andnot_bitmap(params->ptypes, params->ptypes,
981                                           src, ICE_FLOW_PTYPE_MAX);
982                 }
983         }
984
985         return ICE_SUCCESS;
986 }
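
/* Note (illustrative, not part of the upstream file): ice_flow_proc_seg_hdrs()
 * starts from an all-ones PTYPE bitmap (the ice_memset(0xff) above) and ANDs
 * in one table per requested header, so the result is the intersection of all
 * requirements.  For an outer segment with ICE_FLOW_SEG_HDR_IPV4 |
 * ICE_FLOW_SEG_HDR_UDP, only PTYPEs present in both ice_ptypes_ipv4_ofos and
 * ice_ptypes_udp_il survive, and PFCP PTYPEs are additionally cleared via
 * ice_andnot_bitmap() because ICE_FLOW_SEG_HDR_PFCP was not requested.
 */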
987
988 /**
989  * ice_flow_xtract_pkt_flags - Create an extraction sequence entry for packet flags
990  * @hw: pointer to the HW struct
991  * @params: information about the flow to be processed
992  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
993  *
994  * This function will allocate an extraction sequence entry for a DWORD-size
995  * chunk of the packet flags.
996  */
997 static enum ice_status
998 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
999                           struct ice_flow_prof_params *params,
1000                           enum ice_flex_mdid_pkt_flags flags)
1001 {
1002         u8 fv_words = hw->blk[params->blk].es.fvw;
1003         u8 idx;
1004
1005         /* Make sure the number of extraction sequence entries required does not
1006          * exceed the block's capacity.
1007          */
1008         if (params->es_cnt >= fv_words)
1009                 return ICE_ERR_MAX_LIMIT;
1010
1011         /* some blocks require a reversed field vector layout */
1012         if (hw->blk[params->blk].es.reverse)
1013                 idx = fv_words - params->es_cnt - 1;
1014         else
1015                 idx = params->es_cnt;
1016
1017         params->es[idx].prot_id = ICE_PROT_META_ID;
1018         params->es[idx].off = flags;
1019         params->es_cnt++;
1020
1021         return ICE_SUCCESS;
1022 }
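
/* Example (illustrative, not part of the upstream file): the reverse-layout
 * handling above simply fills the field vector from the opposite end.
 * Assuming a block with es.fvw == 48 and es.reverse set, the first extraction
 * entry (es_cnt == 0) lands at index 47, the next at 46, and so on; with
 * es.reverse clear the entries go to indices 0, 1, 2, ...  Each entry carries
 * ICE_PROT_META_ID plus the requested packet-flags offset.
 */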
1023
1024 /**
1025  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1026  * @hw: pointer to the HW struct
1027  * @params: information about the flow to be processed
1028  * @seg: packet segment index of the field to be extracted
1029  * @fld: ID of field to be extracted
1030  * @match: bitfield of all fields
1031  *
1032  * This function determines the protocol ID, offset, and size of the given
1033  * field. It then allocates one or more extraction sequence entries for the
1034  * given field, and fills the entries with protocol ID and offset information.
1035  */
1036 static enum ice_status
1037 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1038                     u8 seg, enum ice_flow_field fld, u64 match)
1039 {
1040         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1041         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1042         u8 fv_words = hw->blk[params->blk].es.fvw;
1043         struct ice_flow_fld_info *flds;
1044         u16 cnt, ese_bits, i;
1045         u16 sib_mask = 0;
1046         u16 mask;
1047         u16 off;
1048
1049         flds = params->prof->segs[seg].fields;
1050
1051         switch (fld) {
1052         case ICE_FLOW_FIELD_IDX_ETH_DA:
1053         case ICE_FLOW_FIELD_IDX_ETH_SA:
1054         case ICE_FLOW_FIELD_IDX_S_VLAN:
1055         case ICE_FLOW_FIELD_IDX_C_VLAN:
1056                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1057                 break;
1058         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1059                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1060                 break;
1061         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1062                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1063                 break;
1064         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1065                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1066                 break;
1067         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1068         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1069                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1070
1071                 /* TTL and PROT share the same extraction seq. entry.
1072                  * Each is considered a sibling to the other in terms of sharing
1073                  * the same extraction sequence entry.
1074                  */
1075                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1076                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1077                 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
1078                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1079
1080                 /* If the sibling field is also included, that field's
1081                  * mask needs to be included.
1082                  */
1083                 if (match & BIT(sib))
1084                         sib_mask = ice_flds_info[sib].mask;
1085                 break;
1086         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1087         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1088                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1089
1090                 /* TTL and PROT share the same extraction seq. entry.
1091                  * Each is considered a sibling to the other in terms of sharing
1092                  * the same extraction sequence entry.
1093                  */
1094                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1095                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1096                 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
1097                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1098
1099                 /* If the sibling field is also included, that field's
1100                  * mask needs to be included.
1101                  */
1102                 if (match & BIT(sib))
1103                         sib_mask = ice_flds_info[sib].mask;
1104                 break;
1105         case ICE_FLOW_FIELD_IDX_IPV4_SA:
1106         case ICE_FLOW_FIELD_IDX_IPV4_DA:
1107                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1108                 break;
1109         case ICE_FLOW_FIELD_IDX_IPV6_SA:
1110         case ICE_FLOW_FIELD_IDX_IPV6_DA:
1111         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1112         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1113         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1114         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1115         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1116         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1117                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1118                 break;
1119         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1120         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1121         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1122                 prot_id = ICE_PROT_TCP_IL;
1123                 break;
1124         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1125         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1126                 prot_id = ICE_PROT_UDP_IL_OR_S;
1127                 break;
1128         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1129         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1130                 prot_id = ICE_PROT_SCTP_IL;
1131                 break;
1132         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1133         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1134         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1135         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1136         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1137         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1138                 /* GTP is accessed through UDP OF protocol */
1139                 prot_id = ICE_PROT_UDP_OF;
1140                 break;
1141         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1142                 prot_id = ICE_PROT_PPPOE;
1143                 break;
1144         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1145                 prot_id = ICE_PROT_UDP_IL_OR_S;
1146                 break;
1147         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1148                 prot_id = ICE_PROT_L2TPV3;
1149                 break;
1150         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1151                 prot_id = ICE_PROT_ESP_F;
1152                 break;
1153         case ICE_FLOW_FIELD_IDX_AH_SPI:
1154                 prot_id = ICE_PROT_ESP_2;
1155                 break;
1156         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1157                 prot_id = ICE_PROT_UDP_IL_OR_S;
1158                 break;
1159         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1160         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1161         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1162         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1163         case ICE_FLOW_FIELD_IDX_ARP_OP:
1164                 prot_id = ICE_PROT_ARP_OF;
1165                 break;
1166         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1167         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1168                 /* ICMP type and code share the same extraction seq. entry */
1169                 prot_id = (params->prof->segs[seg].hdrs &
1170                            ICE_FLOW_SEG_HDR_IPV4) ?
1171                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1172                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1173                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1174                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1175                 break;
1176         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1177                 prot_id = ICE_PROT_GRE_OF;
1178                 break;
1179         default:
1180                 return ICE_ERR_NOT_IMPL;
1181         }
1182
1183         /* Each extraction sequence entry is a word in size, and extracts data
1184          * starting at a word-aligned offset within a protocol header.
1185          */
1186         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1187
1188         flds[fld].xtrct.prot_id = prot_id;
1189         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1190                 ICE_FLOW_FV_EXTRACT_SZ;
1191         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1192         flds[fld].xtrct.idx = params->es_cnt;
1193         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1194
1195         /* Adjust the next field-entry index after accommodating the number of
1196          * entries this field consumes
1197          */
1198         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1199                                   ice_flds_info[fld].size, ese_bits);
1200
1201         /* Fill in the extraction sequence entries needed for this field */
1202         off = flds[fld].xtrct.off;
1203         mask = flds[fld].xtrct.mask;
1204         for (i = 0; i < cnt; i++) {
1205                 /* Only consume an extraction sequence entry if there is no
1206                  * sibling field associated with this field, or the sibling entry
1207                  * does not already extract the word shared with this field.
1208                  */
1209                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1210                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1211                     flds[sib].xtrct.off != off) {
1212                         u8 idx;
1213
1214                         /* Make sure the number of extraction sequence entries
1215                          * required does not exceed the block's capability
1216                          */
1217                         if (params->es_cnt >= fv_words)
1218                                 return ICE_ERR_MAX_LIMIT;
1219
1220                         /* some blocks require a reversed field vector layout */
1221                         if (hw->blk[params->blk].es.reverse)
1222                                 idx = fv_words - params->es_cnt - 1;
1223                         else
1224                                 idx = params->es_cnt;
1225
1226                         params->es[idx].prot_id = prot_id;
1227                         params->es[idx].off = off;
1228                         params->mask[idx] = mask | sib_mask;
1229                         params->es_cnt++;
1230                 }
1231
1232                 off += ICE_FLOW_FV_EXTRACT_SZ;
1233         }
1234
1235         return ICE_SUCCESS;
1236 }
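
/* Worked example (a sketch, not driver code): assuming one field vector word
 * is ICE_FLOW_FV_EXTRACT_SZ = 2 bytes and the IPv4 source address field is
 * described at byte offset 12 with a 4-byte size, ice_flow_xtract_fld() would
 * compute:
 *
 *	ese_bits   = 2 * BITS_PER_BYTE;			(16 bits per entry)
 *	xtrct.off  = (96 / 16) * 2 = 12;		(word-aligned byte offset)
 *	xtrct.disp = 96 % 16 = 0;			(no bit displacement)
 *	cnt = DIVIDE_AND_ROUND_UP(0 + 32, 16) = 2;	(two FV words consumed)
 *
 * so two extraction sequence entries are written, at protocol byte offsets 12
 * and 14, unless a sibling field has already extracted those words.
 */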
1237
1238 /**
1239  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1240  * @hw: pointer to the HW struct
1241  * @params: information about the flow to be processed
1242  * @seg: index of packet segment whose raw fields are to be extracted
1243  */
1244 static enum ice_status
1245 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1246                      u8 seg)
1247 {
1248         u16 fv_words;
1249         u16 hdrs_sz;
1250         u8 i;
1251
1252         if (!params->prof->segs[seg].raws_cnt)
1253                 return ICE_SUCCESS;
1254
1255         if (params->prof->segs[seg].raws_cnt >
1256             ARRAY_SIZE(params->prof->segs[seg].raws))
1257                 return ICE_ERR_MAX_LIMIT;
1258
1259         /* Offsets within the segment headers are not supported */
1260         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1261         if (!hdrs_sz)
1262                 return ICE_ERR_PARAM;
1263
1264         fv_words = hw->blk[params->blk].es.fvw;
1265
1266         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1267                 struct ice_flow_seg_fld_raw *raw;
1268                 u16 off, cnt, j;
1269
1270                 raw = &params->prof->segs[seg].raws[i];
1271
1272                 /* Storing extraction information */
1273                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1274                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1275                         ICE_FLOW_FV_EXTRACT_SZ;
1276                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1277                         BITS_PER_BYTE;
1278                 raw->info.xtrct.idx = params->es_cnt;
1279
1280                 /* Determine the number of field vector entries this raw field
1281                  * consumes.
1282                  */
1283                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1284                                           (raw->info.src.last * BITS_PER_BYTE),
1285                                           (ICE_FLOW_FV_EXTRACT_SZ *
1286                                            BITS_PER_BYTE));
1287                 off = raw->info.xtrct.off;
1288                 for (j = 0; j < cnt; j++) {
1289                         u16 idx;
1290
1291                         /* Make sure the number of extraction sequence entries
1292                          * required does not exceed the block's capability
1293                          */
1294                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1295                             params->es_cnt >= ICE_MAX_FV_WORDS)
1296                                 return ICE_ERR_MAX_LIMIT;
1297
1298                         /* some blocks require a reversed field vector layout */
1299                         if (hw->blk[params->blk].es.reverse)
1300                                 idx = fv_words - params->es_cnt - 1;
1301                         else
1302                                 idx = params->es_cnt;
1303
1304                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1305                         params->es[idx].off = off;
1306                         params->es_cnt++;
1307                         off += ICE_FLOW_FV_EXTRACT_SZ;
1308                 }
1309         }
1310
1311         return ICE_SUCCESS;
1312 }
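
/* Worked example (illustrative only): for a raw field starting at packet byte
 * offset 3 with src.last = 4 bytes to match, and assuming a 2-byte extraction
 * word, the code above yields:
 *
 *	xtrct.off  = (3 / 2) * 2 = 2;
 *	xtrct.disp = (3 % 2) * BITS_PER_BYTE = 8;
 *	cnt = DIVIDE_AND_ROUND_UP(8 + 32, 16) = 3;
 *
 * i.e. three field vector words are consumed, at offsets 2, 4 and 6 from the
 * start of the outer MAC header.
 */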
1313
1314 /**
1315  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1316  * @hw: pointer to the HW struct
1317  * @params: information about the flow to be processed
1318  *
1319  * This function iterates through all matched fields in the given segments, and
1320  * creates an extraction sequence for the fields.
1321  */
1322 static enum ice_status
1323 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1324                           struct ice_flow_prof_params *params)
1325 {
1326         enum ice_status status = ICE_SUCCESS;
1327         u8 i;
1328
1329         /* For ACL, we also need to extract the direction bit (Rx/Tx) data from
1330          * the packet flags
1331          */
1332         if (params->blk == ICE_BLK_ACL) {
1333                 status = ice_flow_xtract_pkt_flags(hw, params,
1334                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1335                 if (status)
1336                         return status;
1337         }
1338
1339         for (i = 0; i < params->prof->segs_cnt; i++) {
1340                 u64 match = params->prof->segs[i].match;
1341                 enum ice_flow_field j;
1342
1343                 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1344                                      ICE_FLOW_FIELD_IDX_MAX) {
1345                         status = ice_flow_xtract_fld(hw, params, i, j, match);
1346                         if (status)
1347                                 return status;
1348                         ice_clear_bit(j, (ice_bitmap_t *)&match);
1349                 }
1350
1351                 /* Process raw matching bytes */
1352                 status = ice_flow_xtract_raws(hw, params, i);
1353                 if (status)
1354                         return status;
1355         }
1356
1357         return status;
1358 }
1359
1360 /**
1361  * ice_flow_sel_acl_scen - Select the ACL scenario that best fits the profile
1362  * @hw: pointer to the hardware structure
1363  * @params: information about the flow to be processed
1364  *
1365  * This function selects the best-fit ACL scenario for the entry length
1366  * described by the given params and stores it in the profile configuration.
1367  */
1368 static enum ice_status
1369 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1370 {
1371         /* Find the best-fit scenario for the provided match width */
1372         struct ice_acl_scen *cand_scen = NULL, *scen;
1373
1374         if (!hw->acl_tbl)
1375                 return ICE_ERR_DOES_NOT_EXIST;
1376
1377         /* Loop through each scenario and match against the scenario width
1378          * to select the specific scenario
1379          */
1380         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1381                 if (scen->eff_width >= params->entry_length &&
1382                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1383                         cand_scen = scen;
1384         if (!cand_scen)
1385                 return ICE_ERR_DOES_NOT_EXIST;
1386
1387         params->prof->cfg.scen = cand_scen;
1388
1389         return ICE_SUCCESS;
1390 }
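
/* Selection example (a sketch, with assumed scenario widths): if the ACL
 * table exposes scenarios with eff_width 32, 64 and 128 bytes and the
 * profile's entry_length is 30 bytes, all three satisfy the width check but
 * the 32-byte scenario is kept as the best fit; with an entry_length of 70
 * bytes only the 128-byte scenario qualifies and is selected.
 */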
1391
1392 /**
1393  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1394  * @params: information about the flow to be processed
1395  */
1396 static enum ice_status
1397 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1398 {
1399         u16 index, i, range_idx = 0;
1400
1401         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1402
1403         for (i = 0; i < params->prof->segs_cnt; i++) {
1404                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1405                 u8 j;
1406
1407                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1408                                      ICE_FLOW_FIELD_IDX_MAX) {
1409                         struct ice_flow_fld_info *fld = &seg->fields[j];
1410
1411                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1412
1413                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1414                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1415
1416                                 /* Range checking only supported for single
1417                                  * words
1418                                  */
1419                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1420                                                         fld->xtrct.disp,
1421                                                         BITS_PER_BYTE * 2) > 1)
1422                                         return ICE_ERR_PARAM;
1423
1424                                 /* Ranges must define low and high values */
1425                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1426                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1427                                         return ICE_ERR_PARAM;
1428
1429                                 fld->entry.val = range_idx++;
1430                         } else {
1431                                 /* Store adjusted byte-length of field for later
1432                                  * use, taking into account potential
1433                                  * non-byte-aligned displacement
1434                                  */
1435                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1436                                         (ice_flds_info[j].size +
1437                                          (fld->xtrct.disp % BITS_PER_BYTE),
1438                                          BITS_PER_BYTE);
1439                                 fld->entry.val = index;
1440                                 index += fld->entry.last;
1441                         }
1442                 }
1443
1444                 for (j = 0; j < seg->raws_cnt; j++) {
1445                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1446
1447                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1448                         raw->info.entry.val = index;
1449                         raw->info.entry.last = raw->info.src.last;
1450                         index += raw->info.entry.last;
1451                 }
1452         }
1453
1454         /* Currently we only support using the byte selection base, which
1455          * allows for an effective entry size of at most 30 bytes. Reject
1456          * anything larger.
1457          */
1458         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1459                 return ICE_ERR_PARAM;
1460
1461         /* Only 8 range checkers per profile, reject anything trying to use
1462          * more
1463          */
1464         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1465                 return ICE_ERR_PARAM;
1466
1467         /* Store # bytes required for entry for later use */
1468         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1469
1470         return ICE_SUCCESS;
1471 }
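
/* Layout example (illustrative, assumed field sizes): starting at
 * ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX, a byte-aligned 4-byte IPv4 address
 * field gets entry.val = index and entry.last = 4, advancing index by 4, and
 * a following 2-byte TCP port field takes the next two byte-selection
 * indexes, for an entry_length of 6 bytes. A field configured as
 * ICE_FLOW_FLD_TYPE_RANGE instead consumes one of the (at most
 * ICE_AQC_ACL_PROF_RANGES_NUM_CFG) range checkers and no byte-selection
 * indexes.
 */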
1472
1473 /**
1474  * ice_flow_proc_segs - process all packet segments associated with a profile
1475  * @hw: pointer to the HW struct
1476  * @params: information about the flow to be processed
1477  */
1478 static enum ice_status
1479 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1480 {
1481         enum ice_status status;
1482
1483         status = ice_flow_proc_seg_hdrs(params);
1484         if (status)
1485                 return status;
1486
1487         status = ice_flow_create_xtrct_seq(hw, params);
1488         if (status)
1489                 return status;
1490
1491         switch (params->blk) {
1492         case ICE_BLK_FD:
1493         case ICE_BLK_RSS:
1494                 status = ICE_SUCCESS;
1495                 break;
1496         case ICE_BLK_ACL:
1497                 status = ice_flow_acl_def_entry_frmt(params);
1498                 if (status)
1499                         return status;
1500                 status = ice_flow_sel_acl_scen(hw, params);
1501                 if (status)
1502                         return status;
1503                 break;
1504         default:
1505                 return ICE_ERR_NOT_IMPL;
1506         }
1507
1508         return status;
1509 }
1510
1511 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1512 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1513 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1514
1515 /**
1516  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1517  * @hw: pointer to the HW struct
1518  * @blk: classification stage
1519  * @dir: flow direction
1520  * @segs: array of one or more packet segments that describe the flow
1521  * @segs_cnt: number of packet segments provided
1522  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1523  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1524  */
1525 static struct ice_flow_prof *
1526 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1527                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1528                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1529 {
1530         struct ice_flow_prof *p, *prof = NULL;
1531
1532         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1533         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1534                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1535                     segs_cnt && segs_cnt == p->segs_cnt) {
1536                         u8 i;
1537
1538                         /* Check for profile-VSI association if specified */
1539                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1540                             ice_is_vsi_valid(hw, vsi_handle) &&
1541                             !ice_is_bit_set(p->vsis, vsi_handle))
1542                                 continue;
1543
1544                         /* Protocol headers must be checked. Matched fields are
1545                          * checked if specified.
1546                          */
1547                         for (i = 0; i < segs_cnt; i++)
1548                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1549                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1550                                      segs[i].match != p->segs[i].match))
1551                                         break;
1552
1553                         /* A match is found if all segments are matched */
1554                         if (i == segs_cnt) {
1555                                 prof = p;
1556                                 break;
1557                         }
1558                 }
1559         ice_release_lock(&hw->fl_profs_locks[blk]);
1560
1561         return prof;
1562 }
1563
1564 /**
1565  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1566  * @hw: pointer to the HW struct
1567  * @blk: classification stage
1568  * @dir: flow direction
1569  * @segs: array of one or more packet segments that describe the flow
1570  * @segs_cnt: number of packet segments provided
1571  */
1572 u64
1573 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1574                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1575 {
1576         struct ice_flow_prof *p;
1577
1578         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1579                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1580
1581         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1582 }
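
/* Usage sketch (not driver code; the segment contents are assumptions): a
 * caller that has built a segment array can probe for an existing profile
 * before adding a new one, e.g.
 *
 *	u64 id = ice_flow_find_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, segs, 1);
 *
 *	if (id == ICE_FLOW_PROF_ID_INVAL)
 *		// no profile with matching headers and fields exists yet
 *
 * ICE_FLOW_RX is assumed here to be a valid ice_flow_dir value.
 */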
1583
1584 /**
1585  * ice_flow_find_prof_id - Look up a profile with given profile ID
1586  * @hw: pointer to the HW struct
1587  * @blk: classification stage
1588  * @prof_id: unique ID to identify this flow profile
1589  */
1590 static struct ice_flow_prof *
1591 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1592 {
1593         struct ice_flow_prof *p;
1594
1595         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1596                 if (p->id == prof_id)
1597                         return p;
1598
1599         return NULL;
1600 }
1601
1602 /**
1603  * ice_dealloc_flow_entry - Deallocate flow entry memory
1604  * @hw: pointer to the HW struct
1605  * @entry: flow entry to be removed
1606  */
1607 static void
1608 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1609 {
1610         if (!entry)
1611                 return;
1612
1613         if (entry->entry)
1614                 ice_free(hw, entry->entry);
1615
1616         if (entry->range_buf) {
1617                 ice_free(hw, entry->range_buf);
1618                 entry->range_buf = NULL;
1619         }
1620
1621         if (entry->acts) {
1622                 ice_free(hw, entry->acts);
1623                 entry->acts = NULL;
1624                 entry->acts_cnt = 0;
1625         }
1626
1627         ice_free(hw, entry);
1628 }
1629
1630 #define ICE_ACL_INVALID_SCEN    0x3f
1631
1632 /**
1633  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1634  * @hw: pointer to the hardware structure
1635  * @prof: pointer to flow profile
1636  * @buf: destination buffer the partial extraction sequence is written to
1637  *
1638  * returns ICE_SUCCESS if no PF is associated with the given profile
1639  * returns ICE_ERR_IN_USE if at least one PF is associated with the given profile
1640  * returns any other error code on a real failure
1641  */
1642 static enum ice_status
1643 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1644                             struct ice_aqc_acl_prof_generic_frmt *buf)
1645 {
1646         enum ice_status status;
1647         u8 prof_id = 0;
1648
1649         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1650         if (status)
1651                 return status;
1652
1653         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1654         if (status)
1655                 return status;
1656
1657         /* If the scenarios associated with all PFs are either all 0 or all
1658          * ICE_ACL_INVALID_SCEN (63) for the given profile, then the profile has
1659          * not been configured yet.
1660          */
1661         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1662             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1663             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1664             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1665                 return ICE_SUCCESS;
1666
1667         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1668             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1669             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1670             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1671             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1672             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1673             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1674             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1675                 return ICE_SUCCESS;
1676         else
1677                 return ICE_ERR_IN_USE;
1678 }
1679
1680 /**
1681  * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1682  * @hw: pointer to the hardware structure
1683  * @acts: array of actions to be performed on a match
1684  * @acts_cnt: number of actions
1685  */
1686 static enum ice_status
1687 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1688                            u8 acts_cnt)
1689 {
1690         int i;
1691
1692         for (i = 0; i < acts_cnt; i++) {
1693                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1694                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1695                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1696                         struct ice_acl_cntrs cntrs;
1697                         enum ice_status status;
1698
1699                         cntrs.bank = 0; /* Only bank0 for the moment */
1700                         cntrs.first_cntr =
1701                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1702                         cntrs.last_cntr =
1703                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1704
1705                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1706                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1707                         else
1708                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1709
1710                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1711                         if (status)
1712                                 return status;
1713                 }
1714         }
1715         return ICE_SUCCESS;
1716 }
1717
1718 /**
1719  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1720  * @hw: pointer to the hardware structure
1721  * @prof: pointer to flow profile
1722  *
1723  * Disassociate the scenario from the profile for the PF of the VSI.
1724  */
1725 static enum ice_status
1726 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1727 {
1728         struct ice_aqc_acl_prof_generic_frmt buf;
1729         enum ice_status status = ICE_SUCCESS;
1730         u8 prof_id = 0;
1731
1732         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1733
1734         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1735         if (status)
1736                 return status;
1737
1738         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1739         if (status)
1740                 return status;
1741
1742         /* Clear scenario for this PF */
1743         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1744         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1745
1746         return status;
1747 }
1748
1749 /**
1750  * ice_flow_rem_entry_sync - Remove a flow entry
1751  * @hw: pointer to the HW struct
1752  * @blk: classification stage
1753  * @entry: flow entry to be removed
1754  */
1755 static enum ice_status
1756 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1757                         struct ice_flow_entry *entry)
1758 {
1759         if (!entry)
1760                 return ICE_ERR_BAD_PTR;
1761
1762         if (blk == ICE_BLK_ACL) {
1763                 enum ice_status status;
1764
1765                 if (!entry->prof)
1766                         return ICE_ERR_BAD_PTR;
1767
1768                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1769                                            entry->scen_entry_idx);
1770                 if (status)
1771                         return status;
1772
1773                 /* Checks if we need to release an ACL counter. */
1774                 if (entry->acts_cnt && entry->acts)
1775                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1776                                                    entry->acts_cnt);
1777         }
1778
1779         LIST_DEL(&entry->l_entry);
1780
1781         ice_dealloc_flow_entry(hw, entry);
1782
1783         return ICE_SUCCESS;
1784 }
1785
1786 /**
1787  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1788  * @hw: pointer to the HW struct
1789  * @blk: classification stage
1790  * @dir: flow direction
1791  * @prof_id: unique ID to identify this flow profile
1792  * @segs: array of one or more packet segments that describe the flow
1793  * @segs_cnt: number of packet segments provided
1794  * @acts: array of default actions
1795  * @acts_cnt: number of default actions
1796  * @prof: stores the returned flow profile added
1797  *
1798  * Assumption: the caller has acquired the lock to the profile list
1799  */
1800 static enum ice_status
1801 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1802                        enum ice_flow_dir dir, u64 prof_id,
1803                        struct ice_flow_seg_info *segs, u8 segs_cnt,
1804                        struct ice_flow_action *acts, u8 acts_cnt,
1805                        struct ice_flow_prof **prof)
1806 {
1807         struct ice_flow_prof_params *params;
1808         enum ice_status status;
1809         u8 i;
1810
1811         if (!prof || (acts_cnt && !acts))
1812                 return ICE_ERR_BAD_PTR;
1813
1814         params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
1815         if (!params)
1816                 return ICE_ERR_NO_MEMORY;
1817
1818         params->prof = (struct ice_flow_prof *)
1819                 ice_malloc(hw, sizeof(*params->prof));
1820         if (!params->prof) {
1821                 status = ICE_ERR_NO_MEMORY;
1822                 goto free_params;
1823         }
1824
1825         /* initialize extraction sequence to all invalid (0xff) */
1826         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1827                 params->es[i].prot_id = ICE_PROT_INVALID;
1828                 params->es[i].off = ICE_FV_OFFSET_INVAL;
1829         }
1830
1831         params->blk = blk;
1832         params->prof->id = prof_id;
1833         params->prof->dir = dir;
1834         params->prof->segs_cnt = segs_cnt;
1835
1836         /* Make a copy of the segments that need to be persistent in the flow
1837          * profile instance
1838          */
1839         for (i = 0; i < segs_cnt; i++)
1840                 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
1841                            ICE_NONDMA_TO_NONDMA);
1842
1843         /* Make a copy of the actions that need to be persistent in the flow
1844          * profile instance.
1845          */
1846         if (acts_cnt) {
1847                 params->prof->acts = (struct ice_flow_action *)
1848                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1849                                    ICE_NONDMA_TO_NONDMA);
1850
1851                 if (!params->prof->acts) {
1852                         status = ICE_ERR_NO_MEMORY;
1853                         goto out;
1854                 }
1855         }
1856
1857         status = ice_flow_proc_segs(hw, params);
1858         if (status) {
1859                 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
1860                 goto out;
1861         }
1862
1863         /* Add a HW profile for this flow profile */
1864         status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
1865                               params->attr, params->attr_cnt, params->es,
1866                               params->mask);
1867         if (status) {
1868                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1869                 goto out;
1870         }
1871
1872         INIT_LIST_HEAD(&params->prof->entries);
1873         ice_init_lock(&params->prof->entries_lock);
1874         *prof = params->prof;
1875
1876 out:
1877         if (status) {
1878                 if (params->prof->acts)
1879                         ice_free(hw, params->prof->acts);
1880                 ice_free(hw, params->prof);
1881         }
1882 free_params:
1883         ice_free(hw, params);
1884
1885         return status;
1886 }
1887
1888 /**
1889  * ice_flow_rem_prof_sync - remove a flow profile
1890  * @hw: pointer to the hardware structure
1891  * @blk: classification stage
1892  * @prof: pointer to flow profile to remove
1893  *
1894  * Assumption: the caller has acquired the lock to the profile list
1895  */
1896 static enum ice_status
1897 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1898                        struct ice_flow_prof *prof)
1899 {
1900         enum ice_status status;
1901
1902         /* Remove all remaining flow entries before removing the flow profile */
1903         if (!LIST_EMPTY(&prof->entries)) {
1904                 struct ice_flow_entry *e, *t;
1905
1906                 ice_acquire_lock(&prof->entries_lock);
1907
1908                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1909                                          l_entry) {
1910                         status = ice_flow_rem_entry_sync(hw, blk, e);
1911                         if (status)
1912                                 break;
1913                 }
1914
1915                 ice_release_lock(&prof->entries_lock);
1916         }
1917
1918         if (blk == ICE_BLK_ACL) {
1919                 struct ice_aqc_acl_profile_ranges query_rng_buf;
1920                 struct ice_aqc_acl_prof_generic_frmt buf;
1921                 u8 prof_id = 0;
1922
1923                 /* Disassociate the scenario from the profile for the PF */
1924                 status = ice_flow_acl_disassoc_scen(hw, prof);
1925                 if (status)
1926                         return status;
1927
1928                 /* Clear the range-checker if the profile ID is no longer
1929                  * used by any PF
1930                  */
1931                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1932                 if (status && status != ICE_ERR_IN_USE) {
1933                         return status;
1934                 } else if (!status) {
1935                         /* Clear the range-checker value for profile ID */
1936                         ice_memset(&query_rng_buf, 0,
1937                                    sizeof(struct ice_aqc_acl_profile_ranges),
1938                                    ICE_NONDMA_MEM);
1939
1940                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
1941                                                       &prof_id);
1942                         if (status)
1943                                 return status;
1944
1945                         status = ice_prog_acl_prof_ranges(hw, prof_id,
1946                                                           &query_rng_buf, NULL);
1947                         if (status)
1948                                 return status;
1949                 }
1950         }
1951
1952         /* Remove all hardware profiles associated with this flow profile */
1953         status = ice_rem_prof(hw, blk, prof->id);
1954         if (!status) {
1955                 LIST_DEL(&prof->l_entry);
1956                 ice_destroy_lock(&prof->entries_lock);
1957                 if (prof->acts)
1958                         ice_free(hw, prof->acts);
1959                 ice_free(hw, prof);
1960         }
1961
1962         return status;
1963 }
1964
1965 /**
1966  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1967  * @buf: Destination buffer the partial xtrct sequence is written to
1968  * @info: Info about field
1969  */
1970 static void
1971 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1972                                struct ice_flow_fld_info *info)
1973 {
1974         u16 dst, i;
1975         u8 src;
1976
1977         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1978                 info->xtrct.disp / BITS_PER_BYTE;
1979         dst = info->entry.val;
1980         for (i = 0; i < info->entry.last; i++)
1981                 /* HW stores field vector words in LE, convert words back to BE
1982                  * so constructed entries will end up in network order
1983                  */
1984                 buf->byte_selection[dst++] = src++ ^ 1;
1985 }
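
/* Byte-order example (illustrative): for a field extracted at xtrct.idx = 2
 * with no bit displacement, src starts at 2 * ICE_FLOW_FV_EXTRACT_SZ = 4.
 * Successive source bytes 4, 5, 6, 7 are written as 4 ^ 1 = 5, 5 ^ 1 = 4,
 * 6 ^ 1 = 7 and 7 ^ 1 = 6, i.e. the two bytes of each little-endian field
 * vector word are swapped so the constructed entry reads in network order.
 */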
1986
1987 /**
1988  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1989  * @hw: pointer to the hardware structure
1990  * @prof: pointer to flow profile
1991  */
1992 static enum ice_status
1993 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
1994 {
1995         struct ice_aqc_acl_prof_generic_frmt buf;
1996         struct ice_flow_fld_info *info;
1997         enum ice_status status;
1998         u8 prof_id = 0;
1999         u16 i;
2000
2001         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2002
2003         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2004         if (status)
2005                 return status;
2006
2007         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2008         if (status && status != ICE_ERR_IN_USE)
2009                 return status;
2010
2011         if (!status) {
2012                 /* Program the profile-dependent configuration. This is done
2013                  * only once, regardless of the number of PFs using that profile
2014                  */
2015                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2016
2017                 for (i = 0; i < prof->segs_cnt; i++) {
2018                         struct ice_flow_seg_info *seg = &prof->segs[i];
2019                         u16 j;
2020
2021                         ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2022                                              ICE_FLOW_FIELD_IDX_MAX) {
2023                                 info = &seg->fields[j];
2024
2025                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2026                                         buf.word_selection[info->entry.val] =
2027                                                 info->xtrct.idx;
2028                                 else
2029                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
2030                                                                        info);
2031                         }
2032
2033                         for (j = 0; j < seg->raws_cnt; j++) {
2034                                 info = &seg->raws[j].info;
2035                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2036                         }
2037                 }
2038
2039                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2040                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2041                            ICE_NONDMA_MEM);
2042         }
2043
2044         /* Update the current PF */
2045         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2046         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
2047
2048         return status;
2049 }
2050
2051 /**
2052  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2053  * @hw: pointer to the hardware structure
2054  * @blk: classification stage
2055  * @vsi_handle: software VSI handle
2056  * @vsig: target VSI group
2057  *
2058  * Assumption: the caller has already verified that the VSI to
2059  * be added has the same characteristics as the VSIG and will
2060  * thereby have access to all resources added to that VSIG.
2061  */
2062 enum ice_status
2063 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2064                         u16 vsig)
2065 {
2066         enum ice_status status;
2067
2068         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2069                 return ICE_ERR_PARAM;
2070
2071         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2072         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2073                                   vsig);
2074         ice_release_lock(&hw->fl_profs_locks[blk]);
2075
2076         return status;
2077 }
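
/* Usage sketch (illustrative; vsi_handle and vsig values are assumptions):
 *
 *	status = ice_flow_assoc_vsig_vsi(hw, ICE_BLK_FD, vsi_handle, vsig);
 *	if (status)
 *		// invalid VSI handle, block out of range, or HW update failed
 */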
2078
2079 /**
2080  * ice_flow_assoc_prof - associate a VSI with a flow profile
2081  * @hw: pointer to the hardware structure
2082  * @blk: classification stage
2083  * @prof: pointer to flow profile
2084  * @vsi_handle: software VSI handle
2085  *
2086  * Assumption: the caller has acquired the lock to the profile list
2087  * and the software VSI handle has been validated
2088  */
2089 static enum ice_status
2090 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2091                     struct ice_flow_prof *prof, u16 vsi_handle)
2092 {
2093         enum ice_status status = ICE_SUCCESS;
2094
2095         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2096                 if (blk == ICE_BLK_ACL) {
2097                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2098                         if (status)
2099                                 return status;
2100                 }
2101                 status = ice_add_prof_id_flow(hw, blk,
2102                                               ice_get_hw_vsi_num(hw,
2103                                                                  vsi_handle),
2104                                               prof->id);
2105                 if (!status)
2106                         ice_set_bit(vsi_handle, prof->vsis);
2107                 else
2108                         ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2109                                   status);
2110         }
2111
2112         return status;
2113 }
2114
2115 /**
2116  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2117  * @hw: pointer to the hardware structure
2118  * @blk: classification stage
2119  * @prof: pointer to flow profile
2120  * @vsi_handle: software VSI handle
2121  *
2122  * Assumption: the caller has acquired the lock to the profile list
2123  * and the software VSI handle has been validated
2124  */
2125 static enum ice_status
2126 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2127                        struct ice_flow_prof *prof, u16 vsi_handle)
2128 {
2129         enum ice_status status = ICE_SUCCESS;
2130
2131         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2132                 status = ice_rem_prof_id_flow(hw, blk,
2133                                               ice_get_hw_vsi_num(hw,
2134                                                                  vsi_handle),
2135                                               prof->id);
2136                 if (!status)
2137                         ice_clear_bit(vsi_handle, prof->vsis);
2138                 else
2139                         ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2140                                   status);
2141         }
2142
2143         return status;
2144 }
2145
2146 /**
2147  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2148  * @hw: pointer to the HW struct
2149  * @blk: classification stage
2150  * @dir: flow direction
2151  * @prof_id: unique ID to identify this flow profile
2152  * @segs: array of one or more packet segments that describe the flow
2153  * @segs_cnt: number of packet segments provided
2154  * @acts: array of default actions
2155  * @acts_cnt: number of default actions
2156  * @prof: stores the returned flow profile added
2157  */
2158 enum ice_status
2159 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2160                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2161                   struct ice_flow_action *acts, u8 acts_cnt,
2162                   struct ice_flow_prof **prof)
2163 {
2164         enum ice_status status;
2165
2166         if (segs_cnt > ICE_FLOW_SEG_MAX)
2167                 return ICE_ERR_MAX_LIMIT;
2168
2169         if (!segs_cnt)
2170                 return ICE_ERR_PARAM;
2171
2172         if (!segs)
2173                 return ICE_ERR_BAD_PTR;
2174
2175         status = ice_flow_val_hdrs(segs, segs_cnt);
2176         if (status)
2177                 return status;
2178
2179         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2180
2181         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2182                                         acts, acts_cnt, prof);
2183         if (!status)
2184                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2185
2186         ice_release_lock(&hw->fl_profs_locks[blk]);
2187
2188         return status;
2189 }
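
/* Usage sketch (not driver code; the segment setup below is a minimal
 * assumption, real callers typically populate segments through the flow
 * segment helpers before calling this API):
 *
 *	struct ice_flow_seg_info segs[1];
 *	struct ice_flow_prof *prof;
 *	enum ice_status status;
 *
 *	ice_memset(segs, 0, sizeof(segs), ICE_NONDMA_MEM);
 *	segs[0].hdrs = ICE_FLOW_SEG_HDR_IPV4;
 *	segs[0].match = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *			BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA);
 *
 *	status = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, prof_id,
 *				   segs, 1, NULL, 0, &prof);
 *	...
 *	status = ice_flow_rem_prof(hw, ICE_BLK_RSS, prof_id);
 *
 * ICE_FLOW_RX and BIT_ULL() are assumed to be available as in the rest of
 * the base code, and prof_id is a caller-chosen 64-bit profile ID.
 */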
2190
2191 /**
2192  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2193  * @hw: pointer to the HW struct
2194  * @blk: the block for which the flow profile is to be removed
2195  * @prof_id: unique ID of the flow profile to be removed
2196  */
2197 enum ice_status
2198 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2199 {
2200         struct ice_flow_prof *prof;
2201         enum ice_status status;
2202
2203         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2204
2205         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2206         if (!prof) {
2207                 status = ICE_ERR_DOES_NOT_EXIST;
2208                 goto out;
2209         }
2210
2211         /* prof becomes invalid after the call */
2212         status = ice_flow_rem_prof_sync(hw, blk, prof);
2213
2214 out:
2215         ice_release_lock(&hw->fl_profs_locks[blk]);
2216
2217         return status;
2218 }
2219
2220 /**
2221  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2222  * @hw: pointer to the HW struct
2223  * @blk: classification stage
2224  * @prof_id: the profile ID handle
2225  * @hw_prof_id: pointer to variable to receive the HW profile ID
2226  */
2227 enum ice_status
2228 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2229                      u8 *hw_prof_id)
2230 {
2231         enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2232         struct ice_prof_map *map;
2233
2234         ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2235         map = ice_search_prof_id(hw, blk, prof_id);
2236         if (map) {
2237                 *hw_prof_id = map->prof_id;
2238                 status = ICE_SUCCESS;
2239         }
2240         ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2241         return status;
2242 }
2243
2244 /**
2245  * ice_flow_find_entry - look for a flow entry using its unique ID
2246  * @hw: pointer to the HW struct
2247  * @blk: classification stage
2248  * @entry_id: unique ID to identify this flow entry
2249  *
2250  * This function looks for the flow entry with the specified unique ID in all
2251  * flow profiles of the specified classification stage. If the entry is found,
2252  * it returns the handle to the flow entry. Otherwise, it returns
2253  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2254  */
2255 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2256 {
2257         struct ice_flow_entry *found = NULL;
2258         struct ice_flow_prof *p;
2259
2260         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2261
2262         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2263                 struct ice_flow_entry *e;
2264
2265                 ice_acquire_lock(&p->entries_lock);
2266                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2267                         if (e->id == entry_id) {
2268                                 found = e;
2269                                 break;
2270                         }
2271                 ice_release_lock(&p->entries_lock);
2272
2273                 if (found)
2274                         break;
2275         }
2276
2277         ice_release_lock(&hw->fl_profs_locks[blk]);
2278
2279         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2280 }
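
/* Usage sketch (illustrative; entry_id is a caller-chosen value):
 *
 *	u64 hndl = ice_flow_find_entry(hw, ICE_BLK_ACL, entry_id);
 *
 *	if (hndl != ICE_FLOW_ENTRY_HANDLE_INVAL)
 *		// the entry exists; hndl can be passed to the flow entry
 *		// removal API
 */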
2281
2282 /**
2283  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2284  * @hw: pointer to the hardware structure
2285  * @acts: array of actions to be performed on a match
2286  * @acts_cnt: number of actions
2287  * @cnt_alloc: indicates if an ACL counter has been allocated.
2288  */
2289 static enum ice_status
2290 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2291                            u8 acts_cnt, bool *cnt_alloc)
2292 {
2293         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2294         int i;
2295
2296         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2297         *cnt_alloc = false;
2298
2299         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2300                 return ICE_ERR_OUT_OF_RANGE;
2301
2302         for (i = 0; i < acts_cnt; i++) {
2303                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2304                     acts[i].type != ICE_FLOW_ACT_DROP &&
2305                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2306                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2307                         return ICE_ERR_CFG;
2308
2309                 /* If the caller wants to add two actions of the same type, then
2310                  * it is considered an invalid configuration.
2311                  */
2312                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2313                         return ICE_ERR_PARAM;
2314         }
2315
2316         /* Checks if ACL counters are needed. */
2317         for (i = 0; i < acts_cnt; i++) {
2318                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2319                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2320                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2321                         struct ice_acl_cntrs cntrs;
2322                         enum ice_status status;
2323
2324                         cntrs.amount = 1;
2325                         cntrs.bank = 0; /* Only bank0 for the moment */
2326
2327                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2328                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2329                         else
2330                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2331
2332                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2333                         if (status)
2334                                 return status;
2335                         /* Counter index within the bank */
2336                         acts[i].data.acl_act.value =
2337                                                 CPU_TO_LE16(cntrs.first_cntr);
2338                         *cnt_alloc = true;
2339                 }
2340         }
2341
2342         return ICE_SUCCESS;
2343 }
2344
2345 /**
2346  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2347  * @fld: number of the given field
2348  * @info: info about field
2349  * @range_buf: range checker configuration buffer
2350  * @data: pointer to a data buffer containing flow entry's match values/masks
2351  * @range: Input/output param indicating which range checkers are being used
2352  */
2353 static void
2354 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2355                               struct ice_aqc_acl_profile_ranges *range_buf,
2356                               u8 *data, u8 *range)
2357 {
2358         u16 new_mask;
2359
2360         /* If not specified, default mask is all bits in field */
2361         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2362                     BIT(ice_flds_info[fld].size) - 1 :
2363                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2364
2365         /* If the mask is 0, then we don't need to worry about this input
2366          * range checker value.
2367          */
2368         if (new_mask) {
2369                 u16 new_high =
2370                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2371                 u16 new_low =
2372                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2373                 u8 range_idx = info->entry.val;
2374
2375                 range_buf->checker_cfg[range_idx].low_boundary =
2376                         CPU_TO_BE16(new_low);
2377                 range_buf->checker_cfg[range_idx].high_boundary =
2378                         CPU_TO_BE16(new_high);
2379                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2380
2381                 /* Indicate which range checker is being used */
2382                 *range |= BIT(range_idx);
2383         }
2384 }
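
/* Range example (illustrative, byte-aligned 16-bit field assumed): with no
 * explicit mask, new_mask = BIT(16) - 1 = 0xffff; if the entry data holds
 * val = 1000 and last = 2000, range checker entry.val is programmed with
 * low_boundary 1000, high_boundary 2000 and mask 0xffff (all big-endian),
 * and the corresponding bit in *range is set so the scenario knows this
 * range checker is in use.
 */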
2385
2386 /**
2387  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2388  * @fld: number of the given field
2389  * @info: info about the field
2390  * @buf: buffer containing the entry
2391  * @dontcare: buffer containing don't care mask for entry
2392  * @data: pointer to a data buffer containing flow entry's match values/masks
2393  */
2394 static void
2395 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2396                             u8 *dontcare, u8 *data)
2397 {
2398         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2399         bool use_mask = false;
2400         u8 disp;
2401
2402         src = info->src.val;
2403         mask = info->src.mask;
2404         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2405         disp = info->xtrct.disp % BITS_PER_BYTE;
2406
2407         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2408                 use_mask = true;
2409
2410         for (k = 0; k < info->entry.last; k++, dst++) {
2411                 /* Add overflow bits from previous byte */
2412                 buf[dst] = (tmp_s & 0xff00) >> 8;
2413
2414                 /* If the mask is not valid, tmp_m is always zero, so this just
2415                  * sets dontcare to 0 (no masked bits). If the mask is valid, this
2416                  * pulls in the overflow bits of the mask from the previous byte.
2417                  */
2418                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2419
2420                 /* If there is displacement, the last byte will only contain
2421                  * displaced data; since there is no more data to read from the
2422                  * user buffer, skip it so we do not read beyond the end of the
2423                  * user buffer.
2424                  */
2425                 if (!disp || k < info->entry.last - 1) {
2426                         /* Store shifted data to use in next byte */
2427                         tmp_s = data[src++] << disp;
2428
2429                         /* Add current (shifted) byte */
2430                         buf[dst] |= tmp_s & 0xff;
2431
2432                         /* Handle mask if valid */
2433                         if (use_mask) {
2434                                 tmp_m = (~data[mask++] & 0xff) << disp;
2435                                 dontcare[dst] |= tmp_m & 0xff;
2436                         }
2437                 }
2438         }
2439
2440         /* Fill in don't care bits at beginning of field */
2441         if (disp) {
2442                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2443                 for (k = 0; k < disp; k++)
2444                         dontcare[dst] |= BIT(k);
2445         }
2446
2447         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2448
2449         /* Fill in don't care bits at end of field */
2450         if (end_disp) {
2451                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2452                       info->entry.last - 1;
2453                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2454                         dontcare[dst] |= BIT(k);
2455         }
2456 }
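
/* Displacement example (illustrative): for a 16-bit field with
 * xtrct.disp = 12, disp = 12 % 8 = 4 and entry.last = 3. Iteration k = 0
 * writes data[src] << 4 into the first entry byte, k = 1 adds the overflow
 * nibble of the previous byte plus the next shifted byte, and k = 2 carries
 * only the final overflow nibble. Bits 0-3 of the first byte and bits 4-7 of
 * the last byte are then marked don't-care, since they fall outside the
 * field.
 */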
2457
2458 /**
2459  * ice_flow_acl_frmt_entry - Format ACL entry
2460  * @hw: pointer to the hardware structure
2461  * @prof: pointer to flow profile
2462  * @e: pointer to the flow entry
2463  * @data: pointer to a data buffer containing flow entry's match values/masks
2464  * @acts: array of actions to be performed on a match
2465  * @acts_cnt: number of actions
2466  *
2467  * Formats the key (and key_inverse) to be matched from the data passed in,
2468  * along with data from the flow profile. This key/key_inverse pair makes up
2469  * the 'entry' for an ACL flow entry.
2470  */
2471 static enum ice_status
2472 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2473                         struct ice_flow_entry *e, u8 *data,
2474                         struct ice_flow_action *acts, u8 acts_cnt)
2475 {
2476         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2477         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2478         enum ice_status status;
2479         bool cnt_alloc;
2480         u8 prof_id = 0;
2481         u16 i, buf_sz;
2482
2483         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2484         if (status)
2485                 return status;
2486
2487         /* Format the result action */
2488
2489         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2490         if (status)
2491                 return status;
2492
2493         status = ICE_ERR_NO_MEMORY;
2494
2495         e->acts = (struct ice_flow_action *)
2496                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2497                            ICE_NONDMA_TO_NONDMA);
2498
2499         if (!e->acts)
2500                 goto out;
2501
2502         e->acts_cnt = acts_cnt;
2503
2504         /* Format the matching data */
2505         buf_sz = prof->cfg.scen->width;
2506         buf = (u8 *)ice_malloc(hw, buf_sz);
2507         if (!buf)
2508                 goto out;
2509
2510         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2511         if (!dontcare)
2512                 goto out;
2513
2514         /* The 'key' buffer will store both key and key_inverse, so it must
2515          * be twice the size of buf
2516          */
2517         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2518         if (!key)
2519                 goto out;
2520
2521         range_buf = (struct ice_aqc_acl_profile_ranges *)
2522                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2523         if (!range_buf)
2524                 goto out;
2525
2526         /* Set don't care mask to all 1's to start, will zero out used bytes */
2527         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2528
2529         for (i = 0; i < prof->segs_cnt; i++) {
2530                 struct ice_flow_seg_info *seg = &prof->segs[i];
2531                 u8 j;
2532
2533                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2534                                      ICE_FLOW_FIELD_IDX_MAX) {
2535                         struct ice_flow_fld_info *info = &seg->fields[j];
2536
2537                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2538                                 ice_flow_acl_frmt_entry_range(j, info,
2539                                                               range_buf, data,
2540                                                               &range);
2541                         else
2542                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2543                                                             dontcare, data);
2544                 }
2545
2546                 for (j = 0; j < seg->raws_cnt; j++) {
2547                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2548                         u16 dst, src, mask, k;
2549                         bool use_mask = false;
2550
2551                         src = info->src.val;
2552                         dst = info->entry.val -
2553                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2554                         mask = info->src.mask;
2555
2556                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2557                                 use_mask = true;
2558
2559                         for (k = 0; k < info->entry.last; k++, dst++) {
2560                                 buf[dst] = data[src++];
2561                                 if (use_mask)
2562                                         dontcare[dst] = ~data[mask++];
2563                                 else
2564                                         dontcare[dst] = 0;
2565                         }
2566                 }
2567         }
2568
2569         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2570         dontcare[prof->cfg.scen->pid_idx] = 0;
2571
2572         /* Format the buffer for direction flags */
2573         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2574
2575         if (prof->dir == ICE_FLOW_RX)
2576                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2577
2578         if (range) {
2579                 buf[prof->cfg.scen->rng_chk_idx] = range;
2580                 /* Mark any unused range checkers as don't care */
2581                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2582                 e->range_buf = range_buf;
2583         } else {
2584                 ice_free(hw, range_buf);
2585         }
2586
2587         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2588                              buf_sz);
2589         if (status)
2590                 goto out;
2591
2592         e->entry = key;
2593         e->entry_sz = buf_sz * 2;
2594
2595 out:
2596         if (buf)
2597                 ice_free(hw, buf);
2598
2599         if (dontcare)
2600                 ice_free(hw, dontcare);
2601
2602         if (status && key)
2603                 ice_free(hw, key);
2604
2605         if (status && range_buf) {
2606                 ice_free(hw, range_buf);
2607                 e->range_buf = NULL;
2608         }
2609
2610         if (status && e->acts) {
2611                 ice_free(hw, e->acts);
2612                 e->acts = NULL;
2613                 e->acts_cnt = 0;
2614         }
2615
2616         if (status && cnt_alloc)
2617                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2618
2619         return status;
2620 }
2621
2622 /**
2623  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2624  *                                     the compared data.
2625  * @prof: pointer to flow profile
2626  * @e: pointer to the comparing flow entry
2627  * @do_chg_action: decide if we want to change the ACL action
2628  * @do_add_entry: decide if we want to add the new ACL entry
2629  * @do_rem_entry: decide if we want to remove the current ACL entry
2630  *
2631  * Find an ACL scenario entry that matches the compared data. At the same time,
2632  * this function also figures out:
2633  * a/ If we want to change the ACL action
2634  * b/ If we want to add the new ACL entry
2635  * c/ If we want to remove the current ACL entry
2636  */
2637 static struct ice_flow_entry *
2638 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2639                                   struct ice_flow_entry *e, bool *do_chg_action,
2640                                   bool *do_add_entry, bool *do_rem_entry)
2641 {
2642         struct ice_flow_entry *p, *return_entry = NULL;
2643         u8 i, j;
2644
2645         /* Check if:
2646          * a/ There exists an entry with same matching data, but different
2647          *    priority, then we remove this existing ACL entry. Then, we
2648          *    will add the new entry to the ACL scenario.
2649          * b/ There exists an entry with same matching data, priority, and
2650          *    result action, then we do nothing
2651          * c/ There exists an entry with same matching data and priority, but
2652          *    a different result action, then we only change the entry's action.
2653          * d/ Else, we add this new entry to the ACL scenario.
2654          */
2655         *do_chg_action = false;
2656         *do_add_entry = true;
2657         *do_rem_entry = false;
2658         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2659                 if (memcmp(p->entry, e->entry, p->entry_sz))
2660                         continue;
2661
2662                 /* From this point, we have the same matching_data. */
2663                 *do_add_entry = false;
2664                 return_entry = p;
2665
2666                 if (p->priority != e->priority) {
2667                         /* matching data && !priority */
2668                         *do_add_entry = true;
2669                         *do_rem_entry = true;
2670                         break;
2671                 }
2672
2673                 /* From this point, we will have matching_data && priority */
2674                 if (p->acts_cnt != e->acts_cnt)
2675                         *do_chg_action = true;
2676                 for (i = 0; i < p->acts_cnt; i++) {
2677                         bool found_not_match = false;
2678
2679                         for (j = 0; j < e->acts_cnt; j++)
2680                                 if (memcmp(&p->acts[i], &e->acts[j],
2681                                            sizeof(struct ice_flow_action))) {
2682                                         found_not_match = true;
2683                                         break;
2684                                 }
2685
2686                         if (found_not_match) {
2687                                 *do_chg_action = true;
2688                                 break;
2689                         }
2690                 }
2691
2692                 /* (do_chg_action = true) means :
2693                  *    matching_data && priority && !result_action
2694                  * (do_chg_action = false) means :
2695                  *    matching_data && priority && result_action
2696                  */
2697                 break;
2698         }
2699
2700         return return_entry;
2701 }
2702
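/* Summary of the flags produced by ice_flow_acl_find_scen_entry_cond(),
 * derived from the conditions above:
 *
 *      same match data | same priority | same actions | add | rem | chg_action
 *      ----------------+---------------+--------------+-----+-----+-----------
 *      no              | -             | -            | yes | no  | no
 *      yes             | no            | -            | yes | yes | no
 *      yes             | yes           | no           | no  | no  | yes
 *      yes             | yes           | yes          | no  | no  | no
 */
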
2703 /**
2704  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2705  * @p: flow priority
2706  */
2707 static enum ice_acl_entry_prior
2708 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2709 {
2710         enum ice_acl_entry_prior acl_prior;
2711
2712         switch (p) {
2713         case ICE_FLOW_PRIO_LOW:
2714                 acl_prior = ICE_LOW;
2715                 break;
2716         case ICE_FLOW_PRIO_NORMAL:
2717                 acl_prior = ICE_NORMAL;
2718                 break;
2719         case ICE_FLOW_PRIO_HIGH:
2720                 acl_prior = ICE_HIGH;
2721                 break;
2722         default:
2723                 acl_prior = ICE_NORMAL;
2724                 break;
2725         }
2726
2727         return acl_prior;
2728 }
2729
2730 /**
2731  * ice_flow_acl_union_rng_chk - Perform union operation between two
2732  *                              range checker buffers
2733  * @dst_buf: pointer to destination range checker buffer
2734  * @src_buf: pointer to source range checker buffer
2735  *
2736  * This function performs the union of the dst_buf and src_buf range checker
2737  * buffers and saves the result back to dst_buf.
2738  */
2739 static enum ice_status
2740 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2741                            struct ice_aqc_acl_profile_ranges *src_buf)
2742 {
2743         u8 i, j;
2744
2745         if (!dst_buf || !src_buf)
2746                 return ICE_ERR_BAD_PTR;
2747
2748         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2749                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2750                 bool will_populate = false;
2751
2752                 in_data = &src_buf->checker_cfg[i];
2753
2754                 if (!in_data->mask)
2755                         break;
2756
2757                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2758                         cfg_data = &dst_buf->checker_cfg[j];
2759
2760                         if (!cfg_data->mask ||
2761                             !memcmp(cfg_data, in_data,
2762                                     sizeof(struct ice_acl_rng_data))) {
2763                                 will_populate = true;
2764                                 break;
2765                         }
2766                 }
2767
2768                 if (will_populate) {
2769                         ice_memcpy(cfg_data, in_data,
2770                                    sizeof(struct ice_acl_rng_data),
2771                                    ICE_NONDMA_TO_NONDMA);
2772                 } else {
2773                         /* No available slot left to program range checker */
2774                         return ICE_ERR_MAX_LIMIT;
2775                 }
2776         }
2777
2778         return ICE_SUCCESS;
2779 }
2780
2781 /**
2782  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2783  * @hw: pointer to the hardware structure
2784  * @prof: pointer to flow profile
2785  * @entry: double pointer to the flow entry
2786  *
2787  * This function looks at the entries currently added to the corresponding
2788  * ACL scenario, then performs matching logic to decide whether to add,
2789  * modify, or do nothing with this new entry.
2790  */
2791 static enum ice_status
2792 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2793                                  struct ice_flow_entry **entry)
2794 {
2795         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2796         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2797         struct ice_acl_act_entry *acts = NULL;
2798         struct ice_flow_entry *exist;
2799         enum ice_status status = ICE_SUCCESS;
2800         struct ice_flow_entry *e;
2801         u8 i;
2802
2803         if (!entry || !(*entry) || !prof)
2804                 return ICE_ERR_BAD_PTR;
2805
2806         e = *(entry);
2807
2808         do_chg_rng_chk = false;
2809         if (e->range_buf) {
2810                 u8 prof_id = 0;
2811
2812                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2813                                               &prof_id);
2814                 if (status)
2815                         return status;
2816
2817                 /* Query the current range-checker value in FW */
2818                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2819                                                    NULL);
2820                 if (status)
2821                         return status;
2822                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2823                            sizeof(struct ice_aqc_acl_profile_ranges),
2824                            ICE_NONDMA_TO_NONDMA);
2825
2826                 /* Generate the new range-checker value */
2827                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2828                 if (status)
2829                         return status;
2830
2831                 /* Reconfigure the range check if the buffer is changed. */
2832                 do_chg_rng_chk = false;
2833                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2834                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2835                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2836                                                           &cfg_rng_buf, NULL);
2837                         if (status)
2838                                 return status;
2839
2840                         do_chg_rng_chk = true;
2841                 }
2842         }
2843
2844         /* Figure out if we want to (change the ACL action) and/or
2845          * (add the new ACL entry) and/or (remove the current ACL entry)
2846          */
2847         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2848                                                   &do_add_entry, &do_rem_entry);
2849
2850         if (do_rem_entry) {
2851                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2852                 if (status)
2853                         return status;
2854         }
2855
2856         /* Prepare the result action buffer */
2857         acts = (struct ice_acl_act_entry *)ice_calloc
2858                 (hw, e->acts_cnt, sizeof(struct ice_acl_act_entry));
        if (!acts) {
                status = ICE_ERR_NO_MEMORY;
                goto out;
        }
2859         for (i = 0; i < e->acts_cnt; i++)
2860                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2861                            sizeof(struct ice_acl_act_entry),
2862                            ICE_NONDMA_TO_NONDMA);
2863
2864         if (do_add_entry) {
2865                 enum ice_acl_entry_prior prior;
2866                 u8 *keys, *inverts;
2867                 u16 entry_idx;
2868
2869                 keys = (u8 *)e->entry;
2870                 inverts = keys + (e->entry_sz / 2);
2871                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2872
2873                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2874                                            inverts, acts, e->acts_cnt,
2875                                            &entry_idx);
2876                 if (status)
2877                         goto out;
2878
2879                 e->scen_entry_idx = entry_idx;
2880                 LIST_ADD(&e->l_entry, &prof->entries);
2881         } else {
2882                 if (do_chg_action) {
2883                         /* Update the SW copy of the existing entry's action
2884                          * info with e's action info
2885                          */
2886                         ice_free(hw, exist->acts);
2887                         exist->acts_cnt = e->acts_cnt;
2888                         exist->acts = (struct ice_flow_action *)
2889                                 ice_calloc(hw, exist->acts_cnt,
2890                                            sizeof(struct ice_flow_action));
2891
2892                         if (!exist->acts) {
2893                                 status = ICE_ERR_NO_MEMORY;
2894                                 goto out;
2895                         }
2896
2897                         ice_memcpy(exist->acts, e->acts,
2898                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2899                                    ICE_NONDMA_TO_NONDMA);
2900
2901                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2902                                                   e->acts_cnt,
2903                                                   exist->scen_entry_idx);
2904                         if (status)
2905                                 goto out;
2906                 }
2907
2908                 if (do_chg_rng_chk) {
2909                         /* In this case, we want to update the range checker
2910                          * information of the existing entry
2911                          */
2912                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2913                                                             e->range_buf);
2914                         if (status)
2915                                 goto out;
2916                 }
2917
2918                 /* As we don't add the new entry to our SW DB, deallocate its
2919                  * memory and return the existing entry to the caller
2920                  */
2921                 ice_dealloc_flow_entry(hw, e);
2922                 *(entry) = exist;
2923         }
2924 out:
2925         if (acts)
2926                 ice_free(hw, acts);
2927
2928         return status;
2929 }
2930
2931 /**
2932  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2933  * @hw: pointer to the hardware structure
2934  * @prof: pointer to flow profile
2935  * @e: double pointer to the flow entry
2936  */
2937 static enum ice_status
2938 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2939                             struct ice_flow_entry **e)
2940 {
2941         enum ice_status status;
2942
2943         ice_acquire_lock(&prof->entries_lock);
2944         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2945         ice_release_lock(&prof->entries_lock);
2946
2947         return status;
2948 }
2949
2950 /**
2951  * ice_flow_add_entry - Add a flow entry
2952  * @hw: pointer to the HW struct
2953  * @blk: classification stage
2954  * @prof_id: ID of the profile to add a new flow entry to
2955  * @entry_id: unique ID to identify this flow entry
2956  * @vsi_handle: software VSI handle for the flow entry
2957  * @prio: priority of the flow entry
2958  * @data: pointer to a data buffer containing flow entry's match values/masks
2959  * @acts: array of actions to be performed on a match
2960  * @acts_cnt: number of actions
2961  * @entry_h: pointer to buffer that receives the new flow entry's handle
2962  */
2963 enum ice_status
2964 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2965                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2966                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
2967                    u64 *entry_h)
2968 {
2969         struct ice_flow_entry *e = NULL;
2970         struct ice_flow_prof *prof;
2971         enum ice_status status = ICE_SUCCESS;
2972
2973         /* ACL entries must indicate an action */
2974         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2975                 return ICE_ERR_PARAM;
2976
2977         /* No flow entry data is expected for RSS */
2978         if (!entry_h || (!data && blk != ICE_BLK_RSS))
2979                 return ICE_ERR_BAD_PTR;
2980
2981         if (!ice_is_vsi_valid(hw, vsi_handle))
2982                 return ICE_ERR_PARAM;
2983
2984         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2985
2986         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2987         if (!prof) {
2988                 status = ICE_ERR_DOES_NOT_EXIST;
2989         } else {
2990                 /* Allocate memory for the entry being added and associate
2991                  * the VSI to the found flow profile
2992                  */
2993                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2994                 if (!e)
2995                         status = ICE_ERR_NO_MEMORY;
2996                 else
2997                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2998         }
2999
3000         ice_release_lock(&hw->fl_profs_locks[blk]);
3001         if (status)
3002                 goto out;
3003
3004         e->id = entry_id;
3005         e->vsi_handle = vsi_handle;
3006         e->prof = prof;
3007         e->priority = prio;
3008
3009         switch (blk) {
3010         case ICE_BLK_FD:
3011         case ICE_BLK_RSS:
3012                 break;
3013         case ICE_BLK_ACL:
3014                 /* ACL will handle the entry management */
3015                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3016                                                  acts_cnt);
3017                 if (status)
3018                         goto out;
3019
3020                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3021                 if (status)
3022                         goto out;
3023
3024                 break;
3025         default:
3026                 status = ICE_ERR_NOT_IMPL;
3027                 goto out;
3028         }
3029
3030         if (blk != ICE_BLK_ACL) {
3031                 /* ACL will handle the entry management */
3032                 ice_acquire_lock(&prof->entries_lock);
3033                 LIST_ADD(&e->l_entry, &prof->entries);
3034                 ice_release_lock(&prof->entries_lock);
3035         }
3036
3037         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3038
3039 out:
3040         if (status && e) {
3041                 if (e->entry)
3042                         ice_free(hw, e->entry);
3043                 ice_free(hw, e);
3044         }
3045
3046         return status;
3047 }
3048
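/* Illustrative usage sketch (not compiled; my_prof_id, my_entry_id and
 * my_match_buf are caller-supplied placeholders and ICE_FLOW_ACT_DROP is
 * only an example action type):
 *
 *      struct ice_flow_action act = { .type = ICE_FLOW_ACT_DROP };
 *      enum ice_status status;
 *      u64 entry_h;
 *
 *      status = ice_flow_add_entry(hw, ICE_BLK_ACL, my_prof_id, my_entry_id,
 *                                  vsi_handle, ICE_FLOW_PRIO_NORMAL,
 *                                  my_match_buf, &act, 1, &entry_h);
 */
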
3049 /**
3050  * ice_flow_rem_entry - Remove a flow entry
3051  * @hw: pointer to the HW struct
3052  * @blk: classification stage
3053  * @entry_h: handle to the flow entry to be removed
3054  */
3055 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3056                                    u64 entry_h)
3057 {
3058         struct ice_flow_entry *entry;
3059         struct ice_flow_prof *prof;
3060         enum ice_status status = ICE_SUCCESS;
3061
3062         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3063                 return ICE_ERR_PARAM;
3064
3065         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
3066
3067         /* Retain the pointer to the flow profile as the entry will be freed */
3068         prof = entry->prof;
3069
3070         if (prof) {
3071                 ice_acquire_lock(&prof->entries_lock);
3072                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3073                 ice_release_lock(&prof->entries_lock);
3074         }
3075
3076         return status;
3077 }
3078
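/* Illustrative usage sketch (not compiled): remove the entry added in the
 * ice_flow_add_entry() example above:
 *
 *      status = ice_flow_rem_entry(hw, ICE_BLK_ACL, entry_h);
 */
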
3079 /**
3080  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3081  * @seg: packet segment the field being set belongs to
3082  * @fld: field to be set
3083  * @field_type: type of the field
3084  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3085  *           entry's input buffer
3086  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3087  *            input buffer
3088  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3089  *            entry's input buffer
3090  *
3091  * This helper function stores information of a field being matched, including
3092  * the type of the field and the locations of the value to match, the mask,
3093  * and the upper-bound value at the start of the input buffer for a flow entry.
3094  * This function should only be used for fixed-size data structures.
3095  *
3096  * This function also opportunistically determines the protocol headers to be
3097  * present based on the fields being set. Some fields cannot be used alone to
3098  * determine the protocol headers present. Sometimes, fields for particular
3099  * protocol headers are not matched. In those cases, the protocol headers
3100  * must be explicitly set.
3101  */
3102 static void
3103 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3104                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3105                      u16 mask_loc, u16 last_loc)
3106 {
3107         u64 bit = BIT_ULL(fld);
3108
3109         seg->match |= bit;
3110         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3111                 seg->range |= bit;
3112
3113         seg->fields[fld].type = field_type;
3114         seg->fields[fld].src.val = val_loc;
3115         seg->fields[fld].src.mask = mask_loc;
3116         seg->fields[fld].src.last = last_loc;
3117
3118         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3119 }
3120
3121 /**
3122  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3123  * @seg: packet segment the field being set belongs to
3124  * @fld: field to be set
3125  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3126  *           entry's input buffer
3127  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3128  *            input buffer
3129  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3130  *            entry's input buffer
3131  * @range: indicate if field being matched is to be in a range
3132  *
3133  * This function specifies the locations, in the form of byte offsets from the
3134  * start of the input buffer for a flow entry, from where the value to match,
3135  * the mask value, and upper value can be extracted. These locations are then
3136  * stored in the flow profile. When adding a flow entry associated with the
3137  * flow profile, these locations will be used to quickly extract the values and
3138  * create the content of a match entry. This function should only be used for
3139  * fixed-size data structures.
3140  */
3141 void
3142 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3143                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3144 {
3145         enum ice_flow_fld_match_type t = range ?
3146                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3147
3148         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3149 }
3150
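/* Illustrative usage sketch (not compiled; the byte offsets 0, 4 and 8 are
 * hypothetical and depend entirely on how the caller lays out the buffer
 * later passed as 'data' to ice_flow_add_entry()):
 *
 *      struct ice_flow_seg_info seg = { 0 };
 *
 *      ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
 *                       0, 4, ICE_FLOW_FLD_OFF_INVAL, false);
 *      ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
 *                       8, ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *                       false);
 */
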
3151 /**
3152  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3153  * @seg: packet segment the field being set belongs to
3154  * @fld: field to be set
3155  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3156  *           entry's input buffer
3157  * @pref_loc: location of prefix value from entry's input buffer
3158  * @pref_sz: size of the location holding the prefix value
3159  *
3160  * This function specifies the locations, in the form of byte offsets from the
3161  * start of the input buffer for a flow entry, from where the value to match
3162  * and the IPv4 prefix value can be extracted. These locations are then stored
3163  * in the flow profile. When adding flow entries to the associated flow profile,
3164  * these locations can be used to quickly extract the values to create the
3165  * content of a match entry. This function should only be used for fixed-size
3166  * data structures.
3167  */
3168 void
3169 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3170                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3171 {
3172         /* For this type of field, the "mask" location is for the prefix value's
3173          * location and the "last" location is for the size of the location of
3174          * the prefix value.
3175          */
3176         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3177                              pref_loc, (u16)pref_sz);
3178 }
3179
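/* Illustrative usage sketch (not compiled; offsets are hypothetical): match
 * an IPv4 source address against a caller-supplied prefix, with the address
 * value at byte 0 of the entry input buffer and a one-byte prefix length at
 * byte 4:
 *
 *      ice_flow_set_fld_prefix(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 4, 1);
 */
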
3180 /**
3181  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3182  * @seg: packet segment the field being set belongs to
3183  * @off: offset of the raw field from the beginning of the segment in bytes
3184  * @len: length of the raw pattern to be matched
3185  * @val_loc: location of the value to match from entry's input buffer
3186  * @mask_loc: location of mask value from entry's input buffer
3187  *
3188  * This function specifies the offset of the raw field to be matched from the
3189  * beginning of the specified packet segment, and the locations, in the form of
3190  * byte offsets from the start of the input buffer for a flow entry, from where
3191  * the value to match and the mask value can be extracted. These locations are
3192  * then stored in the flow profile. When adding flow entries to the associated
3193  * flow profile, these locations can be used to quickly extract the values to
3194  * create the content of a match entry. This function should only be used for
3195  * fixed-size data structures.
3196  */
3197 void
3198 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3199                      u16 val_loc, u16 mask_loc)
3200 {
3201         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3202                 seg->raws[seg->raws_cnt].off = off;
3203                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3204                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3205                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3206                 /* The "last" field is used to store the length of the field */
3207                 seg->raws[seg->raws_cnt].info.src.last = len;
3208         }
3209
3210         /* Overflows of "raws" will be handled as an error condition later in
3211          * the flow when this information is processed.
3212          */
3213         seg->raws_cnt++;
3214 }
3215
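/* Illustrative usage sketch (not compiled; offsets are hypothetical): match
 * 4 raw bytes located 16 bytes into the segment, taking the value from byte
 * 0 and its mask from byte 4 of the entry input buffer:
 *
 *      ice_flow_add_fld_raw(&seg, 16, 4, 0, 4);
 */
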
3216 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3217         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3218
3219 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3220         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3221
3222 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3223         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3224          ICE_FLOW_SEG_HDR_SCTP)
3225
3226 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3227         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3228          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3229          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3230
3231 /**
3232  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3233  * @segs: pointer to the flow field segment(s)
3234  * @hash_fields: fields to be hashed on for the segment(s)
3235  * @flow_hdr: protocol header fields within a packet segment
3236  *
3237  * Helper function to extract fields from the hash bitmap and use the flow
3238  * header value to set up a flow field segment for later use in flow profile
3239  * entry addition or removal.
3240  */
3241 static enum ice_status
3242 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3243                           u32 flow_hdr)
3244 {
3245         u64 val;
3246         u8 i;
3247
3248         ice_for_each_set_bit(i, (ice_bitmap_t *)&hash_fields,
3249                              ICE_FLOW_FIELD_IDX_MAX)
3250                 ice_flow_set_fld(segs, (enum ice_flow_field)i,
3251                                  ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3252                                  ICE_FLOW_FLD_OFF_INVAL, false);
3253
3254         ICE_FLOW_SET_HDRS(segs, flow_hdr);
3255
3256         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3257             ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
3258                 return ICE_ERR_PARAM;
3259
3260         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3261         if (val && !ice_is_pow2(val))
3262                 return ICE_ERR_CFG;
3263
3264         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3265         if (val && !ice_is_pow2(val))
3266                 return ICE_ERR_CFG;
3267
3268         return ICE_SUCCESS;
3269 }
3270
3271 /**
3272  * ice_rem_vsi_rss_list - remove VSI from RSS list
3273  * @hw: pointer to the hardware structure
3274  * @vsi_handle: software VSI handle
3275  *
3276  * Remove the VSI from all RSS configurations in the list.
3277  */
3278 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3279 {
3280         struct ice_rss_cfg *r, *tmp;
3281
3282         if (LIST_EMPTY(&hw->rss_list_head))
3283                 return;
3284
3285         ice_acquire_lock(&hw->rss_locks);
3286         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3287                                  ice_rss_cfg, l_entry)
3288                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3289                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3290                                 LIST_DEL(&r->l_entry);
3291                                 ice_free(hw, r);
3292                         }
3293         ice_release_lock(&hw->rss_locks);
3294 }
3295
3296 /**
3297  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3298  * @hw: pointer to the hardware structure
3299  * @vsi_handle: software VSI handle
3300  *
3301  * This function will iterate through all flow profiles and disassociate
3302  * the VSI from each profile. If a flow profile has no VSIs left, it will
3303  * be removed.
3304  */
3305 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3306 {
3307         const enum ice_block blk = ICE_BLK_RSS;
3308         struct ice_flow_prof *p, *t;
3309         enum ice_status status = ICE_SUCCESS;
3310
3311         if (!ice_is_vsi_valid(hw, vsi_handle))
3312                 return ICE_ERR_PARAM;
3313
3314         if (LIST_EMPTY(&hw->fl_profs[blk]))
3315                 return ICE_SUCCESS;
3316
3317         ice_acquire_lock(&hw->rss_locks);
3318         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3319                                  l_entry)
3320                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3321                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3322                         if (status)
3323                                 break;
3324
3325                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3326                                 status = ice_flow_rem_prof(hw, blk, p->id);
3327                                 if (status)
3328                                         break;
3329                         }
3330                 }
3331         ice_release_lock(&hw->rss_locks);
3332
3333         return status;
3334 }
3335
3336 /**
3337  * ice_rem_rss_list - remove RSS configuration from list
3338  * @hw: pointer to the hardware structure
3339  * @vsi_handle: software VSI handle
3340  * @prof: pointer to flow profile
3341  *
3342  * Assumption: lock has already been acquired for RSS list
3343  */
3344 static void
3345 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3346 {
3347         struct ice_rss_cfg *r, *tmp;
3348
3349         /* Search for RSS hash fields associated with the VSI that match the
3350          * hash configuration associated with the flow profile. If found,
3351          * remove it from the RSS entry list of the VSI context and delete it.
3352          */
3353         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3354                                  ice_rss_cfg, l_entry)
3355                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3356                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3357                         ice_clear_bit(vsi_handle, r->vsis);
3358                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3359                                 LIST_DEL(&r->l_entry);
3360                                 ice_free(hw, r);
3361                         }
3362                         return;
3363                 }
3364 }
3365
3366 /**
3367  * ice_add_rss_list - add RSS configuration to list
3368  * @hw: pointer to the hardware structure
3369  * @vsi_handle: software VSI handle
3370  * @prof: pointer to flow profile
3371  *
3372  * Assumption: lock has already been acquired for RSS list
3373  */
3374 static enum ice_status
3375 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3376 {
3377         struct ice_rss_cfg *r, *rss_cfg;
3378
3379         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3380                             ice_rss_cfg, l_entry)
3381                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3382                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3383                         ice_set_bit(vsi_handle, r->vsis);
3384                         return ICE_SUCCESS;
3385                 }
3386
3387         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3388         if (!rss_cfg)
3389                 return ICE_ERR_NO_MEMORY;
3390
3391         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3392         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3393         rss_cfg->symm = prof->cfg.symm;
3394         ice_set_bit(vsi_handle, rss_cfg->vsis);
3395
3396         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3397
3398         return ICE_SUCCESS;
3399 }
3400
3401 #define ICE_FLOW_PROF_HASH_S    0
3402 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3403 #define ICE_FLOW_PROF_HDR_S     32
3404 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3405 #define ICE_FLOW_PROF_ENCAP_S   63
3406 #define ICE_FLOW_PROF_ENCAP_M   (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
3407
3408 #define ICE_RSS_OUTER_HEADERS   1
3409 #define ICE_RSS_INNER_HEADERS   2
3410
3411 /* Flow profile ID format:
3412  * [0:31] - Packet match fields
3413  * [32:62] - Protocol header
3414  * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
3415  */
3416 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3417         (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3418               (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3419               ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3420
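/* For example (hypothetical values, assuming the ICE_FLOW_HASH_IPV4 bitmap
 * from ice_flow.h): hashing on the IPv4 source/destination addresses of a
 * non-tunneled packet (one segment) places the hash bitmap in the low 32
 * bits, the protocol header bits above them, and leaves the encapsulation
 * bit clear:
 *
 *      u64 id = ICE_FLOW_GEN_PROFID(ICE_FLOW_HASH_IPV4,
 *                                   ICE_FLOW_SEG_HDR_IPV4, 1);
 */
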
3421 static void
3422 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3423 {
3424         u32 s = ((src % 4) << 3); /* byte shift */
3425         u32 v = dst | 0x80; /* value to program */
3426         u8 i = src / 4; /* register index */
3427         u32 reg;
3428
3429         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3430         reg = (reg & ~(0xff << s)) | (v << s);
3431         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3432 }
3433
3434 static void
3435 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3436 {
3437         int fv_last_word =
3438                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3439         int i;
3440
3441         for (i = 0; i < len; i++) {
3442                 ice_rss_config_xor_word(hw, prof_id,
3443                                         /* Yes, field vector in GLQF_HSYMM and
3444                                          * GLQF_HINSET is reversed!
3445                                          */
3446                                         fv_last_word - (src + i),
3447                                         fv_last_word - (dst + i));
3448                 ice_rss_config_xor_word(hw, prof_id,
3449                                         fv_last_word - (dst + i),
3450                                         fv_last_word - (src + i));
3451         }
3452 }
3453
3454 static void
3455 ice_rss_update_symm(struct ice_hw *hw,
3456                     struct ice_flow_prof *prof)
3457 {
3458         struct ice_prof_map *map;
3459         u8 prof_id, m;
3460
3461         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3462         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3463         if (map)
3464                 prof_id = map->prof_id;
3465         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3466         if (!map)
3467                 return;
3468         /* clear to default */
3469         for (m = 0; m < 6; m++)
3470                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3471         if (prof->cfg.symm) {
3472                 struct ice_flow_seg_info *seg =
3473                         &prof->segs[prof->segs_cnt - 1];
3474
3475                 struct ice_flow_seg_xtrct *ipv4_src =
3476                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3477                 struct ice_flow_seg_xtrct *ipv4_dst =
3478                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3479                 struct ice_flow_seg_xtrct *ipv6_src =
3480                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3481                 struct ice_flow_seg_xtrct *ipv6_dst =
3482                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3483
3484                 struct ice_flow_seg_xtrct *tcp_src =
3485                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3486                 struct ice_flow_seg_xtrct *tcp_dst =
3487                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3488
3489                 struct ice_flow_seg_xtrct *udp_src =
3490                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3491                 struct ice_flow_seg_xtrct *udp_dst =
3492                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3493
3494                 struct ice_flow_seg_xtrct *sctp_src =
3495                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3496                 struct ice_flow_seg_xtrct *sctp_dst =
3497                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3498
3499                 /* xor IPv4 */
3500                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3501                         ice_rss_config_xor(hw, prof_id,
3502                                            ipv4_src->idx, ipv4_dst->idx, 2);
3503
3504                 /* xor IPv6 */
3505                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3506                         ice_rss_config_xor(hw, prof_id,
3507                                            ipv6_src->idx, ipv6_dst->idx, 8);
3508
3509                 /* xor TCP */
3510                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3511                         ice_rss_config_xor(hw, prof_id,
3512                                            tcp_src->idx, tcp_dst->idx, 1);
3513
3514                 /* xor UDP */
3515                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3516                         ice_rss_config_xor(hw, prof_id,
3517                                            udp_src->idx, udp_dst->idx, 1);
3518
3519                 /* xor SCTP */
3520                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3521                         ice_rss_config_xor(hw, prof_id,
3522                                            sctp_src->idx, sctp_dst->idx, 1);
3523         }
3524 }
3525
3526 /**
3527  * ice_add_rss_cfg_sync - add an RSS configuration
3528  * @hw: pointer to the hardware structure
3529  * @vsi_handle: software VSI handle
3530  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3531  * @addl_hdrs: protocol header fields
3532  * @segs_cnt: packet segment count
3533  * @symm: symmetric hash enable/disable
3534  *
3535  * Assumption: lock has already been acquired for RSS list
3536  */
3537 static enum ice_status
3538 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3539                      u32 addl_hdrs, u8 segs_cnt, bool symm)
3540 {
3541         const enum ice_block blk = ICE_BLK_RSS;
3542         struct ice_flow_prof *prof = NULL;
3543         struct ice_flow_seg_info *segs;
3544         enum ice_status status;
3545
3546         if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3547                 return ICE_ERR_PARAM;
3548
3549         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3550                                                       sizeof(*segs));
3551         if (!segs)
3552                 return ICE_ERR_NO_MEMORY;
3553
3554         /* Construct the packet segment info from the hashed fields */
3555         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3556                                            addl_hdrs);
3557         if (status)
3558                 goto exit;
3559
3560         /* don't do RSS for GTPU outer */
3561         if (segs_cnt == ICE_RSS_OUTER_HEADERS &&
3562             segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3563                 status = ICE_SUCCESS;
3564                 goto exit;
3565         }
3566
3567         /* Search for a flow profile that has matching headers, hash fields
3568          * and has the input VSI associated to it. If found, no further
3569          * operations required and exit.
3570          */
3571         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3572                                         vsi_handle,
3573                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
3574                                         ICE_FLOW_FIND_PROF_CHK_VSI);
3575         if (prof) {
3576                 if (prof->cfg.symm == symm)
3577                         goto exit;
3578                 prof->cfg.symm = symm;
3579                 goto update_symm;
3580         }
3581
3582         /* Check if a flow profile exists with the same protocol headers and
3583          * associated with the input VSI. If so disassociate the VSI from
3584          * this profile. The VSI will be added to a new profile created with
3585          * the protocol header and new hash field configuration.
3586          */
3587         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3588                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3589         if (prof) {
3590                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3591                 if (!status)
3592                         ice_rem_rss_list(hw, vsi_handle, prof);
3593                 else
3594                         goto exit;
3595
3596                 /* Remove profile if it has no VSIs associated */
3597                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3598                         status = ice_flow_rem_prof(hw, blk, prof->id);
3599                         if (status)
3600                                 goto exit;
3601                 }
3602         }
3603
3604         /* Search for a profile that has same match fields only. If this
3605          * exists then associate the VSI to this profile.
3606          */
3607         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3608                                         vsi_handle,
3609                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3610         if (prof) {
3611                 if (prof->cfg.symm == symm) {
3612                         status = ice_flow_assoc_prof(hw, blk, prof,
3613                                                      vsi_handle);
3614                         if (!status)
3615                                 status = ice_add_rss_list(hw, vsi_handle,
3616                                                           prof);
3617                 } else {
3618                         /* if a profile exists but with a different symmetric
3619                          * requirement, just return an error.
3620                          */
3621                         status = ICE_ERR_NOT_SUPPORTED;
3622                 }
3623                 goto exit;
3624         }
3625
3626         /* Create a new flow profile with generated profile and packet
3627          * segment information.
3628          */
3629         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3630                                    ICE_FLOW_GEN_PROFID(hashed_flds,
3631                                                        segs[segs_cnt - 1].hdrs,
3632                                                        segs_cnt),
3633                                    segs, segs_cnt, NULL, 0, &prof);
3634         if (status)
3635                 goto exit;
3636
3637         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3638         /* If association to a new flow profile failed then this profile can
3639          * be removed.
3640          */
3641         if (status) {
3642                 ice_flow_rem_prof(hw, blk, prof->id);
3643                 goto exit;
3644         }
3645
3646         status = ice_add_rss_list(hw, vsi_handle, prof);
3647
3648         prof->cfg.symm = symm;
3649
3650 update_symm:
3651         ice_rss_update_symm(hw, prof);
3652
3653 exit:
3654         ice_free(hw, segs);
3655         return status;
3656 }
3657
3658 /**
3659  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3660  * @hw: pointer to the hardware structure
3661  * @vsi_handle: software VSI handle
3662  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3663  * @addl_hdrs: protocol header fields
3664  * @symm: symmetric hash enable/disable
3665  *
3666  * This function will generate a flow profile based on the input fields to
3667  * hash on and the flow type, and then use the VSI number to add a flow
3668  * entry to the profile.
3669  */
3670 enum ice_status
3671 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3672                 u32 addl_hdrs, bool symm)
3673 {
3674         enum ice_status status;
3675
3676         if (hashed_flds == ICE_HASH_INVALID ||
3677             !ice_is_vsi_valid(hw, vsi_handle))
3678                 return ICE_ERR_PARAM;
3679
3680         ice_acquire_lock(&hw->rss_locks);
3681         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3682                                       ICE_RSS_OUTER_HEADERS, symm);
3683
3684         if (!status)
3685                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3686                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3687                                               symm);
3688         ice_release_lock(&hw->rss_locks);
3689
3690         return status;
3691 }
3692
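/* Illustrative usage sketch (not compiled; the ICE_FLOW_HASH_* bitmaps are
 * the hash-field definitions from ice_flow.h and this particular combination
 * is only an example): enable symmetric RSS hashing on the IPv4 addresses
 * and UDP ports of a VSI:
 *
 *      status = ice_add_rss_cfg(hw, vsi_handle,
 *                               ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT,
 *                               ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP,
 *                               true);
 */
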
3693 /**
3694  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3695  * @hw: pointer to the hardware structure
3696  * @vsi_handle: software VSI handle
3697  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3698  * @addl_hdrs: Protocol header fields within a packet segment
3699  * @segs_cnt: packet segment count
3700  *
3701  * Assumption: lock has already been acquired for RSS list
3702  */
3703 static enum ice_status
3704 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3705                      u32 addl_hdrs, u8 segs_cnt)
3706 {
3707         const enum ice_block blk = ICE_BLK_RSS;
3708         struct ice_flow_seg_info *segs;
3709         struct ice_flow_prof *prof;
3710         enum ice_status status;
3711
3712         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3713                                                       sizeof(*segs));
3714         if (!segs)
3715                 return ICE_ERR_NO_MEMORY;
3716
3717         /* Construct the packet segment info from the hashed fields */
3718         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3719                                            addl_hdrs);
3720         if (status)
3721                 goto out;
3722
3723         if (segs_cnt == ICE_RSS_OUTER_HEADERS &&
3724             segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3725                 status = ICE_SUCCESS;
3726                 goto out;
3727         }
3728
3729         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3730                                         vsi_handle,
3731                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3732         if (!prof) {
3733                 status = ICE_ERR_DOES_NOT_EXIST;
3734                 goto out;
3735         }
3736
3737         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3738         if (status)
3739                 goto out;
3740
3741         /* Remove RSS configuration from VSI context before deleting
3742          * the flow profile.
3743          */
3744         ice_rem_rss_list(hw, vsi_handle, prof);
3745
3746         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3747                 status = ice_flow_rem_prof(hw, blk, prof->id);
3748
3749 out:
3750         ice_free(hw, segs);
3751         return status;
3752 }
3753
3754 /**
3755  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3756  * @hw: pointer to the hardware structure
3757  * @vsi_handle: software VSI handle
3758  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3759  * @addl_hdrs: Protocol header fields within a packet segment
3760  *
3761  * This function will look up the flow profile based on the input
3762  * hash field bitmap, iterate through the profile entry list of
3763  * that profile, and find the entry associated with the input VSI to be
3764  * removed. Calls are made to underlying flow APIs which will in
3765  * turn build or update buffers for RSS XLT1 section.
3766  */
3767 enum ice_status
3768 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3769                 u32 addl_hdrs)
3770 {
3771         enum ice_status status;
3772
3773         if (hashed_flds == ICE_HASH_INVALID ||
3774             !ice_is_vsi_valid(hw, vsi_handle))
3775                 return ICE_ERR_PARAM;
3776
3777         ice_acquire_lock(&hw->rss_locks);
3778         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3779                                       ICE_RSS_OUTER_HEADERS);
3780         if (!status)
3781                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3782                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3783         ice_release_lock(&hw->rss_locks);
3784
3785         return status;
3786 }
3787
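/* Illustrative usage sketch (not compiled): remove the configuration added in
 * the ice_add_rss_cfg() example above, using the same hash fields and header
 * flags:
 *
 *      status = ice_rem_rss_cfg(hw, vsi_handle,
 *                               ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT,
 *                               ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP);
 */
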
3788 /**
3789  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3790  * @hw: pointer to the hardware structure
3791  * @vsi_handle: software VSI handle
3792  */
3793 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3794 {
3795         enum ice_status status = ICE_SUCCESS;
3796         struct ice_rss_cfg *r;
3797
3798         if (!ice_is_vsi_valid(hw, vsi_handle))
3799                 return ICE_ERR_PARAM;
3800
3801         ice_acquire_lock(&hw->rss_locks);
3802         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3803                             ice_rss_cfg, l_entry) {
3804                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3805                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3806                                                       r->hashed_flds,
3807                                                       r->packet_hdr,
3808                                                       ICE_RSS_OUTER_HEADERS,
3809                                                       r->symm);
3810                         if (status)
3811                                 break;
3812                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3813                                                       r->hashed_flds,
3814                                                       r->packet_hdr,
3815                                                       ICE_RSS_INNER_HEADERS,
3816                                                       r->symm);
3817                         if (status)
3818                                 break;
3819                 }
3820         }
3821         ice_release_lock(&hw->rss_locks);
3822
3823         return status;
3824 }
3825
3826 /**
3827  * ice_get_rss_cfg - returns hashed fields for the given header types
3828  * @hw: pointer to the hardware structure
3829  * @vsi_handle: software VSI handle
3830  * @hdrs: protocol header type
3831  *
3832  * This function will return the match fields of the first instance of a flow
3833  * profile having the given header types and containing the input VSI
3834  */
3835 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3836 {
3837         u64 rss_hash = ICE_HASH_INVALID;
3838         struct ice_rss_cfg *r;
3839
3840         /* verify that the protocol header is non-zero and the VSI is valid */
3841         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3842                 return ICE_HASH_INVALID;
3843
3844         ice_acquire_lock(&hw->rss_locks);
3845         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3846                             ice_rss_cfg, l_entry)
3847                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3848                     r->packet_hdr == hdrs) {
3849                         rss_hash = r->hashed_flds;
3850                         break;
3851                 }
3852         ice_release_lock(&hw->rss_locks);
3853
3854         return rss_hash;
3855 }
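
/* Illustrative usage sketch (not compiled): query which fields are currently
 * hashed for plain IPv4 traffic on a VSI:
 *
 *      u64 hash = ice_get_rss_cfg(hw, vsi_handle, ICE_FLOW_SEG_HDR_IPV4);
 *
 * A return value of ICE_HASH_INVALID means no matching RSS configuration
 * exists for this VSI.
 */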