1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
17 #define ICE_FLOW_FLD_SZ_IP_TTL          1
18 #define ICE_FLOW_FLD_SZ_IP_PROT         1
19 #define ICE_FLOW_FLD_SZ_PORT            2
20 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
21 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
22 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
23 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
24 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
25 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
26 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
27 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
28 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
29 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
30 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
31 #define ICE_FLOW_FLD_SZ_AH_SPI  4
32 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
33
34 /* Describe properties of a protocol header field */
35 struct ice_flow_field_info {
36         enum ice_flow_seg_hdr hdr;
37         s16 off;        /* Offset from start of a protocol header, in bits */
38         u16 size;       /* Size of field, in bits */
39         u16 mask;       /* 16-bit mask for field */
40 };
41
42 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
43         .hdr = _hdr, \
44         .off = (_offset_bytes) * BITS_PER_BYTE, \
45         .size = (_size_bytes) * BITS_PER_BYTE, \
46         .mask = 0, \
47 }
48
49 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
50         .hdr = _hdr, \
51         .off = (_offset_bytes) * BITS_PER_BYTE, \
52         .size = (_size_bytes) * BITS_PER_BYTE, \
53         .mask = _mask, \
54 }
55
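/* Illustration: the macros above convert byte-based offsets and sizes into
 * bit units.  For example, the ICE_FLOW_FIELD_IDX_IPV4_SA entry in the table
 * below,
 *
 *     ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR)
 *
 * expands to { .hdr = ICE_FLOW_SEG_HDR_IPV4, .off = 96, .size = 32,
 * .mask = 0 }, i.e. the IPv4 source address located 12 bytes into the IPv4
 * header, 4 bytes wide, with no sub-field mask.
 */
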
56 /* Table containing properties of supported protocol header fields */
57 static const
58 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
59         /* Ether */
60         /* ICE_FLOW_FIELD_IDX_ETH_DA */
61         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
62         /* ICE_FLOW_FIELD_IDX_ETH_SA */
63         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
64         /* ICE_FLOW_FIELD_IDX_S_VLAN */
65         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
66         /* ICE_FLOW_FIELD_IDX_C_VLAN */
67         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
68         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
69         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
70         /* IPv4 / IPv6 */
71         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
72         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
73                               0x00fc),
74         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
75         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
76                               0x0ff0),
77         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
78         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
80         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
81         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
82                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
83         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
84         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
86         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
87         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
88                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
89         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
90         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
91         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
92         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
94         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
95         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
96         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
99                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
100         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
101         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
102                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
103         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
104         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
105                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
106         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
107         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
108                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
109         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
110         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
111                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
112         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
113         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
114                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
115         /* Transport */
116         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
117         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
118         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
119         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
120         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
121         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
122         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
123         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
124         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
125         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
126         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
127         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
128         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
129         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
130         /* ARP */
131         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
132         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
133         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
134         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
136         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
137         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
138         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
139         /* ICE_FLOW_FIELD_IDX_ARP_OP */
140         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
141         /* ICMP */
142         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
143         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
144         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
145         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
146         /* GRE */
147         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
148         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
149         /* GTP */
150         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
151         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
152                           ICE_FLOW_FLD_SZ_GTP_TEID),
153         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
154         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
155                           ICE_FLOW_FLD_SZ_GTP_TEID),
156         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
157         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
158                           ICE_FLOW_FLD_SZ_GTP_TEID),
159         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
160         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
161                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
162         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
163         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
164                           ICE_FLOW_FLD_SZ_GTP_TEID),
165         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
166         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
167                           ICE_FLOW_FLD_SZ_GTP_TEID),
168         /* PPPOE */
169         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
170         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
171                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
172         /* PFCP */
173         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
174         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
175                           ICE_FLOW_FLD_SZ_PFCP_SEID),
176         /* L2TPV3 */
177         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
178         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
179                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
180         /* ESP */
181         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
182         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
183                           ICE_FLOW_FLD_SZ_ESP_SPI),
184         /* AH */
185         /* ICE_FLOW_FIELD_IDX_AH_SPI */
186         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
187                           ICE_FLOW_FLD_SZ_AH_SPI),
188         /* NAT_T_ESP */
189         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
190         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
191                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
192 };
193
194 /* Bitmaps indicating relevant packet types for a particular protocol header
195  *
196  * Packet types for packets with an Outer/First/Single MAC header
197  */
198 static const u32 ice_ptypes_mac_ofos[] = {
199         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
200         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
201         0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
202         0x00000000, 0x00000000, 0x00000000, 0x00000000,
203         0x00000000, 0x00000000, 0x00000000, 0x00000000,
204         0x00000000, 0x00000000, 0x00000000, 0x00000000,
205         0x00000000, 0x00000000, 0x00000000, 0x00000000,
206         0x00000000, 0x00000000, 0x00000000, 0x00000000,
207 };
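
/* Note on the layout of these tables: each array is 32 x 32-bit words
 * (1024 bits, matching ICE_FLOW_PTYPE_MAX) and is cast to an ice_bitmap_t
 * pointer so it can be ANDed into params->ptypes by ice_flow_proc_seg_hdrs()
 * below.  Assuming the little-endian layout these tables rely on, bit n of
 * word w marks packet type (32 * w + n) as relevant; e.g. ice_ptypes_arp_of
 * further down sets only bit 11 of its first word, selecting a single packet
 * type.
 */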
208
209 /* Packet types for packets with an Innermost/Last MAC VLAN header */
210 static const u32 ice_ptypes_macvlan_il[] = {
211         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
212         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
213         0x00000000, 0x00000000, 0x00000000, 0x00000000,
214         0x00000000, 0x00000000, 0x00000000, 0x00000000,
215         0x00000000, 0x00000000, 0x00000000, 0x00000000,
216         0x00000000, 0x00000000, 0x00000000, 0x00000000,
217         0x00000000, 0x00000000, 0x00000000, 0x00000000,
218         0x00000000, 0x00000000, 0x00000000, 0x00000000,
219 };
220
221 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
222  * include IPV4 other PTYPEs
223  */
224 static const u32 ice_ptypes_ipv4_ofos[] = {
225         0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
226         0x00000000, 0x00000155, 0x00000000, 0x00000000,
227         0x00000000, 0x000FC000, 0x00000000, 0x00000000,
228         0x00000000, 0x00000000, 0x00000000, 0x00000000,
229         0x00000000, 0x00000000, 0x00000000, 0x00000000,
230         0x00000000, 0x00000000, 0x00000000, 0x00000000,
231         0x00000000, 0x00000000, 0x00000000, 0x00000000,
232         0x00000000, 0x00000000, 0x00000000, 0x00000000,
233 };
234
235 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
236  * IPV4 other PTYPEs
237  */
238 static const u32 ice_ptypes_ipv4_ofos_all[] = {
239         0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
240         0x00000000, 0x00000155, 0x00000000, 0x00000000,
241         0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
242         0x00000000, 0x00000000, 0x00000000, 0x00000000,
243         0x00000000, 0x00000000, 0x00000000, 0x00000000,
244         0x00000000, 0x00000000, 0x00000000, 0x00000000,
245         0x00000000, 0x00000000, 0x00000000, 0x00000000,
246         0x00000000, 0x00000000, 0x00000000, 0x00000000,
247 };
248
249 /* Packet types for packets with an Innermost/Last IPv4 header */
250 static const u32 ice_ptypes_ipv4_il[] = {
251         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
252         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
253         0x00000000, 0x00000000, 0x001FF800, 0x00000000,
254         0x00000000, 0x00000000, 0x00000000, 0x00000000,
255         0x00000000, 0x00000000, 0x00000000, 0x00000000,
256         0x00000000, 0x00000000, 0x00000000, 0x00000000,
257         0x00000000, 0x00000000, 0x00000000, 0x00000000,
258         0x00000000, 0x00000000, 0x00000000, 0x00000000,
259 };
260
261 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
262  * include IPV6 other PTYPEs
263  */
264 static const u32 ice_ptypes_ipv6_ofos[] = {
265         0x00000000, 0x00000000, 0x77000000, 0x10002000,
266         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
267         0x00000000, 0x03F00000, 0x00000000, 0x00000000,
268         0x00000000, 0x00000000, 0x00000000, 0x00000000,
269         0x00000000, 0x00000000, 0x00000000, 0x00000000,
270         0x00000000, 0x00000000, 0x00000000, 0x00000000,
271         0x00000000, 0x00000000, 0x00000000, 0x00000000,
272         0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 };
274
275 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
276  * IPV6 other PTYPEs
277  */
278 static const u32 ice_ptypes_ipv6_ofos_all[] = {
279         0x00000000, 0x00000000, 0x77000000, 0x10002000,
280         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
281         0x00000000, 0x03F00000, 0x7C1F0000, 0x00000206,
282         0x00000000, 0x00000000, 0x00000000, 0x00000000,
283         0x00000000, 0x00000000, 0x00000000, 0x00000000,
284         0x00000000, 0x00000000, 0x00000000, 0x00000000,
285         0x00000000, 0x00000000, 0x00000000, 0x00000000,
286         0x00000000, 0x00000000, 0x00000000, 0x00000000,
287 };
288
289 /* Packet types for packets with an Innermost/Last IPv6 header */
290 static const u32 ice_ptypes_ipv6_il[] = {
291         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
292         0x00000770, 0x00000000, 0x00000000, 0x00000000,
293         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
294         0x00000000, 0x00000000, 0x00000000, 0x00000000,
295         0x00000000, 0x00000000, 0x00000000, 0x00000000,
296         0x00000000, 0x00000000, 0x00000000, 0x00000000,
297         0x00000000, 0x00000000, 0x00000000, 0x00000000,
298         0x00000000, 0x00000000, 0x00000000, 0x00000000,
299 };
300
301 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
302 static const u32 ice_ipv4_ofos_no_l4[] = {
303         0x10C00000, 0x04000800, 0x00000000, 0x00000000,
304         0x00000000, 0x00000000, 0x00000000, 0x00000000,
305         0x00000000, 0x000cc000, 0x00000000, 0x00000000,
306         0x00000000, 0x00000000, 0x00000000, 0x00000000,
307         0x00000000, 0x00000000, 0x00000000, 0x00000000,
308         0x00000000, 0x00000000, 0x00000000, 0x00000000,
309         0x00000000, 0x00000000, 0x00000000, 0x00000000,
310         0x00000000, 0x00000000, 0x00000000, 0x00000000,
311 };
312
313 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
314 static const u32 ice_ipv4_il_no_l4[] = {
315         0x60000000, 0x18043008, 0x80000002, 0x6010c021,
316         0x00000008, 0x00000000, 0x00000000, 0x00000000,
317         0x00000000, 0x00000000, 0x00139800, 0x00000000,
318         0x00000000, 0x00000000, 0x00000000, 0x00000000,
319         0x00000000, 0x00000000, 0x00000000, 0x00000000,
320         0x00000000, 0x00000000, 0x00000000, 0x00000000,
321         0x00000000, 0x00000000, 0x00000000, 0x00000000,
322         0x00000000, 0x00000000, 0x00000000, 0x00000000,
323 };
324
325 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
326 static const u32 ice_ipv6_ofos_no_l4[] = {
327         0x00000000, 0x00000000, 0x43000000, 0x10002000,
328         0x00000000, 0x00000000, 0x00000000, 0x00000000,
329         0x00000000, 0x02300000, 0x00000000, 0x00000000,
330         0x00000000, 0x00000000, 0x00000000, 0x00000000,
331         0x00000000, 0x00000000, 0x00000000, 0x00000000,
332         0x00000000, 0x00000000, 0x00000000, 0x00000000,
333         0x00000000, 0x00000000, 0x00000000, 0x00000000,
334         0x00000000, 0x00000000, 0x00000000, 0x00000000,
335 };
336
337 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
338 static const u32 ice_ipv6_il_no_l4[] = {
339         0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
340         0x00000430, 0x00000000, 0x00000000, 0x00000000,
341         0x00000000, 0x00000000, 0x4e600000, 0x00000000,
342         0x00000000, 0x00000000, 0x00000000, 0x00000000,
343         0x00000000, 0x00000000, 0x00000000, 0x00000000,
344         0x00000000, 0x00000000, 0x00000000, 0x00000000,
345         0x00000000, 0x00000000, 0x00000000, 0x00000000,
346         0x00000000, 0x00000000, 0x00000000, 0x00000000,
347 };
348
349 /* Packet types for packets with an Outermost/First ARP header */
350 static const u32 ice_ptypes_arp_of[] = {
351         0x00000800, 0x00000000, 0x00000000, 0x00000000,
352         0x00000000, 0x00000000, 0x00000000, 0x00000000,
353         0x00000000, 0x00000000, 0x00000000, 0x00000000,
354         0x00000000, 0x00000000, 0x00000000, 0x00000000,
355         0x00000000, 0x00000000, 0x00000000, 0x00000000,
356         0x00000000, 0x00000000, 0x00000000, 0x00000000,
357         0x00000000, 0x00000000, 0x00000000, 0x00000000,
358         0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 };
360
361 /* UDP Packet types for non-tunneled packets or tunneled
362  * packets with inner UDP.
363  */
364 static const u32 ice_ptypes_udp_il[] = {
365         0x81000000, 0x20204040, 0x04000010, 0x80810102,
366         0x00000040, 0x00000000, 0x00000000, 0x00000000,
367         0x00000000, 0x00410000, 0x90842000, 0x00000007,
368         0x00000000, 0x00000000, 0x00000000, 0x00000000,
369         0x00000000, 0x00000000, 0x00000000, 0x00000000,
370         0x00000000, 0x00000000, 0x00000000, 0x00000000,
371         0x00000000, 0x00000000, 0x00000000, 0x00000000,
372         0x00000000, 0x00000000, 0x00000000, 0x00000000,
373 };
374
375 /* Packet types for packets with an Innermost/Last TCP header */
376 static const u32 ice_ptypes_tcp_il[] = {
377         0x04000000, 0x80810102, 0x10000040, 0x02040408,
378         0x00000102, 0x00000000, 0x00000000, 0x00000000,
379         0x00000000, 0x00820000, 0x21084000, 0x00000000,
380         0x00000000, 0x00000000, 0x00000000, 0x00000000,
381         0x00000000, 0x00000000, 0x00000000, 0x00000000,
382         0x00000000, 0x00000000, 0x00000000, 0x00000000,
383         0x00000000, 0x00000000, 0x00000000, 0x00000000,
384         0x00000000, 0x00000000, 0x00000000, 0x00000000,
385 };
386
387 /* Packet types for packets with an Innermost/Last SCTP header */
388 static const u32 ice_ptypes_sctp_il[] = {
389         0x08000000, 0x01020204, 0x20000081, 0x04080810,
390         0x00000204, 0x00000000, 0x00000000, 0x00000000,
391         0x00000000, 0x01040000, 0x00000000, 0x00000000,
392         0x00000000, 0x00000000, 0x00000000, 0x00000000,
393         0x00000000, 0x00000000, 0x00000000, 0x00000000,
394         0x00000000, 0x00000000, 0x00000000, 0x00000000,
395         0x00000000, 0x00000000, 0x00000000, 0x00000000,
396         0x00000000, 0x00000000, 0x00000000, 0x00000000,
397 };
398
399 /* Packet types for packets with an Outermost/First ICMP header */
400 static const u32 ice_ptypes_icmp_of[] = {
401         0x10000000, 0x00000000, 0x00000000, 0x00000000,
402         0x00000000, 0x00000000, 0x00000000, 0x00000000,
403         0x00000000, 0x00000000, 0x00000000, 0x00000000,
404         0x00000000, 0x00000000, 0x00000000, 0x00000000,
405         0x00000000, 0x00000000, 0x00000000, 0x00000000,
406         0x00000000, 0x00000000, 0x00000000, 0x00000000,
407         0x00000000, 0x00000000, 0x00000000, 0x00000000,
408         0x00000000, 0x00000000, 0x00000000, 0x00000000,
409 };
410
411 /* Packet types for packets with an Innermost/Last ICMP header */
412 static const u32 ice_ptypes_icmp_il[] = {
413         0x00000000, 0x02040408, 0x40000102, 0x08101020,
414         0x00000408, 0x00000000, 0x00000000, 0x00000000,
415         0x00000000, 0x00000000, 0x42108000, 0x00000000,
416         0x00000000, 0x00000000, 0x00000000, 0x00000000,
417         0x00000000, 0x00000000, 0x00000000, 0x00000000,
418         0x00000000, 0x00000000, 0x00000000, 0x00000000,
419         0x00000000, 0x00000000, 0x00000000, 0x00000000,
420         0x00000000, 0x00000000, 0x00000000, 0x00000000,
421 };
422
423 /* Packet types for packets with an Outermost/First GRE header */
424 static const u32 ice_ptypes_gre_of[] = {
425         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
426         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
427         0x00000000, 0x00000000, 0x00000000, 0x00000000,
428         0x00000000, 0x00000000, 0x00000000, 0x00000000,
429         0x00000000, 0x00000000, 0x00000000, 0x00000000,
430         0x00000000, 0x00000000, 0x00000000, 0x00000000,
431         0x00000000, 0x00000000, 0x00000000, 0x00000000,
432         0x00000000, 0x00000000, 0x00000000, 0x00000000,
433 };
434
435 /* Packet types for packets with an Innermost/Last MAC header */
436 static const u32 ice_ptypes_mac_il[] = {
437         0x00000000, 0x00000000, 0x00000000, 0x00000000,
438         0x00000000, 0x00000000, 0x00000000, 0x00000000,
439         0x00000000, 0x00000000, 0x00000000, 0x00000000,
440         0x00000000, 0x00000000, 0x00000000, 0x00000000,
441         0x00000000, 0x00000000, 0x00000000, 0x00000000,
442         0x00000000, 0x00000000, 0x00000000, 0x00000000,
443         0x00000000, 0x00000000, 0x00000000, 0x00000000,
444         0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 };
446
447 /* Packet types for GTPC */
448 static const u32 ice_ptypes_gtpc[] = {
449         0x00000000, 0x00000000, 0x00000000, 0x00000000,
450         0x00000000, 0x00000000, 0x00000000, 0x00000000,
451         0x00000000, 0x00000000, 0x00000180, 0x00000000,
452         0x00000000, 0x00000000, 0x00000000, 0x00000000,
453         0x00000000, 0x00000000, 0x00000000, 0x00000000,
454         0x00000000, 0x00000000, 0x00000000, 0x00000000,
455         0x00000000, 0x00000000, 0x00000000, 0x00000000,
456         0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 };
458
459 /* Packet types for GTPC with TEID */
460 static const u32 ice_ptypes_gtpc_tid[] = {
461         0x00000000, 0x00000000, 0x00000000, 0x00000000,
462         0x00000000, 0x00000000, 0x00000000, 0x00000000,
463         0x00000000, 0x00000000, 0x00000060, 0x00000000,
464         0x00000000, 0x00000000, 0x00000000, 0x00000000,
465         0x00000000, 0x00000000, 0x00000000, 0x00000000,
466         0x00000000, 0x00000000, 0x00000000, 0x00000000,
467         0x00000000, 0x00000000, 0x00000000, 0x00000000,
468         0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 };
470
471 /* Packet types for GTPU */
472 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
473         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
474         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
475         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
476         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
477         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
478         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
479         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
480         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
481         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
482         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
483         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
484         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
485         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
486         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
487         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
488         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
489         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
490         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
491         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
492         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
493 };
494
495 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
496         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
497         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
498         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
499         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
500         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
501         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
502         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
503         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
504         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
505         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
506         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
507         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
508         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
509         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
510         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
511         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
512         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
513         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
514         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
515         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
516 };
517
518 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
519         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
520         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
521         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
522         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
523         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
524         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
525         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
526         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
527         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
528         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
529         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
530         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
531         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
532         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
533         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
534         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
535         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
536         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
537         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
538         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
539 };
540
541 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
542         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
543         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
544         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
545         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
546         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
547         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
548         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
549         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
550         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
551         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
552         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
553         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
554         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
555         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
556         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
557         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
558         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
559         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
560         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
561         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
562 };
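
/* The four ice_attr_gtpu_* tables above cover the same set of GTPU packet
 * types; they differ only in the PDU-type attribute attached to each entry
 * (session, extension header, downlink or uplink).  ice_flow_proc_seg_hdrs()
 * selects one of them as params->attr, depending on which GTPU header flavor
 * the segment requests, alongside the ice_ptypes_gtpu bitmap below.
 */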
563
564 static const u32 ice_ptypes_gtpu[] = {
565         0x00000000, 0x00000000, 0x00000000, 0x00000000,
566         0x00000000, 0x00000000, 0x00000000, 0x00000000,
567         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
568         0x00000000, 0x00000000, 0x00000000, 0x00000000,
569         0x00000000, 0x00000000, 0x00000000, 0x00000000,
570         0x00000000, 0x00000000, 0x00000000, 0x00000000,
571         0x00000000, 0x00000000, 0x00000000, 0x00000000,
572         0x00000000, 0x00000000, 0x00000000, 0x00000000,
573 };
574
575 /* Packet types for pppoe */
576 static const u32 ice_ptypes_pppoe[] = {
577         0x00000000, 0x00000000, 0x00000000, 0x00000000,
578         0x00000000, 0x00000000, 0x00000000, 0x00000000,
579         0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
580         0x00000000, 0x00000000, 0x00000000, 0x00000000,
581         0x00000000, 0x00000000, 0x00000000, 0x00000000,
582         0x00000000, 0x00000000, 0x00000000, 0x00000000,
583         0x00000000, 0x00000000, 0x00000000, 0x00000000,
584         0x00000000, 0x00000000, 0x00000000, 0x00000000,
585 };
586
587 /* Packet types for packets with PFCP NODE header */
588 static const u32 ice_ptypes_pfcp_node[] = {
589         0x00000000, 0x00000000, 0x00000000, 0x00000000,
590         0x00000000, 0x00000000, 0x00000000, 0x00000000,
591         0x00000000, 0x00000000, 0x80000000, 0x00000002,
592         0x00000000, 0x00000000, 0x00000000, 0x00000000,
593         0x00000000, 0x00000000, 0x00000000, 0x00000000,
594         0x00000000, 0x00000000, 0x00000000, 0x00000000,
595         0x00000000, 0x00000000, 0x00000000, 0x00000000,
596         0x00000000, 0x00000000, 0x00000000, 0x00000000,
597 };
598
599 /* Packet types for packets with PFCP SESSION header */
600 static const u32 ice_ptypes_pfcp_session[] = {
601         0x00000000, 0x00000000, 0x00000000, 0x00000000,
602         0x00000000, 0x00000000, 0x00000000, 0x00000000,
603         0x00000000, 0x00000000, 0x00000000, 0x00000005,
604         0x00000000, 0x00000000, 0x00000000, 0x00000000,
605         0x00000000, 0x00000000, 0x00000000, 0x00000000,
606         0x00000000, 0x00000000, 0x00000000, 0x00000000,
607         0x00000000, 0x00000000, 0x00000000, 0x00000000,
608         0x00000000, 0x00000000, 0x00000000, 0x00000000,
609 };
610
611 /* Packet types for l2tpv3 */
612 static const u32 ice_ptypes_l2tpv3[] = {
613         0x00000000, 0x00000000, 0x00000000, 0x00000000,
614         0x00000000, 0x00000000, 0x00000000, 0x00000000,
615         0x00000000, 0x00000000, 0x00000000, 0x00000300,
616         0x00000000, 0x00000000, 0x00000000, 0x00000000,
617         0x00000000, 0x00000000, 0x00000000, 0x00000000,
618         0x00000000, 0x00000000, 0x00000000, 0x00000000,
619         0x00000000, 0x00000000, 0x00000000, 0x00000000,
620         0x00000000, 0x00000000, 0x00000000, 0x00000000,
621 };
622
623 /* Packet types for esp */
624 static const u32 ice_ptypes_esp[] = {
625         0x00000000, 0x00000000, 0x00000000, 0x00000000,
626         0x00000000, 0x00000003, 0x00000000, 0x00000000,
627         0x00000000, 0x00000000, 0x00000000, 0x00000000,
628         0x00000000, 0x00000000, 0x00000000, 0x00000000,
629         0x00000000, 0x00000000, 0x00000000, 0x00000000,
630         0x00000000, 0x00000000, 0x00000000, 0x00000000,
631         0x00000000, 0x00000000, 0x00000000, 0x00000000,
632         0x00000000, 0x00000000, 0x00000000, 0x00000000,
633 };
634
635 /* Packet types for ah */
636 static const u32 ice_ptypes_ah[] = {
637         0x00000000, 0x00000000, 0x00000000, 0x00000000,
638         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
639         0x00000000, 0x00000000, 0x00000000, 0x00000000,
640         0x00000000, 0x00000000, 0x00000000, 0x00000000,
641         0x00000000, 0x00000000, 0x00000000, 0x00000000,
642         0x00000000, 0x00000000, 0x00000000, 0x00000000,
643         0x00000000, 0x00000000, 0x00000000, 0x00000000,
644         0x00000000, 0x00000000, 0x00000000, 0x00000000,
645 };
646
647 /* Packet types for packets with NAT_T ESP header */
648 static const u32 ice_ptypes_nat_t_esp[] = {
649         0x00000000, 0x00000000, 0x00000000, 0x00000000,
650         0x00000000, 0x00000030, 0x00000000, 0x00000000,
651         0x00000000, 0x00000000, 0x00000000, 0x00000000,
652         0x00000000, 0x00000000, 0x00000000, 0x00000000,
653         0x00000000, 0x00000000, 0x00000000, 0x00000000,
654         0x00000000, 0x00000000, 0x00000000, 0x00000000,
655         0x00000000, 0x00000000, 0x00000000, 0x00000000,
656         0x00000000, 0x00000000, 0x00000000, 0x00000000,
657 };
658
659 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
660         0x00000846, 0x00000000, 0x00000000, 0x00000000,
661         0x00000000, 0x00000000, 0x00000000, 0x00000000,
662         0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
663         0x00000000, 0x00000000, 0x00000000, 0x00000000,
664         0x00000000, 0x00000000, 0x00000000, 0x00000000,
665         0x00000000, 0x00000000, 0x00000000, 0x00000000,
666         0x00000000, 0x00000000, 0x00000000, 0x00000000,
667         0x00000000, 0x00000000, 0x00000000, 0x00000000,
668 };
669
670 /* Manage parameters and info used during the creation of a flow profile */
671 struct ice_flow_prof_params {
672         enum ice_block blk;
673         u16 entry_length; /* # of bytes a formatted entry will require */
674         u8 es_cnt;
675         struct ice_flow_prof *prof;
676
677         /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
678          * This will give us the direction flags.
679          */
680         struct ice_fv_word es[ICE_MAX_FV_WORDS];
681         /* attributes can be used to add attributes to a particular PTYPE */
682         const struct ice_ptype_attributes *attr;
683         u16 attr_cnt;
684
685         u16 mask[ICE_MAX_FV_WORDS];
686         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
687 };
688
689 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
690         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
691         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
692         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
693         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
694         ICE_FLOW_SEG_HDR_NAT_T_ESP)
695
696 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
697         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
698 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
699         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
700          ICE_FLOW_SEG_HDR_ARP)
701 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
702         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
703          ICE_FLOW_SEG_HDR_SCTP)
704 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
705 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
706         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
707
708 /**
709  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
710  * @segs: array of one or more packet segments that describe the flow
711  * @segs_cnt: number of packet segments provided
712  */
713 static enum ice_status
714 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
715 {
716         u8 i;
717
718         for (i = 0; i < segs_cnt; i++) {
719                 /* Multiple L3 headers */
720                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
721                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
722                         return ICE_ERR_PARAM;
723
724                 /* Multiple L4 headers */
725                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
726                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
727                         return ICE_ERR_PARAM;
728         }
729
730         return ICE_SUCCESS;
731 }
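
/* Example of the checks above: a segment with
 * hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP is accepted (one L3 bit
 * and one L4 bit set), whereas hdrs = ICE_FLOW_SEG_HDR_TCP |
 * ICE_FLOW_SEG_HDR_UDP fails the ice_is_pow2() test on the L4 mask and
 * returns ICE_ERR_PARAM.
 */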
732
733 /* Sizes of fixed known protocol headers without header options */
734 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
735 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
736 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
737 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
738 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
739 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
740 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
741 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
742 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
743
744 /**
745  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
746  * @params: information about the flow to be processed
747  * @seg: index of packet segment whose header size is to be determined
748  */
749 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
750 {
751         u16 sz;
752
753         /* L2 headers */
754         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
755                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
756
757         /* L3 headers */
758         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
759                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
760         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
761                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
762         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
763                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
764         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
765                 /* An L3 header is required if L4 is specified */
766                 return 0;
767
768         /* L4 headers */
769         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
770                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
771         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
772                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
773         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
774                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
775         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
776                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
777
778         return sz;
779 }
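
/* Worked example for ice_flow_calc_seg_sz(): a segment describing
 * MAC + IPv4 + TCP (no VLAN) yields 14 + 20 + 20 = 54 bytes.  A segment that
 * requests an L4 header without any L3 header is rejected by the function
 * and reported as size 0.
 */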
780
781 /**
782  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
783  * @params: information about the flow to be processed
784  *
785  * This function identifies the packet types associated with the protocol
786  * headers present in the packet segments of the specified flow profile.
787  */
788 static enum ice_status
789 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
790 {
791         struct ice_flow_prof *prof;
792         u8 i;
793
794         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
795                    ICE_NONDMA_MEM);
796
797         prof = params->prof;
798
799         for (i = 0; i < params->prof->segs_cnt; i++) {
800                 const ice_bitmap_t *src;
801                 u32 hdrs;
802
803                 hdrs = prof->segs[i].hdrs;
804
805                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
806                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
807                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
808                         ice_and_bitmap(params->ptypes, params->ptypes, src,
809                                        ICE_FLOW_PTYPE_MAX);
810                 }
811
812                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
813                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
814                         ice_and_bitmap(params->ptypes, params->ptypes, src,
815                                        ICE_FLOW_PTYPE_MAX);
816                 }
817
818                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
819                         ice_and_bitmap(params->ptypes, params->ptypes,
820                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
821                                        ICE_FLOW_PTYPE_MAX);
822                 }
823
824                 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
825                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
826                         ice_and_bitmap(params->ptypes, params->ptypes, src,
827                                        ICE_FLOW_PTYPE_MAX);
828                 }
829                 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
830                     (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
831                         src = i ?
832                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
833                                 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
834                         ice_and_bitmap(params->ptypes, params->ptypes, src,
835                                        ICE_FLOW_PTYPE_MAX);
836                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
837                            (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
838                         src = i ?
839                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
840                                 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
841                         ice_and_bitmap(params->ptypes, params->ptypes, src,
842                                        ICE_FLOW_PTYPE_MAX);
843                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
844                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
845                         src = !i ? (const ice_bitmap_t *)ice_ipv4_ofos_no_l4 :
846                                 (const ice_bitmap_t *)ice_ipv4_il_no_l4;
847                         ice_and_bitmap(params->ptypes, params->ptypes, src,
848                                        ICE_FLOW_PTYPE_MAX);
849                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
850                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
851                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
852                         ice_and_bitmap(params->ptypes, params->ptypes, src,
853                                        ICE_FLOW_PTYPE_MAX);
854                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
855                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
856                         src = !i ? (const ice_bitmap_t *)ice_ipv6_ofos_no_l4 :
857                                 (const ice_bitmap_t *)ice_ipv6_il_no_l4;
858                         ice_and_bitmap(params->ptypes, params->ptypes, src,
859                                        ICE_FLOW_PTYPE_MAX);
860                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
861                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
862                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
863                         ice_and_bitmap(params->ptypes, params->ptypes, src,
864                                        ICE_FLOW_PTYPE_MAX);
865                 }
866
867                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
868                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
869                         ice_and_bitmap(params->ptypes, params->ptypes,
870                                        src, ICE_FLOW_PTYPE_MAX);
871                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
872                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
873                         ice_and_bitmap(params->ptypes, params->ptypes, src,
874                                        ICE_FLOW_PTYPE_MAX);
875                 } else {
876                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
877                         ice_andnot_bitmap(params->ptypes, params->ptypes, src,
878                                           ICE_FLOW_PTYPE_MAX);
879                 }
880
881                 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
882                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
883                         ice_and_bitmap(params->ptypes, params->ptypes, src,
884                                        ICE_FLOW_PTYPE_MAX);
885                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
886                         ice_and_bitmap(params->ptypes, params->ptypes,
887                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
888                                        ICE_FLOW_PTYPE_MAX);
889                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
890                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
891                         ice_and_bitmap(params->ptypes, params->ptypes, src,
892                                        ICE_FLOW_PTYPE_MAX);
893                 }
894
895                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
896                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
897                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
898                         ice_and_bitmap(params->ptypes, params->ptypes, src,
899                                        ICE_FLOW_PTYPE_MAX);
900                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
901                         if (!i) {
902                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
903                                 ice_and_bitmap(params->ptypes, params->ptypes,
904                                                src, ICE_FLOW_PTYPE_MAX);
905                         }
906                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
907                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
908                         ice_and_bitmap(params->ptypes, params->ptypes,
909                                        src, ICE_FLOW_PTYPE_MAX);
910                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
911                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
912                         ice_and_bitmap(params->ptypes, params->ptypes,
913                                        src, ICE_FLOW_PTYPE_MAX);
914                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
915                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
916                         ice_and_bitmap(params->ptypes, params->ptypes,
917                                        src, ICE_FLOW_PTYPE_MAX);
918
919                         /* Attributes for GTP packet with downlink */
920                         params->attr = ice_attr_gtpu_down;
921                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
922                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
923                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
924                         ice_and_bitmap(params->ptypes, params->ptypes,
925                                        src, ICE_FLOW_PTYPE_MAX);
926
927                         /* Attributes for GTP packet with uplink */
928                         params->attr = ice_attr_gtpu_up;
929                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
930                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
931                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
932                         ice_and_bitmap(params->ptypes, params->ptypes,
933                                        src, ICE_FLOW_PTYPE_MAX);
934
935                         /* Attributes for GTP packet with Extension Header */
936                         params->attr = ice_attr_gtpu_eh;
937                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
938                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
939                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
940                         ice_and_bitmap(params->ptypes, params->ptypes,
941                                        src, ICE_FLOW_PTYPE_MAX);
942
943                         /* Attributes for GTP packet without Extension Header */
944                         params->attr = ice_attr_gtpu_session;
945                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
946                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
947                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
948                         ice_and_bitmap(params->ptypes, params->ptypes,
949                                        src, ICE_FLOW_PTYPE_MAX);
950                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
951                         src = (const ice_bitmap_t *)ice_ptypes_esp;
952                         ice_and_bitmap(params->ptypes, params->ptypes,
953                                        src, ICE_FLOW_PTYPE_MAX);
954                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
955                         src = (const ice_bitmap_t *)ice_ptypes_ah;
956                         ice_and_bitmap(params->ptypes, params->ptypes,
957                                        src, ICE_FLOW_PTYPE_MAX);
958                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
959                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
960                         ice_and_bitmap(params->ptypes, params->ptypes,
961                                        src, ICE_FLOW_PTYPE_MAX);
962                 }
963
964                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
965                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
966                                 src =
967                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
968                         else
969                                 src =
970                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
971
972                         ice_and_bitmap(params->ptypes, params->ptypes,
973                                        src, ICE_FLOW_PTYPE_MAX);
974                 } else {
975                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
976                         ice_andnot_bitmap(params->ptypes, params->ptypes,
977                                           src, ICE_FLOW_PTYPE_MAX);
978
979                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
980                         ice_andnot_bitmap(params->ptypes, params->ptypes,
981                                           src, ICE_FLOW_PTYPE_MAX);
982                 }
983         }
984
985         return ICE_SUCCESS;
986 }
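
/* Example of the intersection logic above: params->ptypes starts as an
 * all-ones bitmap, and each requested header ANDs in its table.  For an
 * outermost segment with hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP,
 * the result is the intersection of ice_ptypes_ipv4_ofos and
 * ice_ptypes_udp_il, with the PPPoE and PFCP packet types additionally
 * cleared since those headers were not requested.
 */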
987
988 /**
989  * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
990  * @hw: pointer to the HW struct
991  * @params: information about the flow to be processed
992  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
993  *
994  * This function allocates an extraction sequence entry for a DWORD-sized
995  * chunk of the packet flags.
996  */
997 static enum ice_status
998 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
999                           struct ice_flow_prof_params *params,
1000                           enum ice_flex_mdid_pkt_flags flags)
1001 {
1002         u8 fv_words = hw->blk[params->blk].es.fvw;
1003         u8 idx;
1004
1005         /* Make sure the number of extraction sequence entries required does not
1006          * exceed the block's capacity.
1007          */
1008         if (params->es_cnt >= fv_words)
1009                 return ICE_ERR_MAX_LIMIT;
1010
1011         /* some blocks require a reversed field vector layout */
1012         if (hw->blk[params->blk].es.reverse)
1013                 idx = fv_words - params->es_cnt - 1;
1014         else
1015                 idx = params->es_cnt;
1016
1017         params->es[idx].prot_id = ICE_PROT_META_ID;
1018         params->es[idx].off = flags;
1019         params->es_cnt++;
1020
1021         return ICE_SUCCESS;
1022 }
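
/* Note on the reversed layout handled above: when the block's extraction
 * sequence is marked es.reverse, entries are filled from the top of the
 * field vector downward, so the first entry (params->es_cnt == 0) lands at
 * index fv_words - 1, the next at fv_words - 2, and so on; otherwise entries
 * are filled from index 0 upward.  fv_words itself comes from
 * hw->blk[blk].es.fvw and is hardware/block dependent.
 */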
1023
1024 /**
1025  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1026  * @hw: pointer to the HW struct
1027  * @params: information about the flow to be processed
1028  * @seg: packet segment index of the field to be extracted
1029  * @fld: ID of field to be extracted
1030  * @match: bitfield of all fields
1031  *
1032  * This function determines the protocol ID, offset, and size of the given
1033  * field. It then allocates one or more extraction sequence entries for the
1034  * given field, and fills the entries with protocol ID and offset information.
1035  */
1036 static enum ice_status
1037 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1038                     u8 seg, enum ice_flow_field fld, u64 match)
1039 {
1040         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1041         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1042         u8 fv_words = hw->blk[params->blk].es.fvw;
1043         struct ice_flow_fld_info *flds;
1044         u16 cnt, ese_bits, i;
1045         u16 sib_mask = 0;
1046         u16 mask;
1047         u16 off;
1048
1049         flds = params->prof->segs[seg].fields;
1050
1051         switch (fld) {
1052         case ICE_FLOW_FIELD_IDX_ETH_DA:
1053         case ICE_FLOW_FIELD_IDX_ETH_SA:
1054         case ICE_FLOW_FIELD_IDX_S_VLAN:
1055         case ICE_FLOW_FIELD_IDX_C_VLAN:
1056                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1057                 break;
1058         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1059                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1060                 break;
1061         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1062                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1063                 break;
1064         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1065                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1066                 break;
1067         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1068         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1069                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1070
1071                 /* TTL and PROT share the same extraction seq. entry.
1072                  * Each is considered a sibling to the other in terms of sharing
1073                  * the same extraction sequence entry.
1074                  */
1075                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1076                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1077                 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
1078                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1079
1080                 /* If the sibling field is also included, that field's
1081                  * mask needs to be included.
1082                  */
1083                 if (match & BIT(sib))
1084                         sib_mask = ice_flds_info[sib].mask;
1085                 break;
1086         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1087         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1088                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1089
1090                 /* TTL and PROT share the same extraction seq. entry.
1091                  * Each is considered a sibling to the other in terms of sharing
1092                  * the same extraction sequence entry.
1093                  */
1094                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1095                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1096                 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
1097                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1098
1099                 /* If the sibling field is also included, that field's
1100                  * mask needs to be included.
1101                  */
1102                 if (match & BIT(sib))
1103                         sib_mask = ice_flds_info[sib].mask;
1104                 break;
1105         case ICE_FLOW_FIELD_IDX_IPV4_SA:
1106         case ICE_FLOW_FIELD_IDX_IPV4_DA:
1107                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1108                 break;
1109         case ICE_FLOW_FIELD_IDX_IPV6_SA:
1110         case ICE_FLOW_FIELD_IDX_IPV6_DA:
1111         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1112         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1113         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1114         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1115         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1116         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1117                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1118                 break;
1119         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1120         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1121         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1122                 prot_id = ICE_PROT_TCP_IL;
1123                 break;
1124         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1125         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1126                 prot_id = ICE_PROT_UDP_IL_OR_S;
1127                 break;
1128         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1129         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1130                 prot_id = ICE_PROT_SCTP_IL;
1131                 break;
1132         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1133         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1134         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1135         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1136         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1137         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1138                 /* GTP is accessed through UDP OF protocol */
1139                 prot_id = ICE_PROT_UDP_OF;
1140                 break;
1141         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1142                 prot_id = ICE_PROT_PPPOE;
1143                 break;
1144         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1145                 prot_id = ICE_PROT_UDP_IL_OR_S;
1146                 break;
1147         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1148                 prot_id = ICE_PROT_L2TPV3;
1149                 break;
1150         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1151                 prot_id = ICE_PROT_ESP_F;
1152                 break;
1153         case ICE_FLOW_FIELD_IDX_AH_SPI:
1154                 prot_id = ICE_PROT_ESP_2;
1155                 break;
1156         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1157                 prot_id = ICE_PROT_UDP_IL_OR_S;
1158                 break;
1159         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1160         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1161         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1162         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1163         case ICE_FLOW_FIELD_IDX_ARP_OP:
1164                 prot_id = ICE_PROT_ARP_OF;
1165                 break;
1166         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1167         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1168                 /* ICMP type and code share the same extraction seq. entry */
1169                 prot_id = (params->prof->segs[seg].hdrs &
1170                            ICE_FLOW_SEG_HDR_IPV4) ?
1171                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1172                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1173                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1174                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1175                 break;
1176         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1177                 prot_id = ICE_PROT_GRE_OF;
1178                 break;
1179         default:
1180                 return ICE_ERR_NOT_IMPL;
1181         }
1182
1183         /* Each extraction sequence entry is a word in size and extracts
1184          * data at a word-aligned offset within a protocol header.
1185          */
1186         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1187
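	/* ice_flds_info[].off is a bit offset from the start of the protocol
	 * header. Split it into a word-aligned byte offset (xtrct.off) and the
	 * remaining bit displacement within that word (xtrct.disp). With the
	 * 2-byte extraction words used here, a field at bit offset 72
	 * (illustrative value) gives off = (72 / 16) * 2 = 8 bytes and
	 * disp = 72 % 16 = 8 bits.
	 */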
1188         flds[fld].xtrct.prot_id = prot_id;
1189         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1190                 ICE_FLOW_FV_EXTRACT_SZ;
1191         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1192         flds[fld].xtrct.idx = params->es_cnt;
1193         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1194
1195         /* Adjust the next field-entry index after accommodating the number of
1196          * entries this field consumes
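	 * (e.g., a 32-bit field with an 8-bit displacement spans
	 *  DIVIDE_AND_ROUND_UP(8 + 32, 16) = 3 extraction words)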
1197          */
1198         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1199                                   ice_flds_info[fld].size, ese_bits);
1200
1201         /* Fill in the extraction sequence entries needed for this field */
1202         off = flds[fld].xtrct.off;
1203         mask = flds[fld].xtrct.mask;
1204         for (i = 0; i < cnt; i++) {
1205                 /* Only consume an extraction sequence entry if there is no
1206                  * sibling field associated with this field or the sibling entry
1207                  * already extracts the word shared with this field.
1208                  */
1209                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1210                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1211                     flds[sib].xtrct.off != off) {
1212                         u8 idx;
1213
1214                         /* Make sure the number of extraction sequence entries
1215                          * required does not exceed the block's capability
1216                          */
1217                         if (params->es_cnt >= fv_words)
1218                                 return ICE_ERR_MAX_LIMIT;
1219
1220                         /* some blocks require a reversed field vector layout */
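			/* (entries are then written starting from the last field
			 * vector word and working backwards)
			 */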
1221                         if (hw->blk[params->blk].es.reverse)
1222                                 idx = fv_words - params->es_cnt - 1;
1223                         else
1224                                 idx = params->es_cnt;
1225
1226                         params->es[idx].prot_id = prot_id;
1227                         params->es[idx].off = off;
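			/* Merge in the sibling's mask so both fields sharing this
			 * extraction word are matched by the single entry.
			 */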
1228                         params->mask[idx] = mask | sib_mask;
1229                         params->es_cnt++;
1230                 }
1231
1232                 off += ICE_FLOW_FV_EXTRACT_SZ;
1233         }
1234
1235         return ICE_SUCCESS;
1236 }
1237
1238 /**
1239  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1240  * @hw: pointer to the HW struct
1241  * @params: information about the flow to be processed
1242  * @seg: index of packet segment whose raw fields are to be extracted
1243  */
1244 static enum ice_status
1245 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1246                      u8 seg)
1247 {
1248         u16 fv_words;
1249         u16 hdrs_sz;
1250         u8 i;
1251
1252         if (!params->prof->segs[seg].raws_cnt)
1253                 return ICE_SUCCESS;
1254
1255         if (params->prof->segs[seg].raws_cnt >
1256             ARRAY_SIZE(params->prof->segs[seg].raws))
1257                 return ICE_ERR_MAX_LIMIT;
1258
1259         /* Offsets within the segment headers are not supported */
1260         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1261         if (!hdrs_sz)
1262                 return ICE_ERR_PARAM;
1263
1264         fv_words = hw->blk[params->blk].es.fvw;
1265
1266         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1267                 struct ice_flow_seg_fld_raw *raw;
1268                 u16 off, cnt, j;
1269
1270                 raw = &params->prof->segs[seg].raws[i];
1271
1272                 /* Storing extraction information */
1273                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1274                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1275                         ICE_FLOW_FV_EXTRACT_SZ;
1276                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1277                         BITS_PER_BYTE;
1278                 raw->info.xtrct.idx = params->es_cnt;
1279
1280                 /* Determine the number of field vector entries this raw field
1281                  * consumes.
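		 * (src.last holds the length of the raw match pattern in bytes)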
1282                  */
1283                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1284                                           (raw->info.src.last * BITS_PER_BYTE),
1285                                           (ICE_FLOW_FV_EXTRACT_SZ *
1286                                            BITS_PER_BYTE));
1287                 off = raw->info.xtrct.off;
1288                 for (j = 0; j < cnt; j++) {
1289                         u16 idx;
1290
1291                         /* Make sure the number of extraction sequence entries
1292                          * required does not exceed the block's capability
1293                          */
1294                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1295                             params->es_cnt >= ICE_MAX_FV_WORDS)
1296                                 return ICE_ERR_MAX_LIMIT;
1297
1298                         /* some blocks require a reversed field vector layout */
1299                         if (hw->blk[params->blk].es.reverse)
1300                                 idx = fv_words - params->es_cnt - 1;
1301                         else
1302                                 idx = params->es_cnt;
1303
1304                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1305                         params->es[idx].off = off;
1306                         params->es_cnt++;
1307                         off += ICE_FLOW_FV_EXTRACT_SZ;
1308                 }
1309         }
1310
1311         return ICE_SUCCESS;
1312 }
1313
1314 /**
1315  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1316  * @hw: pointer to the HW struct
1317  * @params: information about the flow to be processed
1318  *
1319  * This function iterates through all matched fields in the given segments, and
1320  * creates an extraction sequence for the fields.
1321  */
1322 static enum ice_status
1323 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1324                           struct ice_flow_prof_params *params)
1325 {
1326         enum ice_status status = ICE_SUCCESS;
1327         u8 i;
1328
1329         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1330          * packet flags
1331          */
1332         if (params->blk == ICE_BLK_ACL) {
1333                 status = ice_flow_xtract_pkt_flags(hw, params,
1334                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1335                 if (status)
1336                         return status;
1337         }
1338
1339         for (i = 0; i < params->prof->segs_cnt; i++) {
1340                 u64 match = params->prof->segs[i].match;
1341                 enum ice_flow_field j;
1342
1343                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1344                         const u64 bit = BIT_ULL(j);
1345
1346                         if (match & bit) {
1347                                 status = ice_flow_xtract_fld(hw, params, i, j,
1348                                                              match);
1349                                 if (status)
1350                                         return status;
1351                                 match &= ~bit;
1352                         }
1353                 }
1354
1355                 /* Process raw matching bytes */
1356                 status = ice_flow_xtract_raws(hw, params, i);
1357                 if (status)
1358                         return status;
1359         }
1360
1361         return status;
1362 }
1363
1364 /**
1365  * ice_flow_sel_acl_scen - select an ACL scenario for the flow profile
1366  * @hw: pointer to the hardware structure
1367  * @params: information about the flow to be processed
1368  *
1369  * This function selects the best-fit ACL scenario: the narrowest scenario
1370  * whose effective width can hold the entry length computed in @params.
1371  */
1372 static enum ice_status
1373 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1374 {
1375         /* Find the best-fit scenario for the provided match width */
1376         struct ice_acl_scen *cand_scen = NULL, *scen;
1377
1378         if (!hw->acl_tbl)
1379                 return ICE_ERR_DOES_NOT_EXIST;
1380
1381         /* Loop through the scenarios and compare the computed entry length
1382          * against each scenario's effective width to pick the best fit
1383          */
1384         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1385                 if (scen->eff_width >= params->entry_length &&
1386                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1387                         cand_scen = scen;
1388         if (!cand_scen)
1389                 return ICE_ERR_DOES_NOT_EXIST;
1390
1391         params->prof->cfg.scen = cand_scen;
1392
1393         return ICE_SUCCESS;
1394 }
1395
1396 /**
1397  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1398  * @params: information about the flow to be processed
1399  */
1400 static enum ice_status
1401 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1402 {
1403         u16 index, i, range_idx = 0;
1404
1405         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1406
1407         for (i = 0; i < params->prof->segs_cnt; i++) {
1408                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1409                 u64 match = seg->match;
1410                 u8 j;
1411
1412                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1413                         struct ice_flow_fld_info *fld;
1414                         const u64 bit = BIT_ULL(j);
1415
1416                         if (!(match & bit))
1417                                 continue;
1418
1419                         fld = &seg->fields[j];
1420                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1421
1422                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1423                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1424
1425                                 /* Range checking only supported for single
1426                                  * words
1427                                  */
1428                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1429                                                         fld->xtrct.disp,
1430                                                         BITS_PER_BYTE * 2) > 1)
1431                                         return ICE_ERR_PARAM;
1432
1433                                 /* Ranges must define low and high values */
1434                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1435                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1436                                         return ICE_ERR_PARAM;
1437
1438                                 fld->entry.val = range_idx++;
1439                         } else {
1440                                 /* Store adjusted byte-length of field for later
1441                                  * use, taking into account potential
1442                                  * non-byte-aligned displacement
1443                                  */
1444                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1445                                         (ice_flds_info[j].size +
1446                                          (fld->xtrct.disp % BITS_PER_BYTE),
1447                                          BITS_PER_BYTE);
1448                                 fld->entry.val = index;
1449                                 index += fld->entry.last;
1450                         }
1451
1452                         match &= ~bit;
1453                 }
1454
1455                 for (j = 0; j < seg->raws_cnt; j++) {
1456                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1457
1458                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1459                         raw->info.entry.val = index;
1460                         raw->info.entry.last = raw->info.src.last;
1461                         index += raw->info.entry.last;
1462                 }
1463         }
1464
1465         /* Currently only the byte selection base is supported, which only
1466          * allows for an effective entry size of 30 bytes. Reject anything
1467          * larger.
1468          */
1469         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1470                 return ICE_ERR_PARAM;
1471
1472         /* Only 8 range checkers are available per profile; reject anything
1473          * trying to use more
1474          */
1475         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1476                 return ICE_ERR_PARAM;
1477
1478         /* Store # bytes required for entry for later use */
1479         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1480
1481         return ICE_SUCCESS;
1482 }
1483
1484 /**
1485  * ice_flow_proc_segs - process all packet segments associated with a profile
1486  * @hw: pointer to the HW struct
1487  * @params: information about the flow to be processed
1488  */
1489 static enum ice_status
1490 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1491 {
1492         enum ice_status status;
1493
1494         status = ice_flow_proc_seg_hdrs(params);
1495         if (status)
1496                 return status;
1497
1498         status = ice_flow_create_xtrct_seq(hw, params);
1499         if (status)
1500                 return status;
1501
1502         switch (params->blk) {
1503         case ICE_BLK_FD:
1504         case ICE_BLK_RSS:
1505                 status = ICE_SUCCESS;
1506                 break;
1507         case ICE_BLK_ACL:
1508                 status = ice_flow_acl_def_entry_frmt(params);
1509                 if (status)
1510                         return status;
1511                 status = ice_flow_sel_acl_scen(hw, params);
1512                 if (status)
1513                         return status;
1514                 break;
1515         default:
1516                 return ICE_ERR_NOT_IMPL;
1517         }
1518
1519         return status;
1520 }
1521
1522 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1523 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1524 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1525
1526 /**
1527  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1528  * @hw: pointer to the HW struct
1529  * @blk: classification stage
1530  * @dir: flow direction
1531  * @segs: array of one or more packet segments that describe the flow
1532  * @segs_cnt: number of packet segments provided
1533  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1534  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1535  */
1536 static struct ice_flow_prof *
1537 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1538                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1539                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1540 {
1541         struct ice_flow_prof *p, *prof = NULL;
1542
1543         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1544         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1545                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1546                     segs_cnt && segs_cnt == p->segs_cnt) {
1547                         u8 i;
1548
1549                         /* Check for profile-VSI association if specified */
1550                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1551                             ice_is_vsi_valid(hw, vsi_handle) &&
1552                             !ice_is_bit_set(p->vsis, vsi_handle))
1553                                 continue;
1554
1555                         /* Protocol headers must be checked. Matched fields are
1556                          * checked if specified.
1557                          */
1558                         for (i = 0; i < segs_cnt; i++)
1559                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1560                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1561                                      segs[i].match != p->segs[i].match))
1562                                         break;
1563
1564                         /* A match is found if all segments are matched */
1565                         if (i == segs_cnt) {
1566                                 prof = p;
1567                                 break;
1568                         }
1569                 }
1570         ice_release_lock(&hw->fl_profs_locks[blk]);
1571
1572         return prof;
1573 }
1574
1575 /**
1576  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1577  * @hw: pointer to the HW struct
1578  * @blk: classification stage
1579  * @dir: flow direction
1580  * @segs: array of one or more packet segments that describe the flow
1581  * @segs_cnt: number of packet segments provided
1582  */
1583 u64
1584 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1585                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1586 {
1587         struct ice_flow_prof *p;
1588
1589         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1590                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1591
1592         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1593 }
1594
1595 /**
1596  * ice_flow_find_prof_id - Look up a profile with given profile ID
1597  * @hw: pointer to the HW struct
1598  * @blk: classification stage
1599  * @prof_id: unique ID to identify this flow profile
1600  */
1601 static struct ice_flow_prof *
1602 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1603 {
1604         struct ice_flow_prof *p;
1605
1606         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1607                 if (p->id == prof_id)
1608                         return p;
1609
1610         return NULL;
1611 }
1612
1613 /**
1614  * ice_dealloc_flow_entry - Deallocate flow entry memory
1615  * @hw: pointer to the HW struct
1616  * @entry: flow entry to be removed
1617  */
1618 static void
1619 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1620 {
1621         if (!entry)
1622                 return;
1623
1624         if (entry->entry)
1625                 ice_free(hw, entry->entry);
1626
1627         if (entry->range_buf) {
1628                 ice_free(hw, entry->range_buf);
1629                 entry->range_buf = NULL;
1630         }
1631
1632         if (entry->acts) {
1633                 ice_free(hw, entry->acts);
1634                 entry->acts = NULL;
1635                 entry->acts_cnt = 0;
1636         }
1637
1638         ice_free(hw, entry);
1639 }
1640
1641 #define ICE_ACL_INVALID_SCEN    0x3f
1642
1643 /**
1644  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
1645  * @hw: pointer to the hardware structure
1646  * @prof: pointer to flow profile
1647  * @buf: destination buffer function writes partial extraction sequence to
1648  *
1649  * Returns ICE_SUCCESS if no PF is associated with the given profile,
1650  * ICE_ERR_IN_USE if at least one PF is associated with it, or another
1651  * error code if the profile lookup or query fails.
1652  */
1653 static enum ice_status
1654 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1655                             struct ice_aqc_acl_prof_generic_frmt *buf)
1656 {
1657         enum ice_status status;
1658         u8 prof_id = 0;
1659
1660         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1661         if (status)
1662                 return status;
1663
1664         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1665         if (status)
1666                 return status;
1667
1668         /* pf_scenario_num holds one scenario per PF. If every entry for the
1669          * given profile is 0, or every entry is ICE_ACL_INVALID_SCEN (63),
1670          * the profile has not been configured by any PF yet.
1671          */
1672         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1673             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1674             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1675             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1676                 return ICE_SUCCESS;
1677
1678         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1679             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1680             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1681             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1682             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1683             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1684             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1685             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1686                 return ICE_SUCCESS;
1687         else
1688                 return ICE_ERR_IN_USE;
1689 }
1690
1691 /**
1692  * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1693  * @hw: pointer to the hardware structure
1694  * @acts: array of actions to be performed on a match
1695  * @acts_cnt: number of actions
1696  */
1697 static enum ice_status
1698 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1699                            u8 acts_cnt)
1700 {
1701         int i;
1702
1703         for (i = 0; i < acts_cnt; i++) {
1704                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1705                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1706                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1707                         struct ice_acl_cntrs cntrs;
1708                         enum ice_status status;
1709
1710                         cntrs.bank = 0; /* Only bank0 for the moment */
1711                         cntrs.first_cntr =
1712                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1713                         cntrs.last_cntr =
1714                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1715
1716                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1717                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1718                         else
1719                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1720
1721                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1722                         if (status)
1723                                 return status;
1724                 }
1725         }
1726         return ICE_SUCCESS;
1727 }
1728
1729 /**
1730  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1731  * @hw: pointer to the hardware structure
1732  * @prof: pointer to flow profile
1733  *
1734  * Disassociate the scenario from the profile for the PF of the VSI.
1735  */
1736 static enum ice_status
1737 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1738 {
1739         struct ice_aqc_acl_prof_generic_frmt buf;
1740         enum ice_status status = ICE_SUCCESS;
1741         u8 prof_id = 0;
1742
1743         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1744
1745         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1746         if (status)
1747                 return status;
1748
1749         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1750         if (status)
1751                 return status;
1752
1753         /* Clear scenario for this PF */
1754         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1755         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1756
1757         return status;
1758 }
1759
1760 /**
1761  * ice_flow_rem_entry_sync - Remove a flow entry
1762  * @hw: pointer to the HW struct
1763  * @blk: classification stage
1764  * @entry: flow entry to be removed
1765  */
1766 static enum ice_status
1767 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1768                         struct ice_flow_entry *entry)
1769 {
1770         if (!entry)
1771                 return ICE_ERR_BAD_PTR;
1772
1773         if (blk == ICE_BLK_ACL) {
1774                 enum ice_status status;
1775
1776                 if (!entry->prof)
1777                         return ICE_ERR_BAD_PTR;
1778
1779                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1780                                            entry->scen_entry_idx);
1781                 if (status)
1782                         return status;
1783
1784                 /* Checks if we need to release an ACL counter. */
1785                 if (entry->acts_cnt && entry->acts)
1786                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1787                                                    entry->acts_cnt);
1788         }
1789
1790         LIST_DEL(&entry->l_entry);
1791
1792         ice_dealloc_flow_entry(hw, entry);
1793
1794         return ICE_SUCCESS;
1795 }
1796
1797 /**
1798  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1799  * @hw: pointer to the HW struct
1800  * @blk: classification stage
1801  * @dir: flow direction
1802  * @prof_id: unique ID to identify this flow profile
1803  * @segs: array of one or more packet segments that describe the flow
1804  * @segs_cnt: number of packet segments provided
1805  * @acts: array of default actions
1806  * @acts_cnt: number of default actions
1807  * @prof: stores the returned flow profile added
1808  *
1809  * Assumption: the caller has acquired the lock to the profile list
1810  */
1811 static enum ice_status
1812 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1813                        enum ice_flow_dir dir, u64 prof_id,
1814                        struct ice_flow_seg_info *segs, u8 segs_cnt,
1815                        struct ice_flow_action *acts, u8 acts_cnt,
1816                        struct ice_flow_prof **prof)
1817 {
1818         struct ice_flow_prof_params *params;
1819         enum ice_status status;
1820         u8 i;
1821
1822         if (!prof || (acts_cnt && !acts))
1823                 return ICE_ERR_BAD_PTR;
1824
1825         params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
1826         if (!params)
1827                 return ICE_ERR_NO_MEMORY;
1828
1829         params->prof = (struct ice_flow_prof *)
1830                 ice_malloc(hw, sizeof(*params->prof));
1831         if (!params->prof) {
1832                 status = ICE_ERR_NO_MEMORY;
1833                 goto free_params;
1834         }
1835
1836         /* initialize extraction sequence to all invalid (0xff) */
1837         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1838                 params->es[i].prot_id = ICE_PROT_INVALID;
1839                 params->es[i].off = ICE_FV_OFFSET_INVAL;
1840         }
1841
1842         params->blk = blk;
1843         params->prof->id = prof_id;
1844         params->prof->dir = dir;
1845         params->prof->segs_cnt = segs_cnt;
1846
1847         /* Make a copy of the segments that need to be persistent in the flow
1848          * profile instance
1849          */
1850         for (i = 0; i < segs_cnt; i++)
1851                 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
1852                            ICE_NONDMA_TO_NONDMA);
1853
1854         /* Make a copy of the actions that need to be persistent in the flow
1855          * profile instance.
1856          */
1857         if (acts_cnt) {
1858                 params->prof->acts = (struct ice_flow_action *)
1859                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1860                                    ICE_NONDMA_TO_NONDMA);
1861
1862                 if (!params->prof->acts) {
1863                         status = ICE_ERR_NO_MEMORY;
1864                         goto out;
1865                 }
1866         }
1867
1868         status = ice_flow_proc_segs(hw, params);
1869         if (status) {
1870                 ice_debug(hw, ICE_DBG_FLOW,
1871                           "Error processing a flow's packet segments\n");
1872                 goto out;
1873         }
1874
1875         /* Add a HW profile for this flow profile */
1876         status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
1877                               params->attr, params->attr_cnt, params->es,
1878                               params->mask);
1879         if (status) {
1880                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1881                 goto out;
1882         }
1883
1884         INIT_LIST_HEAD(&params->prof->entries);
1885         ice_init_lock(&params->prof->entries_lock);
1886         *prof = params->prof;
1887
1888 out:
1889         if (status) {
1890                 if (params->prof->acts)
1891                         ice_free(hw, params->prof->acts);
1892                 ice_free(hw, params->prof);
1893         }
1894 free_params:
1895         ice_free(hw, params);
1896
1897         return status;
1898 }
1899
1900 /**
1901  * ice_flow_rem_prof_sync - remove a flow profile
1902  * @hw: pointer to the hardware structure
1903  * @blk: classification stage
1904  * @prof: pointer to flow profile to remove
1905  *
1906  * Assumption: the caller has acquired the lock to the profile list
1907  */
1908 static enum ice_status
1909 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1910                        struct ice_flow_prof *prof)
1911 {
1912         enum ice_status status;
1913
1914         /* Remove all remaining flow entries before removing the flow profile */
1915         if (!LIST_EMPTY(&prof->entries)) {
1916                 struct ice_flow_entry *e, *t;
1917
1918                 ice_acquire_lock(&prof->entries_lock);
1919
1920                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1921                                          l_entry) {
1922                         status = ice_flow_rem_entry_sync(hw, blk, e);
1923                         if (status)
1924                                 break;
1925                 }
1926
1927                 ice_release_lock(&prof->entries_lock);
1928         }
1929
1930         if (blk == ICE_BLK_ACL) {
1931                 struct ice_aqc_acl_profile_ranges query_rng_buf;
1932                 struct ice_aqc_acl_prof_generic_frmt buf;
1933                 u8 prof_id = 0;
1934
1935                 /* Disassociate the scenario from the profile for the PF */
1936                 status = ice_flow_acl_disassoc_scen(hw, prof);
1937                 if (status)
1938                         return status;
1939
1940                 /* Clear the range-checker if the profile ID is no longer
1941                  * used by any PF
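		 * (programming the all-zero ranges buffer below clears every
		 * range checker configured for this profile ID)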
1942                  */
1943                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1944                 if (status && status != ICE_ERR_IN_USE) {
1945                         return status;
1946                 } else if (!status) {
1947                         /* Clear the range-checker value for profile ID */
1948                         ice_memset(&query_rng_buf, 0,
1949                                    sizeof(struct ice_aqc_acl_profile_ranges),
1950                                    ICE_NONDMA_MEM);
1951
1952                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
1953                                                       &prof_id);
1954                         if (status)
1955                                 return status;
1956
1957                         status = ice_prog_acl_prof_ranges(hw, prof_id,
1958                                                           &query_rng_buf, NULL);
1959                         if (status)
1960                                 return status;
1961                 }
1962         }
1963
1964         /* Remove all hardware profiles associated with this flow profile */
1965         status = ice_rem_prof(hw, blk, prof->id);
1966         if (!status) {
1967                 LIST_DEL(&prof->l_entry);
1968                 ice_destroy_lock(&prof->entries_lock);
1969                 if (prof->acts)
1970                         ice_free(hw, prof->acts);
1971                 ice_free(hw, prof);
1972         }
1973
1974         return status;
1975 }
1976
1977 /**
1978  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1979  * @buf: Destination buffer function writes partial xtrct sequence to
1980  * @info: Info about field
1981  */
1982 static void
1983 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1984                                struct ice_flow_fld_info *info)
1985 {
1986         u16 dst, i;
1987         u8 src;
1988
1989         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1990                 info->xtrct.disp / BITS_PER_BYTE;
1991         dst = info->entry.val;
1992         for (i = 0; i < info->entry.last; i++)
1993                 /* HW stores field vector words in LE, convert words back to BE
1994                  * so constructed entries will end up in network order
1995                  */
1996                 buf->byte_selection[dst++] = src++ ^ 1;
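			 * (XOR-ing the byte index with 1 swaps the two bytes of
			 *  each 16-bit field vector word)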
1997 }
1998
1999 /**
2000  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2001  * @hw: pointer to the hardware structure
2002  * @prof: pointer to flow profile
2003  */
2004 static enum ice_status
2005 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2006 {
2007         struct ice_aqc_acl_prof_generic_frmt buf;
2008         struct ice_flow_fld_info *info;
2009         enum ice_status status;
2010         u8 prof_id = 0;
2011         u16 i;
2012
2013         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2014
2015         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2016         if (status)
2017                 return status;
2018
2019         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2020         if (status && status != ICE_ERR_IN_USE)
2021                 return status;
2022
2023         if (!status) {
2024                 /* Program the profile-dependent configuration. This is done
2025                  * only once regardless of the number of PFs using that profile
2026                  */
2027                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2028
2029                 for (i = 0; i < prof->segs_cnt; i++) {
2030                         struct ice_flow_seg_info *seg = &prof->segs[i];
2031                         u64 match = seg->match;
2032                         u16 j;
2033
2034                         for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2035                                 const u64 bit = BIT_ULL(j);
2036
2037                                 if (!(match & bit))
2038                                         continue;
2039
2040                                 info = &seg->fields[j];
2041
2042                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2043                                         buf.word_selection[info->entry.val] =
2044                                                                 info->xtrct.idx;
2045                                 else
2046                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
2047                                                                        info);
2048
2049                                 match &= ~bit;
2050                         }
2051
2052                         for (j = 0; j < seg->raws_cnt; j++) {
2053                                 info = &seg->raws[j].info;
2054                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2055                         }
2056                 }
2057
2058                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2059                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2060                            ICE_NONDMA_MEM);
2061         }
2062
2063         /* Update the current PF */
2064         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2065         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
2066
2067         return status;
2068 }
2069
2070 /**
2071  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2072  * @hw: pointer to the hardware structure
2073  * @blk: classification stage
2074  * @vsi_handle: software VSI handle
2075  * @vsig: target VSI group
2076  *
2077  * Assumption: the caller has already verified that the VSI to
2078  * be added has the same characteristics as the VSIG and will
2079  * thereby have access to all resources added to that VSIG.
2080  */
2081 enum ice_status
2082 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2083                         u16 vsig)
2084 {
2085         enum ice_status status;
2086
2087         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2088                 return ICE_ERR_PARAM;
2089
2090         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2091         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2092                                   vsig);
2093         ice_release_lock(&hw->fl_profs_locks[blk]);
2094
2095         return status;
2096 }
2097
2098 /**
2099  * ice_flow_assoc_prof - associate a VSI with a flow profile
2100  * @hw: pointer to the hardware structure
2101  * @blk: classification stage
2102  * @prof: pointer to flow profile
2103  * @vsi_handle: software VSI handle
2104  *
2105  * Assumption: the caller has acquired the lock to the profile list
2106  * and the software VSI handle has been validated
2107  */
2108 static enum ice_status
2109 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2110                     struct ice_flow_prof *prof, u16 vsi_handle)
2111 {
2112         enum ice_status status = ICE_SUCCESS;
2113
2114         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2115                 if (blk == ICE_BLK_ACL) {
2116                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2117                         if (status)
2118                                 return status;
2119                 }
2120                 status = ice_add_prof_id_flow(hw, blk,
2121                                               ice_get_hw_vsi_num(hw,
2122                                                                  vsi_handle),
2123                                               prof->id);
2124                 if (!status)
2125                         ice_set_bit(vsi_handle, prof->vsis);
2126                 else
2127                         ice_debug(hw, ICE_DBG_FLOW,
2128                                   "HW profile add failed, %d\n",
2129                                   status);
2130         }
2131
2132         return status;
2133 }
2134
2135 /**
2136  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2137  * @hw: pointer to the hardware structure
2138  * @blk: classification stage
2139  * @prof: pointer to flow profile
2140  * @vsi_handle: software VSI handle
2141  *
2142  * Assumption: the caller has acquired the lock to the profile list
2143  * and the software VSI handle has been validated
2144  */
2145 static enum ice_status
2146 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2147                        struct ice_flow_prof *prof, u16 vsi_handle)
2148 {
2149         enum ice_status status = ICE_SUCCESS;
2150
2151         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2152                 status = ice_rem_prof_id_flow(hw, blk,
2153                                               ice_get_hw_vsi_num(hw,
2154                                                                  vsi_handle),
2155                                               prof->id);
2156                 if (!status)
2157                         ice_clear_bit(vsi_handle, prof->vsis);
2158                 else
2159                         ice_debug(hw, ICE_DBG_FLOW,
2160                                   "HW profile remove failed, %d\n",
2161                                   status);
2162         }
2163
2164         return status;
2165 }
2166
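/* Illustrative usage sketch for the flow profile API below. This is not part
 * of the driver; the segment setup and the dir/prof_id values are examples
 * only:
 *
 *	struct ice_flow_seg_info segs[1];
 *	struct ice_flow_prof *prof = NULL;
 *	enum ice_status status;
 *
 *	ice_memset(segs, 0, sizeof(segs), ICE_NONDMA_MEM);
 *	segs[0].hdrs = ICE_FLOW_SEG_HDR_IPV4;
 *
 *	status = ice_flow_add_prof(hw, ICE_BLK_RSS, dir, prof_id,
 *				   segs, 1, NULL, 0, &prof);
 *	...
 *	status = ice_flow_rem_prof(hw, ICE_BLK_RSS, prof_id);
 */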
2167 /**
2168  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2169  * @hw: pointer to the HW struct
2170  * @blk: classification stage
2171  * @dir: flow direction
2172  * @prof_id: unique ID to identify this flow profile
2173  * @segs: array of one or more packet segments that describe the flow
2174  * @segs_cnt: number of packet segments provided
2175  * @acts: array of default actions
2176  * @acts_cnt: number of default actions
2177  * @prof: stores the returned flow profile added
2178  */
2179 enum ice_status
2180 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2181                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2182                   struct ice_flow_action *acts, u8 acts_cnt,
2183                   struct ice_flow_prof **prof)
2184 {
2185         enum ice_status status;
2186
2187         if (segs_cnt > ICE_FLOW_SEG_MAX)
2188                 return ICE_ERR_MAX_LIMIT;
2189
2190         if (!segs_cnt)
2191                 return ICE_ERR_PARAM;
2192
2193         if (!segs)
2194                 return ICE_ERR_BAD_PTR;
2195
2196         status = ice_flow_val_hdrs(segs, segs_cnt);
2197         if (status)
2198                 return status;
2199
2200         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2201
2202         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2203                                         acts, acts_cnt, prof);
2204         if (!status)
2205                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2206
2207         ice_release_lock(&hw->fl_profs_locks[blk]);
2208
2209         return status;
2210 }
2211
2212 /**
2213  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2214  * @hw: pointer to the HW struct
2215  * @blk: the block for which the flow profile is to be removed
2216  * @prof_id: unique ID of the flow profile to be removed
2217  */
2218 enum ice_status
2219 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2220 {
2221         struct ice_flow_prof *prof;
2222         enum ice_status status;
2223
2224         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2225
2226         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2227         if (!prof) {
2228                 status = ICE_ERR_DOES_NOT_EXIST;
2229                 goto out;
2230         }
2231
2232         /* prof becomes invalid after the call */
2233         status = ice_flow_rem_prof_sync(hw, blk, prof);
2234
2235 out:
2236         ice_release_lock(&hw->fl_profs_locks[blk]);
2237
2238         return status;
2239 }
2240
2241 /**
2242  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2243  * @hw: pointer to the HW struct
2244  * @blk: classification stage
2245  * @prof_id: the profile ID handle
2246  * @hw_prof_id: pointer to variable to receive the HW profile ID
2247  */
2248 enum ice_status
2249 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2250                      u8 *hw_prof_id)
2251 {
2252         enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2253         struct ice_prof_map *map;
2254
2255         ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2256         map = ice_search_prof_id(hw, blk, prof_id);
2257         if (map) {
2258                 *hw_prof_id = map->prof_id;
2259                 status = ICE_SUCCESS;
2260         }
2261         ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2262         return status;
2263 }
2264
2265 /**
2266  * ice_flow_find_entry - look for a flow entry using its unique ID
2267  * @hw: pointer to the HW struct
2268  * @blk: classification stage
2269  * @entry_id: unique ID to identify this flow entry
2270  *
2271  * This function looks for the flow entry with the specified unique ID in all
2272  * flow profiles of the specified classification stage. If the entry is found,
2273  * it returns the handle to the flow entry. Otherwise, it returns
2274  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2275  */
2276 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2277 {
2278         struct ice_flow_entry *found = NULL;
2279         struct ice_flow_prof *p;
2280
2281         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2282
2283         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2284                 struct ice_flow_entry *e;
2285
2286                 ice_acquire_lock(&p->entries_lock);
2287                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2288                         if (e->id == entry_id) {
2289                                 found = e;
2290                                 break;
2291                         }
2292                 ice_release_lock(&p->entries_lock);
2293
2294                 if (found)
2295                         break;
2296         }
2297
2298         ice_release_lock(&hw->fl_profs_locks[blk]);
2299
2300         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2301 }
2302
2303 /**
2304  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2305  * @hw: pointer to the hardware structure
2306  * @acts: array of actions to be performed on a match
2307  * @acts_cnt: number of actions
2308  * @cnt_alloc: indicates if an ACL counter has been allocated.
2309  */
2310 static enum ice_status
2311 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2312                            u8 acts_cnt, bool *cnt_alloc)
2313 {
2314         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2315         int i;
2316
2317         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2318         *cnt_alloc = false;
2319
2320         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2321                 return ICE_ERR_OUT_OF_RANGE;
2322
2323         for (i = 0; i < acts_cnt; i++) {
2324                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2325                     acts[i].type != ICE_FLOW_ACT_DROP &&
2326                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2327                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2328                         return ICE_ERR_CFG;
2329
2330                 /* If the caller wants to add two actions of the same type, then
2331                  * it is considered invalid configuration.
2332                  */
2333                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2334                         return ICE_ERR_PARAM;
2335         }
2336
2337         /* Checks if ACL counters are needed. */
2338         for (i = 0; i < acts_cnt; i++) {
2339                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2340                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2341                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2342                         struct ice_acl_cntrs cntrs;
2343                         enum ice_status status;
2344
2345                         cntrs.amount = 1;
2346                         cntrs.bank = 0; /* Only bank0 for the moment */
2347
2348                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2349                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2350                         else
2351                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2352
2353                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2354                         if (status)
2355                                 return status;
2356                         /* Counter index within the bank */
2357                         acts[i].data.acl_act.value =
2358                                                 CPU_TO_LE16(cntrs.first_cntr);
2359                         *cnt_alloc = true;
2360                 }
2361         }
2362
2363         return ICE_SUCCESS;
2364 }
2365
2366 /**
2367  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2368  * @fld: number of the given field
2369  * @info: info about field
2370  * @range_buf: range checker configuration buffer
2371  * @data: pointer to a data buffer containing flow entry's match values/masks
2372  * @range: Input/output param indicating which range checkers are being used
2373  */
2374 static void
2375 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2376                               struct ice_aqc_acl_profile_ranges *range_buf,
2377                               u8 *data, u8 *range)
2378 {
2379         u16 new_mask;
2380
2381         /* If not specified, default mask is all bits in field */
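	/* (BIT(size) - 1 builds an all-ones mask as wide as the field, in bits;
	 * whichever mask is used is then shifted left by the extraction bit
	 * displacement)
	 */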
2382         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2383                     BIT(ice_flds_info[fld].size) - 1 :
2384                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2385
2386         /* If the mask is 0, then we don't need to worry about this input
2387          * range checker value.
2388          */
2389         if (new_mask) {
2390                 u16 new_high =
2391                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2392                 u16 new_low =
2393                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2394                 u8 range_idx = info->entry.val;
2395
2396                 range_buf->checker_cfg[range_idx].low_boundary =
2397                         CPU_TO_BE16(new_low);
2398                 range_buf->checker_cfg[range_idx].high_boundary =
2399                         CPU_TO_BE16(new_high);
2400                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2401
2402                 /* Indicate which range checker is being used */
2403                 *range |= BIT(range_idx);
2404         }
2405 }
2406
2407 /**
2408  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2409  * @fld: number of the given field
2410  * @info: info about the field
2411  * @buf: buffer containing the entry
2412  * @dontcare: buffer containing don't care mask for entry
2413  * @data: pointer to a data buffer containing flow entry's match values/masks
2414  */
2415 static void
2416 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2417                             u8 *dontcare, u8 *data)
2418 {
2419         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2420         bool use_mask = false;
2421         u8 disp;
2422
2423         src = info->src.val;
2424         mask = info->src.mask;
2425         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2426         disp = info->xtrct.disp % BITS_PER_BYTE;
2427
2428         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2429                 use_mask = true;
2430
2431         for (k = 0; k < info->entry.last; k++, dst++) {
2432                 /* Add overflow bits from previous byte */
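	/* Copy the field's value (and mask, if provided) shifted left by 'disp'
	 * bits; bits that overflow a byte are carried into the next destination
	 * byte on the following iteration.
	 */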
2433                 buf[dst] = (tmp_s & 0xff00) >> 8;
2434
2435                 /* If the mask is not valid, tmp_m is always zero, so this just
2436                  * sets dontcare to 0 (no masked bits). If the mask is valid, this
2437                  * pulls in the mask's overflow bits from the previous byte.
2438                  */
2439                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2440
2441                 /* If there is displacement, the last byte holds only bits
2442                  * displaced from the previous byte and there is no more data to
2443                  * read from the user buffer, so skip the read to avoid going
2444                  * past the end of that buffer.
2445                  */
2446                 if (!disp || k < info->entry.last - 1) {
2447                         /* Store shifted data to use in next byte */
2448                         tmp_s = data[src++] << disp;
2449
2450                         /* Add current (shifted) byte */
2451                         buf[dst] |= tmp_s & 0xff;
2452
2453                         /* Handle mask if valid */
2454                         if (use_mask) {
2455                                 tmp_m = (~data[mask++] & 0xff) << disp;
2456                                 dontcare[dst] |= tmp_m & 0xff;
2457                         }
2458                 }
2459         }
2460
2461         /* Fill in don't care bits at beginning of field */
2462         if (disp) {
2463                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2464                 for (k = 0; k < disp; k++)
2465                         dontcare[dst] |= BIT(k);
2466         }
2467
2468         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2469
2470         /* Fill in don't care bits at end of field */
2471         if (end_disp) {
2472                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2473                       info->entry.last - 1;
2474                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2475                         dontcare[dst] |= BIT(k);
2476         }
2477 }
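
/* Worked example for the displacement handling above (illustrative only, not
 * derived from a real extraction sequence): assume a 16-bit field whose
 * extraction starts 4 bits into a byte, so disp == 4, and assume it spans
 * info->entry.last == 3 key bytes.
 *   - Key byte 0: data[src] << 4; the low nibble is left as don't care.
 *   - Key byte 1: spill-over nibble of data[src] | (data[src + 1] << 4).
 *   - Key byte 2: spill-over nibble of data[src + 1] only; no further user
 *     data is read because k == info->entry.last - 1 and disp != 0.
 * end_disp = (4 + 16) % 8 == 4, so the upper 4 bits of key byte 2 are also
 * marked don't care.
 */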
2478
2479 /**
2480  * ice_flow_acl_frmt_entry - Format ACL entry
2481  * @hw: pointer to the hardware structure
2482  * @prof: pointer to flow profile
2483  * @e: pointer to the flow entry
2484  * @data: pointer to a data buffer containing flow entry's match values/masks
2485  * @acts: array of actions to be performed on a match
2486  * @acts_cnt: number of actions
2487  *
2488  * Formats the key (and key_inverse) to be matched from the data passed in,
2489  * along with data from the flow profile. This key/key_inverse pair makes up
2490  * the 'entry' for an ACL flow entry.
2491  */
2492 static enum ice_status
2493 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2494                         struct ice_flow_entry *e, u8 *data,
2495                         struct ice_flow_action *acts, u8 acts_cnt)
2496 {
2497         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2498         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2499         enum ice_status status;
2500         bool cnt_alloc;
2501         u8 prof_id = 0;
2502         u16 i, buf_sz;
2503
2504         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2505         if (status)
2506                 return status;
2507
2508         /* Validate the requested actions */
2509
2510         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2511         if (status)
2512                 return status;
2513
2514         status = ICE_ERR_NO_MEMORY;
2515
2516         e->acts = (struct ice_flow_action *)
2517                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2518                            ICE_NONDMA_TO_NONDMA);
2519
2520         if (!e->acts)
2521                 goto out;
2522
2523         e->acts_cnt = acts_cnt;
2524
2525         /* Format the matching data */
2526         buf_sz = prof->cfg.scen->width;
2527         buf = (u8 *)ice_malloc(hw, buf_sz);
2528         if (!buf)
2529                 goto out;
2530
2531         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2532         if (!dontcare)
2533                 goto out;
2534
2535         /* 'key' buffer will store both key and key_inverse, so must be twice
2536          * the size of buf
2537          */
2538         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2539         if (!key)
2540                 goto out;
2541
2542         range_buf = (struct ice_aqc_acl_profile_ranges *)
2543                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2544         if (!range_buf)
2545                 goto out;
2546
2547         /* Set don't care mask to all 1's to start, will zero out used bytes */
2548         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2549
2550         for (i = 0; i < prof->segs_cnt; i++) {
2551                 struct ice_flow_seg_info *seg = &prof->segs[i];
2552                 u64 match = seg->match;
2553                 u16 j;
2554
2555                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2556                         struct ice_flow_fld_info *info;
2557                         const u64 bit = BIT_ULL(j);
2558
2559                         if (!(match & bit))
2560                                 continue;
2561
2562                         info = &seg->fields[j];
2563
2564                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2565                                 ice_flow_acl_frmt_entry_range(j, info,
2566                                                               range_buf, data,
2567                                                               &range);
2568                         else
2569                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2570                                                             dontcare, data);
2571
2572                         match &= ~bit;
2573                 }
2574
2575                 for (j = 0; j < seg->raws_cnt; j++) {
2576                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2577                         u16 dst, src, mask, k;
2578                         bool use_mask = false;
2579
2580                         src = info->src.val;
2581                         dst = info->entry.val -
2582                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2583                         mask = info->src.mask;
2584
2585                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2586                                 use_mask = true;
2587
2588                         for (k = 0; k < info->entry.last; k++, dst++) {
2589                                 buf[dst] = data[src++];
2590                                 if (use_mask)
2591                                         dontcare[dst] = ~data[mask++];
2592                                 else
2593                                         dontcare[dst] = 0;
2594                         }
2595                 }
2596         }
2597
2598         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2599         dontcare[prof->cfg.scen->pid_idx] = 0;
2600
2601         /* Format the buffer for direction flags */
2602         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2603
2604         if (prof->dir == ICE_FLOW_RX)
2605                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2606
2607         if (range) {
2608                 buf[prof->cfg.scen->rng_chk_idx] = range;
2609                 /* Mark any unused range checkers as don't care */
2610                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2611                 e->range_buf = range_buf;
2612         } else {
2613                 ice_free(hw, range_buf);
2614         }
2615
2616         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2617                              buf_sz);
2618         if (status)
2619                 goto out;
2620
2621         e->entry = key;
2622         e->entry_sz = buf_sz * 2;
2623
2624 out:
2625         if (buf)
2626                 ice_free(hw, buf);
2627
2628         if (dontcare)
2629                 ice_free(hw, dontcare);
2630
2631         if (status && key)
2632                 ice_free(hw, key);
2633
2634         if (status && range_buf) {
2635                 ice_free(hw, range_buf);
2636                 e->range_buf = NULL;
2637         }
2638
2639         if (status && e->acts) {
2640                 ice_free(hw, e->acts);
2641                 e->acts = NULL;
2642                 e->acts_cnt = 0;
2643         }
2644
2645         if (status && cnt_alloc)
2646                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2647
2648         return status;
2649 }
2650
2651 /**
2652  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2653  *                                     the compared data.
2654  * @prof: pointer to flow profile
2655  * @e: pointer to the comparing flow entry
2656  * @do_chg_action: decide if we want to change the ACL action
2657  * @do_add_entry: decide if we want to add the new ACL entry
2658  * @do_rem_entry: decide if we want to remove the current ACL entry
2659  *
2660  * Find an ACL scenario entry that matches the compared data. At the same time,
2661  * this function also figures out:
2662  * a/ If we want to change the ACL action
2663  * b/ If we want to add the new ACL entry
2664  * c/ If we want to remove the current ACL entry
2665  */
2666 static struct ice_flow_entry *
2667 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2668                                   struct ice_flow_entry *e, bool *do_chg_action,
2669                                   bool *do_add_entry, bool *do_rem_entry)
2670 {
2671         struct ice_flow_entry *p, *return_entry = NULL;
2672         u8 i, j;
2673
2674         /* Check if:
2675          * a/ There exists an entry with same matching data, but different
2676          *    priority, then we remove this existing ACL entry. Then, we
2677          *    will add the new entry to the ACL scenario.
2678          * b/ There exists an entry with same matching data, priority, and
2679          *    result action, then we do nothing
2680          * c/ There exists an entry with same matching data and priority, but a
2681          *    different action, then we only change the existing entry's action.
2682          * d/ Else, we add this new entry to the ACL scenario.
2683          */
2684         *do_chg_action = false;
2685         *do_add_entry = true;
2686         *do_rem_entry = false;
2687         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2688                 if (memcmp(p->entry, e->entry, p->entry_sz))
2689                         continue;
2690
2691                 /* From this point, we have the same matching_data. */
2692                 *do_add_entry = false;
2693                 return_entry = p;
2694
2695                 if (p->priority != e->priority) {
2696                         /* matching data && !priority */
2697                         *do_add_entry = true;
2698                         *do_rem_entry = true;
2699                         break;
2700                 }
2701
2702                 /* From this point, we will have matching_data && priority */
2703                 if (p->acts_cnt != e->acts_cnt)
2704                         *do_chg_action = true;
2705                 for (i = 0; i < p->acts_cnt; i++) {
2706                         bool found_not_match = false;
2707
2708                         for (j = 0; j < e->acts_cnt; j++)
2709                                 if (memcmp(&p->acts[i], &e->acts[j],
2710                                            sizeof(struct ice_flow_action))) {
2711                                         found_not_match = true;
2712                                         break;
2713                                 }
2714
2715                         if (found_not_match) {
2716                                 *do_chg_action = true;
2717                                 break;
2718                         }
2719                 }
2720
2721                 /* (do_chg_action = true) means :
2722                  *    matching_data && priority && !result_action
2723                  * (do_chg_action = false) means :
2724                  *    matching_data && priority && result_action
2725                  */
2726                 break;
2727         }
2728
2729         return return_entry;
2730 }
2731
2732 /**
2733  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2734  * @p: flow priority
2735  */
2736 static enum ice_acl_entry_prior
2737 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2738 {
2739         enum ice_acl_entry_prior acl_prior;
2740
2741         switch (p) {
2742         case ICE_FLOW_PRIO_LOW:
2743                 acl_prior = ICE_LOW;
2744                 break;
2745         case ICE_FLOW_PRIO_NORMAL:
2746                 acl_prior = ICE_NORMAL;
2747                 break;
2748         case ICE_FLOW_PRIO_HIGH:
2749                 acl_prior = ICE_HIGH;
2750                 break;
2751         default:
2752                 acl_prior = ICE_NORMAL;
2753                 break;
2754         }
2755
2756         return acl_prior;
2757 }
2758
2759 /**
2760  * ice_flow_acl_union_rng_chk - Perform union operation between two
2761  *                              range checker buffers
2762  * @dst_buf: pointer to destination range checker buffer
2763  * @src_buf: pointer to source range checker buffer
2764  *
2765  * For this function, we take the union of the dst_buf and src_buf range
2766  * checker buffers and save the result back to dst_buf
2767  */
2768 static enum ice_status
2769 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2770                            struct ice_aqc_acl_profile_ranges *src_buf)
2771 {
2772         u8 i, j;
2773
2774         if (!dst_buf || !src_buf)
2775                 return ICE_ERR_BAD_PTR;
2776
2777         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2778                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2779                 bool will_populate = false;
2780
2781                 in_data = &src_buf->checker_cfg[i];
2782
2783                 if (!in_data->mask)
2784                         break;
2785
2786                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2787                         cfg_data = &dst_buf->checker_cfg[j];
2788
2789                         if (!cfg_data->mask ||
2790                             !memcmp(cfg_data, in_data,
2791                                     sizeof(struct ice_acl_rng_data))) {
2792                                 will_populate = true;
2793                                 break;
2794                         }
2795                 }
2796
2797                 if (will_populate) {
2798                         ice_memcpy(cfg_data, in_data,
2799                                    sizeof(struct ice_acl_rng_data),
2800                                    ICE_NONDMA_TO_NONDMA);
2801                 } else {
2802                         /* No available slot left to program range checker */
2803                         return ICE_ERR_MAX_LIMIT;
2804                 }
2805         }
2806
2807         return ICE_SUCCESS;
2808 }
2809
2810 /**
2811  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2812  * @hw: pointer to the hardware structure
2813  * @prof: pointer to flow profile
2814  * @entry: double pointer to the flow entry
2815  *
2816  * For this function, we will look at the currently added entries in the
2817  * corresponding ACL scenario. Then, we will perform matching logic to
2818  * see if we want to add/modify/do nothing with this new entry.
2819  */
2820 static enum ice_status
2821 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2822                                  struct ice_flow_entry **entry)
2823 {
2824         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2825         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2826         struct ice_acl_act_entry *acts = NULL;
2827         struct ice_flow_entry *exist;
2828         enum ice_status status = ICE_SUCCESS;
2829         struct ice_flow_entry *e;
2830         u8 i;
2831
2832         if (!entry || !(*entry) || !prof)
2833                 return ICE_ERR_BAD_PTR;
2834
2835         e = *(entry);
2836
2837         do_chg_rng_chk = false;
2838         if (e->range_buf) {
2839                 u8 prof_id = 0;
2840
2841                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2842                                               &prof_id);
2843                 if (status)
2844                         return status;
2845
2846                 /* Query the current range-checker value in FW */
2847                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2848                                                    NULL);
2849                 if (status)
2850                         return status;
2851                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2852                            sizeof(struct ice_aqc_acl_profile_ranges),
2853                            ICE_NONDMA_TO_NONDMA);
2854
2855                 /* Generate the new range-checker value */
2856                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2857                 if (status)
2858                         return status;
2859
2860                 /* Reconfigure the range check if the buffer is changed. */
2861                 do_chg_rng_chk = false;
2862                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2863                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2864                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2865                                                           &cfg_rng_buf, NULL);
2866                         if (status)
2867                                 return status;
2868
2869                         do_chg_rng_chk = true;
2870                 }
2871         }
2872
2873         /* Figure out if we want to (change the ACL action) and/or
2874          * (add the new ACL entry) and/or (remove the current ACL entry)
2875          */
2876         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2877                                                   &do_add_entry, &do_rem_entry);
2878
2879         if (do_rem_entry) {
2880                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2881                 if (status)
2882                         return status;
2883         }
2884
2885         /* Prepare the result action buffer */
2886         acts = (struct ice_acl_act_entry *)
2887                 ice_calloc(hw, e->acts_cnt, sizeof(struct ice_acl_act_entry));
        if (!acts)
                return ICE_ERR_NO_MEMORY;
2888         for (i = 0; i < e->acts_cnt; i++)
2889                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2890                            sizeof(struct ice_acl_act_entry),
2891                            ICE_NONDMA_TO_NONDMA);
2892
2893         if (do_add_entry) {
2894                 enum ice_acl_entry_prior prior;
2895                 u8 *keys, *inverts;
2896                 u16 entry_idx;
2897
2898                 keys = (u8 *)e->entry;
2899                 inverts = keys + (e->entry_sz / 2);
2900                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2901
2902                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2903                                            inverts, acts, e->acts_cnt,
2904                                            &entry_idx);
2905                 if (status)
2906                         goto out;
2907
2908                 e->scen_entry_idx = entry_idx;
2909                 LIST_ADD(&e->l_entry, &prof->entries);
2910         } else {
2911                 if (do_chg_action) {
2912                         /* For the action memory info, update the SW's copy of
2913                          * the existing entry with e's action memory info
2914                          */
2915                         ice_free(hw, exist->acts);
2916                         exist->acts_cnt = e->acts_cnt;
2917                         exist->acts = (struct ice_flow_action *)
2918                                 ice_calloc(hw, exist->acts_cnt,
2919                                            sizeof(struct ice_flow_action));
2920
2921                         if (!exist->acts) {
2922                                 status = ICE_ERR_NO_MEMORY;
2923                                 goto out;
2924                         }
2925
2926                         ice_memcpy(exist->acts, e->acts,
2927                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2928                                    ICE_NONDMA_TO_NONDMA);
2929
2930                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2931                                                   e->acts_cnt,
2932                                                   exist->scen_entry_idx);
2933                         if (status)
2934                                 goto out;
2935                 }
2936
2937                 if (do_chg_rng_chk) {
2938                         /* In this case, we want to update the range checker
2939                          * information of the existing entry
2940                          */
2941                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2942                                                             e->range_buf);
2943                         if (status)
2944                                 goto out;
2945                 }
2946
2947                 /* As we don't add the new entry to our SW DB, free its memory
2948                  * and return the existing entry to the caller
2949                  */
2950                 ice_dealloc_flow_entry(hw, e);
2951                 *(entry) = exist;
2952         }
2953 out:
2954         if (acts)
2955                 ice_free(hw, acts);
2956
2957         return status;
2958 }
2959
2960 /**
2961  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2962  * @hw: pointer to the hardware structure
2963  * @prof: pointer to flow profile
2964  * @e: double pointer to the flow entry
2965  */
2966 static enum ice_status
2967 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2968                             struct ice_flow_entry **e)
2969 {
2970         enum ice_status status;
2971
2972         ice_acquire_lock(&prof->entries_lock);
2973         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2974         ice_release_lock(&prof->entries_lock);
2975
2976         return status;
2977 }
2978
2979 /**
2980  * ice_flow_add_entry - Add a flow entry
2981  * @hw: pointer to the HW struct
2982  * @blk: classification stage
2983  * @prof_id: ID of the profile to add a new flow entry to
2984  * @entry_id: unique ID to identify this flow entry
2985  * @vsi_handle: software VSI handle for the flow entry
2986  * @prio: priority of the flow entry
2987  * @data: pointer to a data buffer containing flow entry's match values/masks
2988  * @acts: arrays of actions to be performed on a match
2989  * @acts_cnt: number of actions
2990  * @entry_h: pointer to buffer that receives the new flow entry's handle
2991  */
2992 enum ice_status
2993 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2994                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2995                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
2996                    u64 *entry_h)
2997 {
2998         struct ice_flow_entry *e = NULL;
2999         struct ice_flow_prof *prof;
3000         enum ice_status status = ICE_SUCCESS;
3001
3002         /* ACL entries must indicate an action */
3003         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3004                 return ICE_ERR_PARAM;
3005
3006         /* No flow entry data is expected for RSS */
3007         if (!entry_h || (!data && blk != ICE_BLK_RSS))
3008                 return ICE_ERR_BAD_PTR;
3009
3010         if (!ice_is_vsi_valid(hw, vsi_handle))
3011                 return ICE_ERR_PARAM;
3012
3013         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3014
3015         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3016         if (!prof) {
3017                 status = ICE_ERR_DOES_NOT_EXIST;
3018         } else {
3019                 /* Allocate memory for the entry being added and associate
3020                  * the VSI to the found flow profile
3021                  */
3022                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3023                 if (!e)
3024                         status = ICE_ERR_NO_MEMORY;
3025                 else
3026                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3027         }
3028
3029         ice_release_lock(&hw->fl_profs_locks[blk]);
3030         if (status)
3031                 goto out;
3032
3033         e->id = entry_id;
3034         e->vsi_handle = vsi_handle;
3035         e->prof = prof;
3036         e->priority = prio;
3037
3038         switch (blk) {
3039         case ICE_BLK_FD:
3040         case ICE_BLK_RSS:
3041                 break;
3042         case ICE_BLK_ACL:
3043                 /* ACL will handle the entry management */
3044                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3045                                                  acts_cnt);
3046                 if (status)
3047                         goto out;
3048
3049                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3050                 if (status)
3051                         goto out;
3052
3053                 break;
3054         default:
3055                 status = ICE_ERR_NOT_IMPL;
3056                 goto out;
3057         }
3058
3059         if (blk != ICE_BLK_ACL) {
3060                 /* ACL will handle the entry management */
3061                 ice_acquire_lock(&prof->entries_lock);
3062                 LIST_ADD(&e->l_entry, &prof->entries);
3063                 ice_release_lock(&prof->entries_lock);
3064         }
3065
3066         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3067
3068 out:
3069         if (status && e) {
3070                 if (e->entry)
3071                         ice_free(hw, e->entry);
3072                 ice_free(hw, e);
3073         }
3074
3075         return status;
3076 }
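
/* Usage sketch (illustrative, not taken from the driver): add an ACL entry
 * to a previously created profile and remove it again. prof_id, entry_id,
 * vsi_handle and match_data are hypothetical placeholders supplied by the
 * caller, and act must be filled with a valid ACL action beforehand.
 *
 *        struct ice_flow_action act = { 0 };
 *        u64 entry_h = ICE_FLOW_ENTRY_HANDLE_INVAL;
 *        enum ice_status status;
 *
 *        status = ice_flow_add_entry(hw, ICE_BLK_ACL, prof_id, entry_id,
 *                                    vsi_handle, ICE_FLOW_PRIO_NORMAL,
 *                                    match_data, &act, 1, &entry_h);
 *        if (!status)
 *                status = ice_flow_rem_entry(hw, ICE_BLK_ACL, entry_h);
 */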
3077
3078 /**
3079  * ice_flow_rem_entry - Remove a flow entry
3080  * @hw: pointer to the HW struct
3081  * @blk: classification stage
3082  * @entry_h: handle to the flow entry to be removed
3083  */
3084 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3085                                    u64 entry_h)
3086 {
3087         struct ice_flow_entry *entry;
3088         struct ice_flow_prof *prof;
3089         enum ice_status status = ICE_SUCCESS;
3090
3091         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3092                 return ICE_ERR_PARAM;
3093
3094         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
3095
3096         /* Retain the pointer to the flow profile as the entry will be freed */
3097         prof = entry->prof;
3098
3099         if (prof) {
3100                 ice_acquire_lock(&prof->entries_lock);
3101                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3102                 ice_release_lock(&prof->entries_lock);
3103         }
3104
3105         return status;
3106 }
3107
3108 /**
3109  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3110  * @seg: packet segment the field being set belongs to
3111  * @fld: field to be set
3112  * @field_type: type of the field
3113  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3114  *           entry's input buffer
3115  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3116  *            input buffer
3117  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3118  *            entry's input buffer
3119  *
3120  * This helper function stores information about a field being matched, including
3121  * the type of the field and the locations of the value to match, the mask,
3122  * and the upper-bound value at the start of the input buffer for a flow entry.
3123  * This function should only be used for fixed-size data structures.
3124  *
3125  * This function also opportunistically determines the protocol headers to be
3126  * present based on the fields being set. Some fields cannot be used alone to
3127  * determine the protocol headers present. Sometimes, fields for particular
3128  * protocol headers are not matched. In those cases, the protocol headers
3129  * must be explicitly set.
3130  */
3131 static void
3132 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3133                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3134                      u16 mask_loc, u16 last_loc)
3135 {
3136         u64 bit = BIT_ULL(fld);
3137
3138         seg->match |= bit;
3139         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3140                 seg->range |= bit;
3141
3142         seg->fields[fld].type = field_type;
3143         seg->fields[fld].src.val = val_loc;
3144         seg->fields[fld].src.mask = mask_loc;
3145         seg->fields[fld].src.last = last_loc;
3146
3147         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3148 }
3149
3150 /**
3151  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3152  * @seg: packet segment the field being set belongs to
3153  * @fld: field to be set
3154  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3155  *           entry's input buffer
3156  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3157  *            input buffer
3158  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3159  *            entry's input buffer
3160  * @range: indicate if field being matched is to be in a range
3161  *
3162  * This function specifies the locations, in the form of byte offsets from the
3163  * start of the input buffer for a flow entry, from where the value to match,
3164  * the mask value, and upper value can be extracted. These locations are then
3165  * stored in the flow profile. When adding a flow entry associated with the
3166  * flow profile, these locations will be used to quickly extract the values and
3167  * create the content of a match entry. This function should only be used for
3168  * fixed-size data structures.
3169  */
3170 void
3171 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3172                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3173 {
3174         enum ice_flow_fld_match_type t = range ?
3175                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3176
3177         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3178 }
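
/* Usage sketch (illustrative; the buffer layout below is a hypothetical
 * caller-side structure, not part of this driver): match on the IPv4 source
 * address with a per-entry mask.
 *
 *        struct my_match_buf {
 *                u32 sip;        // value to match
 *                u32 sip_mask;   // mask for the value
 *        };
 *
 *        ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *                         offsetof(struct my_match_buf, sip),
 *                         offsetof(struct my_match_buf, sip_mask),
 *                         ICE_FLOW_FLD_OFF_INVAL, false);
 *
 * The same buffer is later passed as the data argument of
 * ice_flow_add_entry(), which reads the value and mask from these offsets
 * when formatting the entry.
 */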
3179
3180 /**
3181  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3182  * @seg: packet segment the field being set belongs to
3183  * @fld: field to be set
3184  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3185  *           entry's input buffer
3186  * @pref_loc: location of prefix value from entry's input buffer
3187  * @pref_sz: size of the location holding the prefix value
3188  *
3189  * This function specifies the locations, in the form of byte offsets from the
3190  * start of the input buffer for a flow entry, from where the value to match
3191  * and the IPv4 prefix value can be extracted. These locations are then stored
3192  * in the flow profile. When adding flow entries to the associated flow profile,
3193  * these locations can be used to quickly extract the values to create the
3194  * content of a match entry. This function should only be used for fixed-size
3195  * data structures.
3196  */
3197 void
3198 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3199                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3200 {
3201         /* For this type of field, the "mask" location carries the location of the
3202          * prefix value, and the "last" location carries the size of the location
3203          * holding the prefix value.
3204          */
3205         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3206                              pref_loc, (u16)pref_sz);
3207 }
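
/* Usage sketch (illustrative; the buffer layout and member names are
 * hypothetical): match an IPv4 destination address against a prefix whose
 * length is read from a one-byte member of the caller's buffer.
 *
 *        ice_flow_set_fld_prefix(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
 *                                offsetof(struct my_match_buf, dip),
 *                                offsetof(struct my_match_buf, dip_pref),
 *                                sizeof(u8));
 */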
3208
3209 /**
3210  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3211  * @seg: packet segment the field being set belongs to
3212  * @off: offset of the raw field from the beginning of the segment in bytes
3213  * @len: length of the raw pattern to be matched
3214  * @val_loc: location of the value to match from entry's input buffer
3215  * @mask_loc: location of mask value from entry's input buffer
3216  *
3217  * This function specifies the offset of the raw field to be matched from the
3218  * beginning of the specified packet segment, and the locations, in the form of
3219  * byte offsets from the start of the input buffer for a flow entry, from where
3220  * the value to match and the mask value can be extracted. These locations are
3221  * then stored in the flow profile. When adding flow entries to the associated
3222  * flow profile, these locations can be used to quickly extract the values to
3223  * create the content of a match entry. This function should only be used for
3224  * fixed-size data structures.
3225  */
3226 void
3227 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3228                      u16 val_loc, u16 mask_loc)
3229 {
3230         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3231                 seg->raws[seg->raws_cnt].off = off;
3232                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3233                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3234                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3235                 /* The "last" field is used to store the length of the field */
3236                 seg->raws[seg->raws_cnt].info.src.last = len;
3237         }
3238
3239         /* Overflows of "raws" will be handled as an error condition later in
3240          * the flow when this information is processed.
3241          */
3242         seg->raws_cnt++;
3243 }
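
/* Usage sketch (illustrative; the offset, length and buffer layout are
 * hypothetical): match two raw bytes located 6 bytes into the segment.
 *
 *        ice_flow_add_fld_raw(seg, 6, 2,
 *                             offsetof(struct my_match_buf, raw_val),
 *                             offsetof(struct my_match_buf, raw_mask));
 *
 * As noted above, exceeding ICE_FLOW_SEG_RAW_FLD_MAX is not rejected here;
 * the overflow is reported later when the segment is processed.
 */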
3244
3245 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3246         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3247
3248 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3249         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3250
3251 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3252         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3253          ICE_FLOW_SEG_HDR_SCTP)
3254
3255 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3256         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3257          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3258          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3259
3260 /**
3261  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3262  * @segs: pointer to the flow field segment(s)
3263  * @hash_fields: fields to be hashed on for the segment(s)
3264  * @flow_hdr: protocol header fields within a packet segment
3265  *
3266  * Helper function to extract fields from the hash bitmap and use the flow
3267  * header value to set the flow field segment for further use in flow
3268  * profile addition or removal.
3269  */
3270 static enum ice_status
3271 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3272                           u32 flow_hdr)
3273 {
3274         u64 val = hash_fields;
3275         u8 i;
3276
3277         for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3278                 u64 bit = BIT_ULL(i);
3279
3280                 if (val & bit) {
3281                         ice_flow_set_fld(segs, (enum ice_flow_field)i,
3282                                          ICE_FLOW_FLD_OFF_INVAL,
3283                                          ICE_FLOW_FLD_OFF_INVAL,
3284                                          ICE_FLOW_FLD_OFF_INVAL, false);
3285                         val &= ~bit;
3286                 }
3287         }
3288         ICE_FLOW_SET_HDRS(segs, flow_hdr);
3289
3290         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3291             ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
3292                 return ICE_ERR_PARAM;
3293
3294         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3295         if (val && !ice_is_pow2(val))
3296                 return ICE_ERR_CFG;
3297
3298         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3299         if (val && !ice_is_pow2(val))
3300                 return ICE_ERR_CFG;
3301
3302         return ICE_SUCCESS;
3303 }
3304
3305 /**
3306  * ice_rem_vsi_rss_list - remove VSI from RSS list
3307  * @hw: pointer to the hardware structure
3308  * @vsi_handle: software VSI handle
3309  *
3310  * Remove the VSI from all RSS configurations in the list.
3311  */
3312 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3313 {
3314         struct ice_rss_cfg *r, *tmp;
3315
3316         if (LIST_EMPTY(&hw->rss_list_head))
3317                 return;
3318
3319         ice_acquire_lock(&hw->rss_locks);
3320         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3321                                  ice_rss_cfg, l_entry)
3322                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3323                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3324                                 LIST_DEL(&r->l_entry);
3325                                 ice_free(hw, r);
3326                         }
3327         ice_release_lock(&hw->rss_locks);
3328 }
3329
3330 /**
3331  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3332  * @hw: pointer to the hardware structure
3333  * @vsi_handle: software VSI handle
3334  *
3335  * This function will iterate through all flow profiles and disassociate
3336  * the VSI from each profile. If a flow profile has no VSIs left, it will
3337  * be removed.
3338  */
3339 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3340 {
3341         const enum ice_block blk = ICE_BLK_RSS;
3342         struct ice_flow_prof *p, *t;
3343         enum ice_status status = ICE_SUCCESS;
3344
3345         if (!ice_is_vsi_valid(hw, vsi_handle))
3346                 return ICE_ERR_PARAM;
3347
3348         if (LIST_EMPTY(&hw->fl_profs[blk]))
3349                 return ICE_SUCCESS;
3350
3351         ice_acquire_lock(&hw->rss_locks);
3352         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3353                                  l_entry)
3354                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3355                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3356                         if (status)
3357                                 break;
3358
3359                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3360                                 status = ice_flow_rem_prof(hw, blk, p->id);
3361                                 if (status)
3362                                         break;
3363                         }
3364                 }
3365         ice_release_lock(&hw->rss_locks);
3366
3367         return status;
3368 }
3369
3370 /**
3371  * ice_rem_rss_list - remove RSS configuration from list
3372  * @hw: pointer to the hardware structure
3373  * @vsi_handle: software VSI handle
3374  * @prof: pointer to flow profile
3375  *
3376  * Assumption: lock has already been acquired for RSS list
3377  */
3378 static void
3379 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3380 {
3381         struct ice_rss_cfg *r, *tmp;
3382
3383         /* Search for RSS hash fields associated with the VSI that match the
3384          * hash configurations associated with the flow profile. If found,
3385          * remove it from the RSS entry list of the VSI context and delete it.
3386          */
3387         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3388                                  ice_rss_cfg, l_entry)
3389                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3390                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3391                         ice_clear_bit(vsi_handle, r->vsis);
3392                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3393                                 LIST_DEL(&r->l_entry);
3394                                 ice_free(hw, r);
3395                         }
3396                         return;
3397                 }
3398 }
3399
3400 /**
3401  * ice_add_rss_list - add RSS configuration to list
3402  * @hw: pointer to the hardware structure
3403  * @vsi_handle: software VSI handle
3404  * @prof: pointer to flow profile
3405  *
3406  * Assumption: lock has already been acquired for RSS list
3407  */
3408 static enum ice_status
3409 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3410 {
3411         struct ice_rss_cfg *r, *rss_cfg;
3412
3413         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3414                             ice_rss_cfg, l_entry)
3415                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3416                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3417                         ice_set_bit(vsi_handle, r->vsis);
3418                         return ICE_SUCCESS;
3419                 }
3420
3421         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3422         if (!rss_cfg)
3423                 return ICE_ERR_NO_MEMORY;
3424
3425         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3426         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3427         rss_cfg->symm = prof->cfg.symm;
3428         ice_set_bit(vsi_handle, rss_cfg->vsis);
3429
3430         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3431
3432         return ICE_SUCCESS;
3433 }
3434
3435 #define ICE_FLOW_PROF_HASH_S    0
3436 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3437 #define ICE_FLOW_PROF_HDR_S     32
3438 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3439 #define ICE_FLOW_PROF_ENCAP_S   63
3440 #define ICE_FLOW_PROF_ENCAP_M   (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
3441
3442 #define ICE_RSS_OUTER_HEADERS   1
3443 #define ICE_RSS_INNER_HEADERS   2
3444
3445 /* Flow profile ID format:
3446  * [0:31] - Packet match fields
3447  * [32:62] - Protocol header
3448  * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
3449  */
3450 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3451         (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3452               (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3453               ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
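
/* Worked example (illustrative values): for hashed fields 0xf0, protocol
 * headers 0x22 and segs_cnt == 2 (i.e. an inner/tunneled segment),
 *        ICE_FLOW_GEN_PROFID(0xf0, 0x22, 2)
 * evaluates to BIT_ULL(63) | ((u64)0x22 << 32) | 0xf0: the hash bitmap in
 * the low 32 bits, the header bitmap shifted to start at bit 32 (masked by
 * ICE_FLOW_PROF_HDR_M), and bit 63 set because segs_cnt > 1 marks the
 * profile as tunneled.
 */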
3454
3455 static void
3456 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3457 {
3458         u32 s = ((src % 4) << 3); /* byte shift */
3459         u32 v = dst | 0x80; /* value to program */
3460         u8 i = src / 4; /* register index */
3461         u32 reg;
3462
3463         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3464         reg = (reg & ~(0xff << s)) | (v << s);
3465         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3466 }
3467
3468 static void
3469 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3470 {
3471         int fv_last_word =
3472                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3473         int i;
3474
3475         for (i = 0; i < len; i++) {
3476                 ice_rss_config_xor_word(hw, prof_id,
3477                                         /* Yes, field vector in GLQF_HSYMM and
3478                                          * GLQF_HINSET is reversed!
3479                                          */
3480                                         fv_last_word - (src + i),
3481                                         fv_last_word - (dst + i));
3482                 ice_rss_config_xor_word(hw, prof_id,
3483                                         fv_last_word - (dst + i),
3484                                         fv_last_word - (src + i));
3485         }
3486 }
3487
3488 static void
3489 ice_rss_update_symm(struct ice_hw *hw,
3490                     struct ice_flow_prof *prof)
3491 {
3492         struct ice_prof_map *map;
3493         u8 prof_id, m;
3494
3495         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3496         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3497         if (map)
3498                 prof_id = map->prof_id;
3499         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3500         if (!map)
3501                 return;
3502         /* clear to default */
3503         for (m = 0; m < 6; m++)
3504                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3505         if (prof->cfg.symm) {
3506                 struct ice_flow_seg_info *seg =
3507                         &prof->segs[prof->segs_cnt - 1];
3508
3509                 struct ice_flow_seg_xtrct *ipv4_src =
3510                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3511                 struct ice_flow_seg_xtrct *ipv4_dst =
3512                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3513                 struct ice_flow_seg_xtrct *ipv6_src =
3514                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3515                 struct ice_flow_seg_xtrct *ipv6_dst =
3516                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3517
3518                 struct ice_flow_seg_xtrct *tcp_src =
3519                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3520                 struct ice_flow_seg_xtrct *tcp_dst =
3521                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3522
3523                 struct ice_flow_seg_xtrct *udp_src =
3524                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3525                 struct ice_flow_seg_xtrct *udp_dst =
3526                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3527
3528                 struct ice_flow_seg_xtrct *sctp_src =
3529                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3530                 struct ice_flow_seg_xtrct *sctp_dst =
3531                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3532
3533                 /* xor IPv4 */
3534                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3535                         ice_rss_config_xor(hw, prof_id,
3536                                            ipv4_src->idx, ipv4_dst->idx, 2);
3537
3538                 /* xor IPv6 */
3539                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3540                         ice_rss_config_xor(hw, prof_id,
3541                                            ipv6_src->idx, ipv6_dst->idx, 8);
3542
3543                 /* xor TCP */
3544                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3545                         ice_rss_config_xor(hw, prof_id,
3546                                            tcp_src->idx, tcp_dst->idx, 1);
3547
3548                 /* xor UDP */
3549                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3550                         ice_rss_config_xor(hw, prof_id,
3551                                            udp_src->idx, udp_dst->idx, 1);
3552
3553                 /* xor SCTP */
3554                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3555                         ice_rss_config_xor(hw, prof_id,
3556                                            sctp_src->idx, sctp_dst->idx, 1);
3557         }
3558 }
3559
3560 /**
3561  * ice_add_rss_cfg_sync - add an RSS configuration
3562  * @hw: pointer to the hardware structure
3563  * @vsi_handle: software VSI handle
3564  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3565  * @addl_hdrs: protocol header fields
3566  * @segs_cnt: packet segment count
3567  * @symm: symmetric hash enable/disable
3568  *
3569  * Assumption: lock has already been acquired for RSS list
3570  */
3571 static enum ice_status
3572 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3573                      u32 addl_hdrs, u8 segs_cnt, bool symm)
3574 {
3575         const enum ice_block blk = ICE_BLK_RSS;
3576         struct ice_flow_prof *prof = NULL;
3577         struct ice_flow_seg_info *segs;
3578         enum ice_status status;
3579
3580         if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3581                 return ICE_ERR_PARAM;
3582
3583         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3584                                                       sizeof(*segs));
3585         if (!segs)
3586                 return ICE_ERR_NO_MEMORY;
3587
3588         /* Construct the packet segment info from the hashed fields */
3589         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3590                                            addl_hdrs);
3591         if (status)
3592                 goto exit;
3593
3594         /* don't do RSS for GTPU outer */
3595         if (segs_cnt == ICE_RSS_OUTER_HEADERS &&
3596             segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3597                 status = ICE_SUCCESS;
3598                 goto exit;
3599         }
3600
3601         /* Search for a flow profile that has matching headers, hash fields,
3602          * and has the input VSI associated with it. If found, no further
3603          * operations are required, so exit.
3604          */
3605         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3606                                         vsi_handle,
3607                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
3608                                         ICE_FLOW_FIND_PROF_CHK_VSI);
3609         if (prof) {
3610                 if (prof->cfg.symm == symm)
3611                         goto exit;
3612                 prof->cfg.symm = symm;
3613                 goto update_symm;
3614         }
3615
3616         /* Check if a flow profile exists with the same protocol headers and
3617          * associated with the input VSI. If so, disassociate the VSI from
3618          * this profile. The VSI will be added to a new profile created with
3619          * the protocol header and new hash field configuration.
3620          */
3621         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3622                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3623         if (prof) {
3624                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3625                 if (!status)
3626                         ice_rem_rss_list(hw, vsi_handle, prof);
3627                 else
3628                         goto exit;
3629
3630                 /* Remove profile if it has no VSIs associated */
3631                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3632                         status = ice_flow_rem_prof(hw, blk, prof->id);
3633                         if (status)
3634                                 goto exit;
3635                 }
3636         }
3637
3638         /* Search for a profile that has the same match fields only. If this
3639          * exists then associate the VSI to this profile.
3640          */
3641         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3642                                         vsi_handle,
3643                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3644         if (prof) {
3645                 if (prof->cfg.symm == symm) {
3646                         status = ice_flow_assoc_prof(hw, blk, prof,
3647                                                      vsi_handle);
3648                         if (!status)
3649                                 status = ice_add_rss_list(hw, vsi_handle,
3650                                                           prof);
3651                 } else {
3652                         /* If a profile exists but with a different symmetric
3653                          * hash requirement, just return an error.
3654                          */
3655                         status = ICE_ERR_NOT_SUPPORTED;
3656                 }
3657                 goto exit;
3658         }
3659
3660         /* Create a new flow profile with the generated profile ID and packet
3661          * segment information.
3662          */
3663         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3664                                    ICE_FLOW_GEN_PROFID(hashed_flds,
3665                                                        segs[segs_cnt - 1].hdrs,
3666                                                        segs_cnt),
3667                                    segs, segs_cnt, NULL, 0, &prof);
3668         if (status)
3669                 goto exit;
3670
3671         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3672         /* If association to a new flow profile failed then this profile can
3673          * be removed.
3674          */
3675         if (status) {
3676                 ice_flow_rem_prof(hw, blk, prof->id);
3677                 goto exit;
3678         }
3679
3680         status = ice_add_rss_list(hw, vsi_handle, prof);
3681
3682         prof->cfg.symm = symm;
3683
3684 update_symm:
3685         ice_rss_update_symm(hw, prof);
3686
3687 exit:
3688         ice_free(hw, segs);
3689         return status;
3690 }
3691
3692 /**
3693  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3694  * @hw: pointer to the hardware structure
3695  * @vsi_handle: software VSI handle
3696  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3697  * @addl_hdrs: protocol header fields
3698  * @symm: symmetric hash enable/disable
3699  *
3700  * This function will generate a flow profile based on the input fields to
3701  * hash on and the flow type, and it will use the VSI number to add a flow
3702  * entry to the profile.
3703  */
3704 enum ice_status
3705 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3706                 u32 addl_hdrs, bool symm)
3707 {
3708         enum ice_status status;
3709
3710         if (hashed_flds == ICE_HASH_INVALID ||
3711             !ice_is_vsi_valid(hw, vsi_handle))
3712                 return ICE_ERR_PARAM;
3713
3714         ice_acquire_lock(&hw->rss_locks);
3715         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3716                                       ICE_RSS_OUTER_HEADERS, symm);
3717
3718         if (!status)
3719                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3720                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3721                                               symm);
3722         ice_release_lock(&hw->rss_locks);
3723
3724         return status;
3725 }
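
/* Usage sketch (illustrative): enable symmetric RSS hashing on IPv4
 * addresses plus TCP ports for a VSI. ICE_FLOW_HASH_IPV4 and
 * ICE_FLOW_HASH_TCP_PORT are assumed to be the usual hash-field bitmaps from
 * ice_flow.h; vsi_handle is a placeholder.
 *
 *        status = ice_add_rss_cfg(hw, vsi_handle,
 *                                 ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT,
 *                                 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
 *                                 true);
 *
 * The corresponding teardown is ice_rem_rss_cfg() with the same hashed
 * fields and headers.
 */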
3726
3727 /**
3728  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3729  * @hw: pointer to the hardware structure
3730  * @vsi_handle: software VSI handle
3731  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3732  * @addl_hdrs: Protocol header fields within a packet segment
3733  * @segs_cnt: packet segment count
3734  *
3735  * Assumption: lock has already been acquired for RSS list
3736  */
3737 static enum ice_status
3738 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3739                      u32 addl_hdrs, u8 segs_cnt)
3740 {
3741         const enum ice_block blk = ICE_BLK_RSS;
3742         struct ice_flow_seg_info *segs;
3743         struct ice_flow_prof *prof;
3744         enum ice_status status;
3745
3746         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3747                                                       sizeof(*segs));
3748         if (!segs)
3749                 return ICE_ERR_NO_MEMORY;
3750
3751         /* Construct the packet segment info from the hashed fields */
3752         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3753                                            addl_hdrs);
3754         if (status)
3755                 goto out;
3756
3757         if (segs_cnt == ICE_RSS_OUTER_HEADERS &&
3758             segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3759                 status = ICE_SUCCESS;
3760                 goto out;
3761         }
3762
3763         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3764                                         vsi_handle,
3765                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3766         if (!prof) {
3767                 status = ICE_ERR_DOES_NOT_EXIST;
3768                 goto out;
3769         }
3770
3771         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3772         if (status)
3773                 goto out;
3774
3775         /* Remove RSS configuration from VSI context before deleting
3776          * the flow profile.
3777          */
3778         ice_rem_rss_list(hw, vsi_handle, prof);
3779
3780         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3781                 status = ice_flow_rem_prof(hw, blk, prof->id);
3782
3783 out:
3784         ice_free(hw, segs);
3785         return status;
3786 }
3787
3788 /**
3789  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3790  * @hw: pointer to the hardware structure
3791  * @vsi_handle: software VSI handle
3792  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3793  * @addl_hdrs: Protocol header fields within a packet segment
3794  *
3795  * This function will look up the flow profile based on the input
3796  * hash field bitmap, iterate through the entry list of that
3797  * profile, and find the entry associated with the input VSI to be
3798  * removed. Calls are made to the underlying flow APIs, which will
3799  * in turn build or update buffers for the RSS XLT1 section.
3800  */
3801 enum ice_status
3802 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3803                 u32 addl_hdrs)
3804 {
3805         enum ice_status status;
3806
3807         if (hashed_flds == ICE_HASH_INVALID ||
3808             !ice_is_vsi_valid(hw, vsi_handle))
3809                 return ICE_ERR_PARAM;
3810
3811         ice_acquire_lock(&hw->rss_locks);
3812         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3813                                       ICE_RSS_OUTER_HEADERS);
3814         if (!status)
3815                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3816                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3817         ice_release_lock(&hw->rss_locks);
3818
3819         return status;
3820 }
3821
3822 /**
3823  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3824  * @hw: pointer to the hardware structure
3825  * @vsi_handle: software VSI handle
3826  */
3827 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3828 {
3829         enum ice_status status = ICE_SUCCESS;
3830         struct ice_rss_cfg *r;
3831
3832         if (!ice_is_vsi_valid(hw, vsi_handle))
3833                 return ICE_ERR_PARAM;
3834
3835         ice_acquire_lock(&hw->rss_locks);
3836         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3837                             ice_rss_cfg, l_entry) {
3838                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3839                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3840                                                       r->hashed_flds,
3841                                                       r->packet_hdr,
3842                                                       ICE_RSS_OUTER_HEADERS,
3843                                                       r->symm);
3844                         if (status)
3845                                 break;
3846                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3847                                                       r->hashed_flds,
3848                                                       r->packet_hdr,
3849                                                       ICE_RSS_INNER_HEADERS,
3850                                                       r->symm);
3851                         if (status)
3852                                 break;
3853                 }
3854         }
3855         ice_release_lock(&hw->rss_locks);
3856
3857         return status;
3858 }
3859
3860 /**
3861  * ice_get_rss_cfg - returns hashed fields for the given header types
3862  * @hw: pointer to the hardware structure
3863  * @vsi_handle: software VSI handle
3864  * @hdrs: protocol header type
3865  *
3866  * This function will return the match fields of the first instance of a flow
3867  * profile having the given header types and containing the input VSI
3868  */
3869 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3870 {
3871         u64 rss_hash = ICE_HASH_INVALID;
3872         struct ice_rss_cfg *r;
3873
3874         /* verify that the protocol header is non-zero and the VSI is valid */
3875         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3876                 return ICE_HASH_INVALID;
3877
3878         ice_acquire_lock(&hw->rss_locks);
3879         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3880                             ice_rss_cfg, l_entry)
3881                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3882                     r->packet_hdr == hdrs) {
3883                         rss_hash = r->hashed_flds;
3884                         break;
3885                 }
3886         ice_release_lock(&hw->rss_locks);
3887
3888         return rss_hash;
3889 }