net/ice/base: remove PPPoD from PPPoE bitmap
drivers/net/ice/base/ice_flow.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
17 #define ICE_FLOW_FLD_SZ_IP_TTL          1
18 #define ICE_FLOW_FLD_SZ_IP_PROT         1
19 #define ICE_FLOW_FLD_SZ_PORT            2
20 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
21 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
22 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
23 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
24 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
25 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
26 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
27 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
28 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
29 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
30 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
31 #define ICE_FLOW_FLD_SZ_AH_SPI  4
32 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
33
34 /* Describe properties of a protocol header field */
35 struct ice_flow_field_info {
36         enum ice_flow_seg_hdr hdr;
37         s16 off;        /* Offset from start of a protocol header, in bits */
38         u16 size;       /* Size of the field in bits */
39         u16 mask;       /* 16-bit mask for field */
40 };
41
42 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
43         .hdr = _hdr, \
44         .off = (_offset_bytes) * BITS_PER_BYTE, \
45         .size = (_size_bytes) * BITS_PER_BYTE, \
46         .mask = 0, \
47 }
48
49 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
50         .hdr = _hdr, \
51         .off = (_offset_bytes) * BITS_PER_BYTE, \
52         .size = (_size_bytes) * BITS_PER_BYTE, \
53         .mask = _mask, \
54 }
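/* Both helpers take byte-granular offsets and sizes and convert them to bits
 * to match the bit-granular off/size members of struct ice_flow_field_info;
 * ICE_FLOW_FLD_INFO_MSK additionally records a 16-bit mask for the field.
 */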
55
56 /* Table containing properties of supported protocol header fields */
57 static const
58 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
59         /* Ether */
60         /* ICE_FLOW_FIELD_IDX_ETH_DA */
61         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
62         /* ICE_FLOW_FIELD_IDX_ETH_SA */
63         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
64         /* ICE_FLOW_FIELD_IDX_S_VLAN */
65         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
66         /* ICE_FLOW_FIELD_IDX_C_VLAN */
67         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
68         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
69         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
70         /* IPv4 / IPv6 */
71         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
72         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
73                               0x00fc),
74         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
75         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
76                               0x0ff0),
77         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
78         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
80         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
81         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
82                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
83         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
84         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
86         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
87         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
88                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
89         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
90         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
91         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
92         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
94         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
95         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
96         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
99                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
100         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
101         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
102                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
103         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
104         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
105                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
106         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
107         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
108                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
109         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
110         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
111                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
112         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
113         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
114                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
115         /* Transport */
116         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
117         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
118         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
119         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
120         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
121         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
122         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
123         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
124         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
125         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
126         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
127         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
128         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
129         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
130         /* ARP */
131         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
132         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
133         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
134         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
136         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
137         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
138         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
139         /* ICE_FLOW_FIELD_IDX_ARP_OP */
140         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
141         /* ICMP */
142         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
143         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
144         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
145         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
146         /* GRE */
147         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
148         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
149         /* GTP */
150         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
151         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
152                           ICE_FLOW_FLD_SZ_GTP_TEID),
153         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
154         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
155                           ICE_FLOW_FLD_SZ_GTP_TEID),
156         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
157         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
158                           ICE_FLOW_FLD_SZ_GTP_TEID),
159         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
160         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
161                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
162         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
163         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
164                           ICE_FLOW_FLD_SZ_GTP_TEID),
165         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
166         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
167                           ICE_FLOW_FLD_SZ_GTP_TEID),
168         /* PPPOE */
169         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
170         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
171                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
172         /* PFCP */
173         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
174         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
175                           ICE_FLOW_FLD_SZ_PFCP_SEID),
176         /* L2TPV3 */
177         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
178         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
179                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
180         /* ESP */
181         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
182         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
183                           ICE_FLOW_FLD_SZ_ESP_SPI),
184         /* AH */
185         /* ICE_FLOW_FIELD_IDX_AH_SPI */
186         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
187                           ICE_FLOW_FLD_SZ_AH_SPI),
188         /* NAT_T_ESP */
189         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
190         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
191                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
192 };
193
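/* Each ice_ptypes_* table below is an array of 32 32-bit words used as a
 * 1024-bit map with one bit per packet type (PTYPE).
 * ice_flow_proc_seg_hdrs() ANDs the maps of all requested headers together to
 * derive the set of PTYPEs that can match a given packet segment.
 */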
194 /* Bitmaps indicating relevant packet types for a particular protocol header
195  *
196  * Packet types for packets with an Outer/First/Single MAC header
197  */
198 static const u32 ice_ptypes_mac_ofos[] = {
199         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
200         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
201         0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
202         0x00000000, 0x00000000, 0x00000000, 0x00000000,
203         0x00000000, 0x00000000, 0x00000000, 0x00000000,
204         0x00000000, 0x00000000, 0x00000000, 0x00000000,
205         0x00000000, 0x00000000, 0x00000000, 0x00000000,
206         0x00000000, 0x00000000, 0x00000000, 0x00000000,
207 };
208
209 /* Packet types for packets with an Innermost/Last MAC VLAN header */
210 static const u32 ice_ptypes_macvlan_il[] = {
211         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
212         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
213         0x00000000, 0x00000000, 0x00000000, 0x00000000,
214         0x00000000, 0x00000000, 0x00000000, 0x00000000,
215         0x00000000, 0x00000000, 0x00000000, 0x00000000,
216         0x00000000, 0x00000000, 0x00000000, 0x00000000,
217         0x00000000, 0x00000000, 0x00000000, 0x00000000,
218         0x00000000, 0x00000000, 0x00000000, 0x00000000,
219 };
220
221 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
222  * include IPV4 other PTYPEs
223  */
224 static const u32 ice_ptypes_ipv4_ofos[] = {
225         0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
226         0x00000000, 0x00000155, 0x00000000, 0x00000000,
227         0x00000000, 0x000FC000, 0x00000000, 0x00000000,
228         0x00000000, 0x00000000, 0x00000000, 0x00000000,
229         0x00000000, 0x00000000, 0x00000000, 0x00000000,
230         0x00000000, 0x00000000, 0x00000000, 0x00000000,
231         0x00000000, 0x00000000, 0x00000000, 0x00000000,
232         0x00000000, 0x00000000, 0x00000000, 0x00000000,
233 };
234
235 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
236  * IPV4 other PTYPEs
237  */
238 static const u32 ice_ptypes_ipv4_ofos_all[] = {
239         0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
240         0x00000000, 0x00000155, 0x00000000, 0x00000000,
241         0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
242         0x00000000, 0x00000000, 0x00000000, 0x00000000,
243         0x00000000, 0x00000000, 0x00000000, 0x00000000,
244         0x00000000, 0x00000000, 0x00000000, 0x00000000,
245         0x00000000, 0x00000000, 0x00000000, 0x00000000,
246         0x00000000, 0x00000000, 0x00000000, 0x00000000,
247 };
248
249 /* Packet types for packets with an Innermost/Last IPv4 header */
250 static const u32 ice_ptypes_ipv4_il[] = {
251         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
252         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
253         0x00000000, 0x00000000, 0x001FF800, 0x00000000,
254         0x00000000, 0x00000000, 0x00000000, 0x00000000,
255         0x00000000, 0x00000000, 0x00000000, 0x00000000,
256         0x00000000, 0x00000000, 0x00000000, 0x00000000,
257         0x00000000, 0x00000000, 0x00000000, 0x00000000,
258         0x00000000, 0x00000000, 0x00000000, 0x00000000,
259 };
260
261 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
262  * include IPV6 other PTYPEs
263  */
264 static const u32 ice_ptypes_ipv6_ofos[] = {
265         0x00000000, 0x00000000, 0x77000000, 0x10002000,
266         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
267         0x00000000, 0x03F00000, 0x00000000, 0x00000000,
268         0x00000000, 0x00000000, 0x00000000, 0x00000000,
269         0x00000000, 0x00000000, 0x00000000, 0x00000000,
270         0x00000000, 0x00000000, 0x00000000, 0x00000000,
271         0x00000000, 0x00000000, 0x00000000, 0x00000000,
272         0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 };
274
275 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
276  * IPV6 other PTYPEs
277  */
278 static const u32 ice_ptypes_ipv6_ofos_all[] = {
279         0x00000000, 0x00000000, 0x77000000, 0x10002000,
280         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
281         0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
282         0x00000000, 0x00000000, 0x00000000, 0x00000000,
283         0x00000000, 0x00000000, 0x00000000, 0x00000000,
284         0x00000000, 0x00000000, 0x00000000, 0x00000000,
285         0x00000000, 0x00000000, 0x00000000, 0x00000000,
286         0x00000000, 0x00000000, 0x00000000, 0x00000000,
287 };
288
289 /* Packet types for packets with an Innermost/Last IPv6 header */
290 static const u32 ice_ptypes_ipv6_il[] = {
291         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
292         0x00000770, 0x00000000, 0x00000000, 0x00000000,
293         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
294         0x00000000, 0x00000000, 0x00000000, 0x00000000,
295         0x00000000, 0x00000000, 0x00000000, 0x00000000,
296         0x00000000, 0x00000000, 0x00000000, 0x00000000,
297         0x00000000, 0x00000000, 0x00000000, 0x00000000,
298         0x00000000, 0x00000000, 0x00000000, 0x00000000,
299 };
300
301 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
302 static const u32 ice_ipv4_ofos_no_l4[] = {
303         0x10C00000, 0x04000800, 0x00000000, 0x00000000,
304         0x00000000, 0x00000000, 0x00000000, 0x00000000,
305         0x00000000, 0x000cc000, 0x00000000, 0x00000000,
306         0x00000000, 0x00000000, 0x00000000, 0x00000000,
307         0x00000000, 0x00000000, 0x00000000, 0x00000000,
308         0x00000000, 0x00000000, 0x00000000, 0x00000000,
309         0x00000000, 0x00000000, 0x00000000, 0x00000000,
310         0x00000000, 0x00000000, 0x00000000, 0x00000000,
311 };
312
313 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
314 static const u32 ice_ipv4_il_no_l4[] = {
315         0x60000000, 0x18043008, 0x80000002, 0x6010c021,
316         0x00000008, 0x00000000, 0x00000000, 0x00000000,
317         0x00000000, 0x00000000, 0x00139800, 0x00000000,
318         0x00000000, 0x00000000, 0x00000000, 0x00000000,
319         0x00000000, 0x00000000, 0x00000000, 0x00000000,
320         0x00000000, 0x00000000, 0x00000000, 0x00000000,
321         0x00000000, 0x00000000, 0x00000000, 0x00000000,
322         0x00000000, 0x00000000, 0x00000000, 0x00000000,
323 };
324
325 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
326 static const u32 ice_ipv6_ofos_no_l4[] = {
327         0x00000000, 0x00000000, 0x43000000, 0x10002000,
328         0x00000000, 0x00000000, 0x00000000, 0x00000000,
329         0x00000000, 0x02300000, 0x00000000, 0x00000000,
330         0x00000000, 0x00000000, 0x00000000, 0x00000000,
331         0x00000000, 0x00000000, 0x00000000, 0x00000000,
332         0x00000000, 0x00000000, 0x00000000, 0x00000000,
333         0x00000000, 0x00000000, 0x00000000, 0x00000000,
334         0x00000000, 0x00000000, 0x00000000, 0x00000000,
335 };
336
337 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
338 static const u32 ice_ipv6_il_no_l4[] = {
339         0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
340         0x00000430, 0x00000000, 0x00000000, 0x00000000,
341         0x00000000, 0x00000000, 0x4e600000, 0x00000000,
342         0x00000000, 0x00000000, 0x00000000, 0x00000000,
343         0x00000000, 0x00000000, 0x00000000, 0x00000000,
344         0x00000000, 0x00000000, 0x00000000, 0x00000000,
345         0x00000000, 0x00000000, 0x00000000, 0x00000000,
346         0x00000000, 0x00000000, 0x00000000, 0x00000000,
347 };
348
349 /* Packet types for packets with an Outermost/First ARP header */
350 static const u32 ice_ptypes_arp_of[] = {
351         0x00000800, 0x00000000, 0x00000000, 0x00000000,
352         0x00000000, 0x00000000, 0x00000000, 0x00000000,
353         0x00000000, 0x00000000, 0x00000000, 0x00000000,
354         0x00000000, 0x00000000, 0x00000000, 0x00000000,
355         0x00000000, 0x00000000, 0x00000000, 0x00000000,
356         0x00000000, 0x00000000, 0x00000000, 0x00000000,
357         0x00000000, 0x00000000, 0x00000000, 0x00000000,
358         0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 };
360
361 /* UDP Packet types for non-tunneled packets or tunneled
362  * packets with inner UDP.
363  */
364 static const u32 ice_ptypes_udp_il[] = {
365         0x81000000, 0x20204040, 0x04000010, 0x80810102,
366         0x00000040, 0x00000000, 0x00000000, 0x00000000,
367         0x00000000, 0x00410000, 0x90842000, 0x00000007,
368         0x00000000, 0x00000000, 0x00000000, 0x00000000,
369         0x00000000, 0x00000000, 0x00000000, 0x00000000,
370         0x00000000, 0x00000000, 0x00000000, 0x00000000,
371         0x00000000, 0x00000000, 0x00000000, 0x00000000,
372         0x00000000, 0x00000000, 0x00000000, 0x00000000,
373 };
374
375 /* Packet types for packets with an Innermost/Last TCP header */
376 static const u32 ice_ptypes_tcp_il[] = {
377         0x04000000, 0x80810102, 0x10000040, 0x02040408,
378         0x00000102, 0x00000000, 0x00000000, 0x00000000,
379         0x00000000, 0x00820000, 0x21084000, 0x00000000,
380         0x00000000, 0x00000000, 0x00000000, 0x00000000,
381         0x00000000, 0x00000000, 0x00000000, 0x00000000,
382         0x00000000, 0x00000000, 0x00000000, 0x00000000,
383         0x00000000, 0x00000000, 0x00000000, 0x00000000,
384         0x00000000, 0x00000000, 0x00000000, 0x00000000,
385 };
386
387 /* Packet types for packets with an Innermost/Last SCTP header */
388 static const u32 ice_ptypes_sctp_il[] = {
389         0x08000000, 0x01020204, 0x20000081, 0x04080810,
390         0x00000204, 0x00000000, 0x00000000, 0x00000000,
391         0x00000000, 0x01040000, 0x00000000, 0x00000000,
392         0x00000000, 0x00000000, 0x00000000, 0x00000000,
393         0x00000000, 0x00000000, 0x00000000, 0x00000000,
394         0x00000000, 0x00000000, 0x00000000, 0x00000000,
395         0x00000000, 0x00000000, 0x00000000, 0x00000000,
396         0x00000000, 0x00000000, 0x00000000, 0x00000000,
397 };
398
399 /* Packet types for packets with an Outermost/First ICMP header */
400 static const u32 ice_ptypes_icmp_of[] = {
401         0x10000000, 0x00000000, 0x00000000, 0x00000000,
402         0x00000000, 0x00000000, 0x00000000, 0x00000000,
403         0x00000000, 0x00000000, 0x00000000, 0x00000000,
404         0x00000000, 0x00000000, 0x00000000, 0x00000000,
405         0x00000000, 0x00000000, 0x00000000, 0x00000000,
406         0x00000000, 0x00000000, 0x00000000, 0x00000000,
407         0x00000000, 0x00000000, 0x00000000, 0x00000000,
408         0x00000000, 0x00000000, 0x00000000, 0x00000000,
409 };
410
411 /* Packet types for packets with an Innermost/Last ICMP header */
412 static const u32 ice_ptypes_icmp_il[] = {
413         0x00000000, 0x02040408, 0x40000102, 0x08101020,
414         0x00000408, 0x00000000, 0x00000000, 0x00000000,
415         0x00000000, 0x00000000, 0x42108000, 0x00000000,
416         0x00000000, 0x00000000, 0x00000000, 0x00000000,
417         0x00000000, 0x00000000, 0x00000000, 0x00000000,
418         0x00000000, 0x00000000, 0x00000000, 0x00000000,
419         0x00000000, 0x00000000, 0x00000000, 0x00000000,
420         0x00000000, 0x00000000, 0x00000000, 0x00000000,
421 };
422
423 /* Packet types for packets with an Outermost/First GRE header */
424 static const u32 ice_ptypes_gre_of[] = {
425         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
426         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
427         0x00000000, 0x00000000, 0x00000000, 0x00000000,
428         0x00000000, 0x00000000, 0x00000000, 0x00000000,
429         0x00000000, 0x00000000, 0x00000000, 0x00000000,
430         0x00000000, 0x00000000, 0x00000000, 0x00000000,
431         0x00000000, 0x00000000, 0x00000000, 0x00000000,
432         0x00000000, 0x00000000, 0x00000000, 0x00000000,
433 };
434
435 /* Packet types for packets with an Innermost/Last MAC header */
436 static const u32 ice_ptypes_mac_il[] = {
437         0x00000000, 0x00000000, 0x00000000, 0x00000000,
438         0x00000000, 0x00000000, 0x00000000, 0x00000000,
439         0x00000000, 0x00000000, 0x00000000, 0x00000000,
440         0x00000000, 0x00000000, 0x00000000, 0x00000000,
441         0x00000000, 0x00000000, 0x00000000, 0x00000000,
442         0x00000000, 0x00000000, 0x00000000, 0x00000000,
443         0x00000000, 0x00000000, 0x00000000, 0x00000000,
444         0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 };
446
447 /* Packet types for GTPC */
448 static const u32 ice_ptypes_gtpc[] = {
449         0x00000000, 0x00000000, 0x00000000, 0x00000000,
450         0x00000000, 0x00000000, 0x00000000, 0x00000000,
451         0x00000000, 0x00000000, 0x00000180, 0x00000000,
452         0x00000000, 0x00000000, 0x00000000, 0x00000000,
453         0x00000000, 0x00000000, 0x00000000, 0x00000000,
454         0x00000000, 0x00000000, 0x00000000, 0x00000000,
455         0x00000000, 0x00000000, 0x00000000, 0x00000000,
456         0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 };
458
459 /* Packet types for GTPC with TEID */
460 static const u32 ice_ptypes_gtpc_tid[] = {
461         0x00000000, 0x00000000, 0x00000000, 0x00000000,
462         0x00000000, 0x00000000, 0x00000000, 0x00000000,
463         0x00000000, 0x00000000, 0x00000060, 0x00000000,
464         0x00000000, 0x00000000, 0x00000000, 0x00000000,
465         0x00000000, 0x00000000, 0x00000000, 0x00000000,
466         0x00000000, 0x00000000, 0x00000000, 0x00000000,
467         0x00000000, 0x00000000, 0x00000000, 0x00000000,
468         0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 };
470
471 /* Packet types for GTPU */
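/* The ice_attr_gtpu_* tables below pair base GTPU packet types with the PDU
 * attribute (extension header, downlink or uplink) to be attached to them;
 * ice_flow_proc_seg_hdrs() selects one of these tables via params->attr and
 * params->attr_cnt depending on the requested GTPU variant.
 */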
472 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
473         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
474         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
475         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
476         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
477         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
478         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
479         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
480         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
481         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
482         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
483         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
484         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
485         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
486         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
487         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
488         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
489         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
490         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
491         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
492         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
493 };
494
495 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
496         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
497         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
498         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
499         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
500         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
501         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
502         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
503         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
504         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
505         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
506         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
507         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
508         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
509         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
510         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
511         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
512         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
513         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
514         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
515         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
516 };
517
518 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
519         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
520         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
521         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
522         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
523         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
524         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
525         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
526         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
527         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
528         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
529         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
530         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
531         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
532         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
533         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
534         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
535         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
536         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
537         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
538         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
539 };
540
541 static const u32 ice_ptypes_gtpu[] = {
542         0x00000000, 0x00000000, 0x00000000, 0x00000000,
543         0x00000000, 0x00000000, 0x00000000, 0x00000000,
544         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
545         0x00000000, 0x00000000, 0x00000000, 0x00000000,
546         0x00000000, 0x00000000, 0x00000000, 0x00000000,
547         0x00000000, 0x00000000, 0x00000000, 0x00000000,
548         0x00000000, 0x00000000, 0x00000000, 0x00000000,
549         0x00000000, 0x00000000, 0x00000000, 0x00000000,
550 };
551
552 /* Packet types for PPPoE */
553 static const u32 ice_ptypes_pppoe[] = {
554         0x00000000, 0x00000000, 0x00000000, 0x00000000,
555         0x00000000, 0x00000000, 0x00000000, 0x00000000,
556         0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
557         0x00000000, 0x00000000, 0x00000000, 0x00000000,
558         0x00000000, 0x00000000, 0x00000000, 0x00000000,
559         0x00000000, 0x00000000, 0x00000000, 0x00000000,
560         0x00000000, 0x00000000, 0x00000000, 0x00000000,
561         0x00000000, 0x00000000, 0x00000000, 0x00000000,
562 };
563
564 /* Packet types for packets with PFCP NODE header */
565 static const u32 ice_ptypes_pfcp_node[] = {
566         0x00000000, 0x00000000, 0x00000000, 0x00000000,
567         0x00000000, 0x00000000, 0x00000000, 0x00000000,
568         0x00000000, 0x00000000, 0x80000000, 0x00000002,
569         0x00000000, 0x00000000, 0x00000000, 0x00000000,
570         0x00000000, 0x00000000, 0x00000000, 0x00000000,
571         0x00000000, 0x00000000, 0x00000000, 0x00000000,
572         0x00000000, 0x00000000, 0x00000000, 0x00000000,
573         0x00000000, 0x00000000, 0x00000000, 0x00000000,
574 };
575
576 /* Packet types for packets with PFCP SESSION header */
577 static const u32 ice_ptypes_pfcp_session[] = {
578         0x00000000, 0x00000000, 0x00000000, 0x00000000,
579         0x00000000, 0x00000000, 0x00000000, 0x00000000,
580         0x00000000, 0x00000000, 0x00000000, 0x00000005,
581         0x00000000, 0x00000000, 0x00000000, 0x00000000,
582         0x00000000, 0x00000000, 0x00000000, 0x00000000,
583         0x00000000, 0x00000000, 0x00000000, 0x00000000,
584         0x00000000, 0x00000000, 0x00000000, 0x00000000,
585         0x00000000, 0x00000000, 0x00000000, 0x00000000,
586 };
587
588 /* Packet types for L2TPv3 */
589 static const u32 ice_ptypes_l2tpv3[] = {
590         0x00000000, 0x00000000, 0x00000000, 0x00000000,
591         0x00000000, 0x00000000, 0x00000000, 0x00000000,
592         0x00000000, 0x00000000, 0x00000000, 0x00000300,
593         0x00000000, 0x00000000, 0x00000000, 0x00000000,
594         0x00000000, 0x00000000, 0x00000000, 0x00000000,
595         0x00000000, 0x00000000, 0x00000000, 0x00000000,
596         0x00000000, 0x00000000, 0x00000000, 0x00000000,
597         0x00000000, 0x00000000, 0x00000000, 0x00000000,
598 };
599
600 /* Packet types for ESP */
601 static const u32 ice_ptypes_esp[] = {
602         0x00000000, 0x00000000, 0x00000000, 0x00000000,
603         0x00000000, 0x00000003, 0x00000000, 0x00000000,
604         0x00000000, 0x00000000, 0x00000000, 0x00000000,
605         0x00000000, 0x00000000, 0x00000000, 0x00000000,
606         0x00000000, 0x00000000, 0x00000000, 0x00000000,
607         0x00000000, 0x00000000, 0x00000000, 0x00000000,
608         0x00000000, 0x00000000, 0x00000000, 0x00000000,
609         0x00000000, 0x00000000, 0x00000000, 0x00000000,
610 };
611
612 /* Packet types for AH */
613 static const u32 ice_ptypes_ah[] = {
614         0x00000000, 0x00000000, 0x00000000, 0x00000000,
615         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
616         0x00000000, 0x00000000, 0x00000000, 0x00000000,
617         0x00000000, 0x00000000, 0x00000000, 0x00000000,
618         0x00000000, 0x00000000, 0x00000000, 0x00000000,
619         0x00000000, 0x00000000, 0x00000000, 0x00000000,
620         0x00000000, 0x00000000, 0x00000000, 0x00000000,
621         0x00000000, 0x00000000, 0x00000000, 0x00000000,
622 };
623
624 /* Packet types for packets with NAT_T ESP header */
625 static const u32 ice_ptypes_nat_t_esp[] = {
626         0x00000000, 0x00000000, 0x00000000, 0x00000000,
627         0x00000000, 0x00000030, 0x00000000, 0x00000000,
628         0x00000000, 0x00000000, 0x00000000, 0x00000000,
629         0x00000000, 0x00000000, 0x00000000, 0x00000000,
630         0x00000000, 0x00000000, 0x00000000, 0x00000000,
631         0x00000000, 0x00000000, 0x00000000, 0x00000000,
632         0x00000000, 0x00000000, 0x00000000, 0x00000000,
633         0x00000000, 0x00000000, 0x00000000, 0x00000000,
634 };
635
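/* Packet types for non-IP packets with an Outer/First/Single MAC header */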
636 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
637         0x00000846, 0x00000000, 0x00000000, 0x00000000,
638         0x00000000, 0x00000000, 0x00000000, 0x00000000,
639         0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
640         0x00000000, 0x00000000, 0x00000000, 0x00000000,
641         0x00000000, 0x00000000, 0x00000000, 0x00000000,
642         0x00000000, 0x00000000, 0x00000000, 0x00000000,
643         0x00000000, 0x00000000, 0x00000000, 0x00000000,
644         0x00000000, 0x00000000, 0x00000000, 0x00000000,
645 };
646
647 /* Parameters and information used during the creation of a flow profile */
648 struct ice_flow_prof_params {
649         enum ice_block blk;
650         u16 entry_length; /* # of bytes formatted entry will require */
651         u8 es_cnt;
652         struct ice_flow_prof *prof;
653
654         /* For ACL, es[0] will hold the data of ICE_RX_MDID_PKT_FLAGS_15_0,
655          * which gives us the direction flags.
656          */
657         struct ice_fv_word es[ICE_MAX_FV_WORDS];
658         /* attributes can be used to add attributes to a particular PTYPE */
659         const struct ice_ptype_attributes *attr;
660         u16 attr_cnt;
661
662         u16 mask[ICE_MAX_FV_WORDS];
663         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
664 };
665
666 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
667         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
668         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
669         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
670         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
671         ICE_FLOW_SEG_HDR_NAT_T_ESP)
672
673 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
674         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
675 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
676         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
677          ICE_FLOW_SEG_HDR_ARP)
678 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
679         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
680          ICE_FLOW_SEG_HDR_SCTP)
681 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
682 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
683         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
684
685 /**
686  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
687  * @segs: array of one or more packet segments that describe the flow
688  * @segs_cnt: number of packet segments provided
689  */
690 static enum ice_status
691 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
692 {
693         u8 i;
694
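        /* A segment may specify at most one L3 and at most one L4 header; the
         * power-of-two checks below reject header masks with more than one bit
         * set in the respective group.
         */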
695         for (i = 0; i < segs_cnt; i++) {
696                 /* Multiple L3 headers */
697                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
698                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
699                         return ICE_ERR_PARAM;
700
701                 /* Multiple L4 headers */
702                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
703                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
704                         return ICE_ERR_PARAM;
705         }
706
707         return ICE_SUCCESS;
708 }
709
710 /* Sizes of fixed known protocol headers without header options */
711 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
712 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
713 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
714 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
715 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
716 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
717 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
718 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
719 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
720
721 /**
722  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
723  * @params: information about the flow to be processed
724  * @seg: index of packet segment whose header size is to be determined
725  */
726 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
727 {
728         u16 sz;
729
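        /* Example, using the fixed header sizes above: an outer MAC + IPv4 +
         * UDP segment comes to 14 + 20 + 8 = 42 bytes.
         */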
730         /* L2 headers */
731         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
732                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
733
734         /* L3 headers */
735         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
736                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
737         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
738                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
739         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
740                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
741         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
742                 /* An L3 header is required if L4 is specified */
743                 return 0;
744
745         /* L4 headers */
746         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
747                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
748         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
749                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
750         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
751                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
752         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
753                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
754
755         return sz;
756 }
757
758 /**
759  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
760  * @params: information about the flow to be processed
761  *
762  * This function identifies the packet types associated with the protocol
763  * headers being present in packet segments of the specified flow profile.
764  */
765 static enum ice_status
766 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
767 {
768         struct ice_flow_prof *prof;
769         u8 i;
770
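        /* Start from an all-ones PTYPE bitmap and AND in the bitmap of each
         * header present in the segment. PTYPE groups that must only match
         * when explicitly requested (PPPoE, PFCP) are cleared with an AND-NOT
         * when their header is absent.
         */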
771         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
772                    ICE_NONDMA_MEM);
773
774         prof = params->prof;
775
776         for (i = 0; i < params->prof->segs_cnt; i++) {
777                 const ice_bitmap_t *src;
778                 u32 hdrs;
779
780                 hdrs = prof->segs[i].hdrs;
781
782                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
783                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
784                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
785                         ice_and_bitmap(params->ptypes, params->ptypes, src,
786                                        ICE_FLOW_PTYPE_MAX);
787                 }
788
789                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
790                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
791                         ice_and_bitmap(params->ptypes, params->ptypes, src,
792                                        ICE_FLOW_PTYPE_MAX);
793                 }
794
795                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
796                         ice_and_bitmap(params->ptypes, params->ptypes,
797                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
798                                        ICE_FLOW_PTYPE_MAX);
799                 }
800
801                 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
802                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
803                         ice_and_bitmap(params->ptypes, params->ptypes, src,
804                                        ICE_FLOW_PTYPE_MAX);
805                 }
806                 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
807                     (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
808                         src = !i ?
809                                 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all :
810                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
811                         ice_and_bitmap(params->ptypes, params->ptypes, src,
812                                        ICE_FLOW_PTYPE_MAX);
813                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
814                            (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
815                         src = !i ?
816                                 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all :
817                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
818                         ice_and_bitmap(params->ptypes, params->ptypes, src,
819                                        ICE_FLOW_PTYPE_MAX);
820                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
821                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
822                         src = !i ? (const ice_bitmap_t *)ice_ipv4_ofos_no_l4 :
823                                 (const ice_bitmap_t *)ice_ipv4_il_no_l4;
824                         ice_and_bitmap(params->ptypes, params->ptypes, src,
825                                        ICE_FLOW_PTYPE_MAX);
826                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
827                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
828                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
829                         ice_and_bitmap(params->ptypes, params->ptypes, src,
830                                        ICE_FLOW_PTYPE_MAX);
831                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
832                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
833                         src = !i ? (const ice_bitmap_t *)ice_ipv6_ofos_no_l4 :
834                                 (const ice_bitmap_t *)ice_ipv6_il_no_l4;
835                         ice_and_bitmap(params->ptypes, params->ptypes, src,
836                                        ICE_FLOW_PTYPE_MAX);
837                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
838                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
839                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
840                         ice_and_bitmap(params->ptypes, params->ptypes, src,
841                                        ICE_FLOW_PTYPE_MAX);
842                 }
843
844                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
845                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
846                         ice_and_bitmap(params->ptypes, params->ptypes,
847                                        src, ICE_FLOW_PTYPE_MAX);
848                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
849                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
850                         ice_and_bitmap(params->ptypes, params->ptypes, src,
851                                        ICE_FLOW_PTYPE_MAX);
852                 } else {
853                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
854                         ice_andnot_bitmap(params->ptypes, params->ptypes, src,
855                                           ICE_FLOW_PTYPE_MAX);
856                 }
857
858                 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
859                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
860                         ice_and_bitmap(params->ptypes, params->ptypes, src,
861                                        ICE_FLOW_PTYPE_MAX);
862                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
863                         ice_and_bitmap(params->ptypes, params->ptypes,
864                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
865                                        ICE_FLOW_PTYPE_MAX);
866                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
867                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
868                         ice_and_bitmap(params->ptypes, params->ptypes, src,
869                                        ICE_FLOW_PTYPE_MAX);
870                 }
871
872                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
873                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
874                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
875                         ice_and_bitmap(params->ptypes, params->ptypes, src,
876                                        ICE_FLOW_PTYPE_MAX);
877                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
878                         if (!i) {
879                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
880                                 ice_and_bitmap(params->ptypes, params->ptypes,
881                                                src, ICE_FLOW_PTYPE_MAX);
882                         }
883                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
884                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
885                         ice_and_bitmap(params->ptypes, params->ptypes,
886                                        src, ICE_FLOW_PTYPE_MAX);
887                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
888                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
889                         ice_and_bitmap(params->ptypes, params->ptypes,
890                                        src, ICE_FLOW_PTYPE_MAX);
891                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
892                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
893                         ice_and_bitmap(params->ptypes, params->ptypes,
894                                        src, ICE_FLOW_PTYPE_MAX);
895
896                         /* Attributes for GTP packet with downlink */
897                         params->attr = ice_attr_gtpu_down;
898                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
899                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
900                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
901                         ice_and_bitmap(params->ptypes, params->ptypes,
902                                        src, ICE_FLOW_PTYPE_MAX);
903
904                         /* Attributes for GTP packet with uplink */
905                         params->attr = ice_attr_gtpu_up;
906                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
907                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
908                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
909                         ice_and_bitmap(params->ptypes, params->ptypes,
910                                        src, ICE_FLOW_PTYPE_MAX);
911
912                         /* Attributes for GTP packet with Extension Header */
913                         params->attr = ice_attr_gtpu_eh;
914                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
915                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
916                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
917                         ice_and_bitmap(params->ptypes, params->ptypes,
918                                        src, ICE_FLOW_PTYPE_MAX);
919                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
920                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
921                         ice_and_bitmap(params->ptypes, params->ptypes,
922                                        src, ICE_FLOW_PTYPE_MAX);
923                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
924                         src = (const ice_bitmap_t *)ice_ptypes_esp;
925                         ice_and_bitmap(params->ptypes, params->ptypes,
926                                        src, ICE_FLOW_PTYPE_MAX);
927                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
928                         src = (const ice_bitmap_t *)ice_ptypes_ah;
929                         ice_and_bitmap(params->ptypes, params->ptypes,
930                                        src, ICE_FLOW_PTYPE_MAX);
931                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
932                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
933                         ice_and_bitmap(params->ptypes, params->ptypes,
934                                        src, ICE_FLOW_PTYPE_MAX);
935                 }
936
937                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
938                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
939                                 src =
940                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
941                         else
942                                 src =
943                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
944
945                         ice_and_bitmap(params->ptypes, params->ptypes,
946                                        src, ICE_FLOW_PTYPE_MAX);
947                 } else {
948                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
949                         ice_andnot_bitmap(params->ptypes, params->ptypes,
950                                           src, ICE_FLOW_PTYPE_MAX);
951
952                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
953                         ice_andnot_bitmap(params->ptypes, params->ptypes,
954                                           src, ICE_FLOW_PTYPE_MAX);
955                 }
956         }
957
958         return ICE_SUCCESS;
959 }
960
961 /**
962  * ice_flow_xtract_pkt_flags - Create an extraction sequence entry for packet flags
963  * @hw: pointer to the HW struct
964  * @params: information about the flow to be processed
965  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
966  *
967  * This function allocates an extraction sequence entry for a DWORD-size
968  * chunk of the packet flags.
969  */
970 static enum ice_status
971 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
972                           struct ice_flow_prof_params *params,
973                           enum ice_flex_mdid_pkt_flags flags)
974 {
975         u8 fv_words = hw->blk[params->blk].es.fvw;
976         u8 idx;
977
978         /* Make sure the number of extraction sequence entries required does not
979          * exceed the block's capacity.
980          */
981         if (params->es_cnt >= fv_words)
982                 return ICE_ERR_MAX_LIMIT;
983
984         /* some blocks require a reversed field vector layout */
985         if (hw->blk[params->blk].es.reverse)
986                 idx = fv_words - params->es_cnt - 1;
987         else
988                 idx = params->es_cnt;
989
990         params->es[idx].prot_id = ICE_PROT_META_ID;
991         params->es[idx].off = flags;
992         params->es_cnt++;
993
994         return ICE_SUCCESS;
995 }
996
997 /**
998  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
999  * @hw: pointer to the HW struct
1000  * @params: information about the flow to be processed
1001  * @seg: packet segment index of the field to be extracted
1002  * @fld: ID of field to be extracted
1003  * @match: bitmap of all the fields being matched in this segment
1004  *
1005  * This function determines the protocol ID, offset, and size of the given
1006  * field. It then allocates one or more extraction sequence entries for the
1007  * given field, and fills the entries with protocol ID and offset information.
1008  */
1009 static enum ice_status
1010 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1011                     u8 seg, enum ice_flow_field fld, u64 match)
1012 {
1013         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1014         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1015         u8 fv_words = hw->blk[params->blk].es.fvw;
1016         struct ice_flow_fld_info *flds;
1017         u16 cnt, ese_bits, i;
1018         u16 sib_mask = 0;
1019         u16 mask;
1020         u16 off;
1021
1022         flds = params->prof->segs[seg].fields;
1023
1024         switch (fld) {
1025         case ICE_FLOW_FIELD_IDX_ETH_DA:
1026         case ICE_FLOW_FIELD_IDX_ETH_SA:
1027         case ICE_FLOW_FIELD_IDX_S_VLAN:
1028         case ICE_FLOW_FIELD_IDX_C_VLAN:
1029                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1030                 break;
1031         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1032                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1033                 break;
1034         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1035                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1036                 break;
1037         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1038                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1039                 break;
1040         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1041         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1042                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1043
1044                 /* TTL and PROT share the same extraction seq. entry.
1045                  * Each is considered a sibling to the other in terms of sharing
1046                  * the same extraction sequence entry.
1047                  */
1048                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1049                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1050                 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
1051                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1052
1053                 /* If the sibling field is also included, that field's
1054                  * mask needs to be included.
1055                  */
1056                 if (match & BIT(sib))
1057                         sib_mask = ice_flds_info[sib].mask;
1058                 break;
1059         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1060         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1061                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1062
1063                 /* TTL and PROT share the same extraction seq. entry.
1064                  * Each is considered a sibling to the other in terms of sharing
1065                  * the same extraction sequence entry.
1066                  */
1067                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1068                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1069                 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
1070                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1071
1072                 /* If the sibling field is also included, that field's
1073                  * mask needs to be included.
1074                  */
1075                 if (match & BIT(sib))
1076                         sib_mask = ice_flds_info[sib].mask;
1077                 break;
1078         case ICE_FLOW_FIELD_IDX_IPV4_SA:
1079         case ICE_FLOW_FIELD_IDX_IPV4_DA:
1080                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1081                 break;
1082         case ICE_FLOW_FIELD_IDX_IPV6_SA:
1083         case ICE_FLOW_FIELD_IDX_IPV6_DA:
1084         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1085         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1086         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1087         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1088         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1089         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1090                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1091                 break;
1092         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1093         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1094         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1095                 prot_id = ICE_PROT_TCP_IL;
1096                 break;
1097         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1098         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1099                 prot_id = ICE_PROT_UDP_IL_OR_S;
1100                 break;
1101         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1102         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1103                 prot_id = ICE_PROT_SCTP_IL;
1104                 break;
1105         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1106         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1107         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1108         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1109         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1110         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1111                 /* GTP is accessed through UDP OF protocol */
1112                 prot_id = ICE_PROT_UDP_OF;
1113                 break;
1114         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1115                 prot_id = ICE_PROT_PPPOE;
1116                 break;
1117         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1118                 prot_id = ICE_PROT_UDP_IL_OR_S;
1119                 break;
1120         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1121                 prot_id = ICE_PROT_L2TPV3;
1122                 break;
1123         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1124                 prot_id = ICE_PROT_ESP_F;
1125                 break;
1126         case ICE_FLOW_FIELD_IDX_AH_SPI:
1127                 prot_id = ICE_PROT_ESP_2;
1128                 break;
1129         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1130                 prot_id = ICE_PROT_UDP_IL_OR_S;
1131                 break;
1132         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1133         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1134         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1135         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1136         case ICE_FLOW_FIELD_IDX_ARP_OP:
1137                 prot_id = ICE_PROT_ARP_OF;
1138                 break;
1139         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1140         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1141                 /* ICMP type and code share the same extraction seq. entry */
1142                 prot_id = (params->prof->segs[seg].hdrs &
1143                            ICE_FLOW_SEG_HDR_IPV4) ?
1144                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1145                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1146                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1147                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1148                 break;
1149         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1150                 prot_id = ICE_PROT_GRE_OF;
1151                 break;
1152         default:
1153                 return ICE_ERR_NOT_IMPL;
1154         }
1155
1156         /* Each extraction sequence entry is a word in size and extracts data
1157          * from a word-aligned offset within a protocol header.
1158          */
1159         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1160
1161         flds[fld].xtrct.prot_id = prot_id;
1162         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1163                 ICE_FLOW_FV_EXTRACT_SZ;
1164         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1165         flds[fld].xtrct.idx = params->es_cnt;
1166         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1167
1168         /* Adjust the next field-entry index after accommodating the number of
1169          * entries this field consumes
1170          */
1171         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1172                                   ice_flds_info[fld].size, ese_bits);
1173
1174         /* Fill in the extraction sequence entries needed for this field */
1175         off = flds[fld].xtrct.off;
1176         mask = flds[fld].xtrct.mask;
1177         for (i = 0; i < cnt; i++) {
1178                 /* Only consume an extraction sequence entry if there is no
1179                  * sibling field associated with this field or the sibling entry
1180                  * already extracts the word shared with this field.
1181                  */
1182                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1183                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1184                     flds[sib].xtrct.off != off) {
1185                         u8 idx;
1186
1187                         /* Make sure the number of extraction sequence entries
1188                          * required does not exceed the block's capability
1189                          */
1190                         if (params->es_cnt >= fv_words)
1191                                 return ICE_ERR_MAX_LIMIT;
1192
1193                         /* some blocks require a reversed field vector layout */
1194                         if (hw->blk[params->blk].es.reverse)
1195                                 idx = fv_words - params->es_cnt - 1;
1196                         else
1197                                 idx = params->es_cnt;
1198
1199                         params->es[idx].prot_id = prot_id;
1200                         params->es[idx].off = off;
1201                         params->mask[idx] = mask | sib_mask;
1202                         params->es_cnt++;
1203                 }
1204
1205                 off += ICE_FLOW_FV_EXTRACT_SZ;
1206         }
1207
1208         return ICE_SUCCESS;
1209 }
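/* Illustrative sketch (compiled out, not part of the driver): how a field's
 * bit offset and bit size, as stored in ice_flds_info, map to the values
 * written into the extraction descriptor above. The helper name and its
 * out-parameters are hypothetical; the arithmetic mirrors ice_flow_xtract_fld.
 * For example, with 2-byte extraction words, a 16-bit field starting at bit 8
 * spans two of them.
 */
#if 0
static void
example_calc_xtrct(u16 off_bits, u16 size_bits, u16 *word_off, u8 *disp,
		   u16 *word_cnt)
{
	/* Each extraction sequence entry covers one field vector word */
	u16 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;

	/* Byte offset of the word-aligned start of the field */
	*word_off = (off_bits / ese_bits) * ICE_FLOW_FV_EXTRACT_SZ;
	/* Bit displacement of the field inside that first word */
	*disp = (u8)(off_bits % ese_bits);
	/* Words needed to cover the displacement plus the field itself */
	*word_cnt = DIVIDE_AND_ROUND_UP(*disp + size_bits, ese_bits);
}
#endif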
1210
1211 /**
1212  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1213  * @hw: pointer to the HW struct
1214  * @params: information about the flow to be processed
1215  * @seg: index of packet segment whose raw fields are to be extracted
1216  */
1217 static enum ice_status
1218 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1219                      u8 seg)
1220 {
1221         u16 fv_words;
1222         u16 hdrs_sz;
1223         u8 i;
1224
1225         if (!params->prof->segs[seg].raws_cnt)
1226                 return ICE_SUCCESS;
1227
1228         if (params->prof->segs[seg].raws_cnt >
1229             ARRAY_SIZE(params->prof->segs[seg].raws))
1230                 return ICE_ERR_MAX_LIMIT;
1231
1232         /* Offsets within the segment headers are not supported */
1233         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1234         if (!hdrs_sz)
1235                 return ICE_ERR_PARAM;
1236
1237         fv_words = hw->blk[params->blk].es.fvw;
1238
1239         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1240                 struct ice_flow_seg_fld_raw *raw;
1241                 u16 off, cnt, j;
1242
1243                 raw = &params->prof->segs[seg].raws[i];
1244
1245                 /* Storing extraction information */
1246                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1247                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1248                         ICE_FLOW_FV_EXTRACT_SZ;
1249                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1250                         BITS_PER_BYTE;
1251                 raw->info.xtrct.idx = params->es_cnt;
1252
1253                 /* Determine the number of field vector entries this raw field
1254                  * consumes.
1255                  */
1256                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1257                                           (raw->info.src.last * BITS_PER_BYTE),
1258                                           (ICE_FLOW_FV_EXTRACT_SZ *
1259                                            BITS_PER_BYTE));
1260                 off = raw->info.xtrct.off;
1261                 for (j = 0; j < cnt; j++) {
1262                         u16 idx;
1263
1264                         /* Make sure the number of extraction sequence entries
1265                          * required does not exceed the block's capability
1266                          */
1267                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1268                             params->es_cnt >= ICE_MAX_FV_WORDS)
1269                                 return ICE_ERR_MAX_LIMIT;
1270
1271                         /* some blocks require a reversed field vector layout */
1272                         if (hw->blk[params->blk].es.reverse)
1273                                 idx = fv_words - params->es_cnt - 1;
1274                         else
1275                                 idx = params->es_cnt;
1276
1277                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1278                         params->es[idx].off = off;
1279                         params->es_cnt++;
1280                         off += ICE_FLOW_FV_EXTRACT_SZ;
1281                 }
1282         }
1283
1284         return ICE_SUCCESS;
1285 }
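/* Illustrative sketch (compiled out): the index mapping used above when a
 * block requires a reversed field vector layout. With reverse set, entry 0
 * lands in the last field vector word, entry 1 in the second to last, and so
 * on; otherwise entries are placed in order. For example, with a 48-word
 * vector the first three entries occupy slots 47, 46 and 45. The helper name
 * is hypothetical.
 */
#if 0
static u16 example_fv_slot(bool reverse, u16 fv_words, u16 es_cnt)
{
	return reverse ? fv_words - es_cnt - 1 : es_cnt;
}
#endif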
1286
1287 /**
1288  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1289  * @hw: pointer to the HW struct
1290  * @params: information about the flow to be processed
1291  *
1292  * This function iterates through all matched fields in the given segments, and
1293  * creates an extraction sequence for the fields.
1294  */
1295 static enum ice_status
1296 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1297                           struct ice_flow_prof_params *params)
1298 {
1299         enum ice_status status = ICE_SUCCESS;
1300         u8 i;
1301
1302         /* For ACL, we also need to extract the direction bit (Rx/Tx) from the
1303          * packet flags
1304          */
1305         if (params->blk == ICE_BLK_ACL) {
1306                 status = ice_flow_xtract_pkt_flags(hw, params,
1307                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1308                 if (status)
1309                         return status;
1310         }
1311
1312         for (i = 0; i < params->prof->segs_cnt; i++) {
1313                 u64 match = params->prof->segs[i].match;
1314                 enum ice_flow_field j;
1315
1316                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1317                         const u64 bit = BIT_ULL(j);
1318
1319                         if (match & bit) {
1320                                 status = ice_flow_xtract_fld(hw, params, i, j,
1321                                                              match);
1322                                 if (status)
1323                                         return status;
1324                                 match &= ~bit;
1325                         }
1326                 }
1327
1328                 /* Process raw matching bytes */
1329                 status = ice_flow_xtract_raws(hw, params, i);
1330                 if (status)
1331                         return status;
1332         }
1333
1334         return status;
1335 }
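/* Illustrative sketch (compiled out): the bitmap-walk pattern used above to
 * visit each matched field exactly once. Bits are cleared as they are
 * handled so the loop terminates as soon as the match mask is exhausted.
 * The callback and helper names are hypothetical.
 */
#if 0
static void example_walk_match(u64 match, void (*handle_fld)(u8 fld))
{
	u8 j;

	for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
		const u64 bit = BIT_ULL(j);

		if (match & bit) {
			handle_fld(j);
			match &= ~bit;
		}
	}
}
#endif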
1336
1337 /**
1338  * ice_flow_sel_acl_scen - select the ACL scenario for a flow profile
1339  * @hw: pointer to the hardware structure
1340  * @params: information about the flow to be processed
1341  *
1342  * This function selects the best-fit ACL scenario for the profile, i.e. the
1343  * narrowest scenario whose effective width can hold the computed entry length.
1344  */
1345 static enum ice_status
1346 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1347 {
1348         /* Find the best-fit scenario for the provided match width */
1349         struct ice_acl_scen *cand_scen = NULL, *scen;
1350
1351         if (!hw->acl_tbl)
1352                 return ICE_ERR_DOES_NOT_EXIST;
1353
1354         /* Loop through each scenario and match against the scenario width
1355          * to select the specific scenario
1356          */
1357         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1358                 if (scen->eff_width >= params->entry_length &&
1359                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1360                         cand_scen = scen;
1361         if (!cand_scen)
1362                 return ICE_ERR_DOES_NOT_EXIST;
1363
1364         params->prof->cfg.scen = cand_scen;
1365
1366         return ICE_SUCCESS;
1367 }
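/* Illustrative sketch (compiled out): the best-fit selection rule used above,
 * shown over a plain array of widths instead of the scenario list. The
 * narrowest width that still fits the required entry length wins; if none
 * fits, -1 is returned. Names are hypothetical.
 */
#if 0
static int example_best_fit(const u16 *widths, int n, u16 entry_len)
{
	int best = -1;
	int i;

	for (i = 0; i < n; i++)
		if (widths[i] >= entry_len &&
		    (best < 0 || widths[best] > widths[i]))
			best = i;

	return best;
}
#endif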
1368
1369 /**
1370  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1371  * @params: information about the flow to be processed
1372  */
1373 static enum ice_status
1374 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1375 {
1376         u16 index, i, range_idx = 0;
1377
1378         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1379
1380         for (i = 0; i < params->prof->segs_cnt; i++) {
1381                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1382                 u64 match = seg->match;
1383                 u8 j;
1384
1385                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1386                         struct ice_flow_fld_info *fld;
1387                         const u64 bit = BIT_ULL(j);
1388
1389                         if (!(match & bit))
1390                                 continue;
1391
1392                         fld = &seg->fields[j];
1393                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1394
1395                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1396                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1397
1398                                 /* Range checking only supported for single
1399                                  * words
1400                                  */
1401                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1402                                                         fld->xtrct.disp,
1403                                                         BITS_PER_BYTE * 2) > 1)
1404                                         return ICE_ERR_PARAM;
1405
1406                                 /* Ranges must define low and high values */
1407                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1408                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1409                                         return ICE_ERR_PARAM;
1410
1411                                 fld->entry.val = range_idx++;
1412                         } else {
1413                                 /* Store adjusted byte-length of field for later
1414                                  * use, taking into account potential
1415                                  * non-byte-aligned displacement
1416                                  */
1417                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1418                                         (ice_flds_info[j].size +
1419                                          (fld->xtrct.disp % BITS_PER_BYTE),
1420                                          BITS_PER_BYTE);
1421                                 fld->entry.val = index;
1422                                 index += fld->entry.last;
1423                         }
1424
1425                         match &= ~bit;
1426                 }
1427
1428                 for (j = 0; j < seg->raws_cnt; j++) {
1429                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1430
1431                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1432                         raw->info.entry.val = index;
1433                         raw->info.entry.last = raw->info.src.last;
1434                         index += raw->info.entry.last;
1435                 }
1436         }
1437
1438         /* Currently only support using the byte selection base, which only
1439          * allows for an effective entry size of 30 bytes. Reject anything
1440          * larger.
1441          */
1442         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1443                 return ICE_ERR_PARAM;
1444
1445         /* Only 8 range checkers per profile, reject anything trying to use
1446          * more
1447          */
1448         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1449                 return ICE_ERR_PARAM;
1450
1451         /* Store # bytes required for entry for later use */
1452         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1453
1454         return ICE_SUCCESS;
1455 }
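/* Illustrative sketch (compiled out): the byte-length calculation used above
 * for a non-range field, accounting for a displacement that is not
 * byte-aligned. For instance, a 6-bit field displaced by 4 bits needs
 * DIVIDE_AND_ROUND_UP(6 + 4, 8) = 2 bytes of the entry. The helper name is
 * hypothetical.
 */
#if 0
static u16 example_entry_bytes(u16 size_bits, u16 disp_bits)
{
	return DIVIDE_AND_ROUND_UP(size_bits + disp_bits % BITS_PER_BYTE,
				   BITS_PER_BYTE);
}
#endif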
1456
1457 /**
1458  * ice_flow_proc_segs - process all packet segments associated with a profile
1459  * @hw: pointer to the HW struct
1460  * @params: information about the flow to be processed
1461  */
1462 static enum ice_status
1463 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1464 {
1465         enum ice_status status;
1466
1467         status = ice_flow_proc_seg_hdrs(params);
1468         if (status)
1469                 return status;
1470
1471         status = ice_flow_create_xtrct_seq(hw, params);
1472         if (status)
1473                 return status;
1474
1475         switch (params->blk) {
1476         case ICE_BLK_FD:
1477         case ICE_BLK_RSS:
1478                 status = ICE_SUCCESS;
1479                 break;
1480         case ICE_BLK_ACL:
1481                 status = ice_flow_acl_def_entry_frmt(params);
1482                 if (status)
1483                         return status;
1484                 status = ice_flow_sel_acl_scen(hw, params);
1485                 if (status)
1486                         return status;
1487                 break;
1488         default:
1489                 return ICE_ERR_NOT_IMPL;
1490         }
1491
1492         return status;
1493 }
1494
1495 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1496 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1497 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1498
1499 /**
1500  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1501  * @hw: pointer to the HW struct
1502  * @blk: classification stage
1503  * @dir: flow direction
1504  * @segs: array of one or more packet segments that describe the flow
1505  * @segs_cnt: number of packet segments provided
1506  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1507  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1508  */
1509 static struct ice_flow_prof *
1510 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1511                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1512                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1513 {
1514         struct ice_flow_prof *p, *prof = NULL;
1515
1516         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1517         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1518                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1519                     segs_cnt && segs_cnt == p->segs_cnt) {
1520                         u8 i;
1521
1522                         /* Check for profile-VSI association if specified */
1523                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1524                             ice_is_vsi_valid(hw, vsi_handle) &&
1525                             !ice_is_bit_set(p->vsis, vsi_handle))
1526                                 continue;
1527
1528                         /* Protocol headers must be checked. Matched fields are
1529                          * checked if specified.
1530                          */
1531                         for (i = 0; i < segs_cnt; i++)
1532                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1533                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1534                                      segs[i].match != p->segs[i].match))
1535                                         break;
1536
1537                         /* A match is found if all segments are matched */
1538                         if (i == segs_cnt) {
1539                                 prof = p;
1540                                 break;
1541                         }
1542                 }
1543         ice_release_lock(&hw->fl_profs_locks[blk]);
1544
1545         return prof;
1546 }
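/* Illustrative sketch (compiled out): how the lookup conditions above can be
 * combined. This asks for a profile whose matched fields are identical to the
 * given segments and which is already associated with the given VSI,
 * regardless of flow direction. The wrapper name and its arguments are
 * hypothetical.
 */
#if 0
static struct ice_flow_prof *
example_find_vsi_prof(struct ice_hw *hw, enum ice_block blk,
		      struct ice_flow_seg_info *segs, u8 segs_cnt,
		      u16 vsi_handle)
{
	return ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI |
					ICE_FLOW_FIND_PROF_NOT_CHK_DIR);
}
#endif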
1547
1548 /**
1549  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1550  * @hw: pointer to the HW struct
1551  * @blk: classification stage
1552  * @dir: flow direction
1553  * @segs: array of one or more packet segments that describe the flow
1554  * @segs_cnt: number of packet segments provided
1555  */
1556 u64
1557 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1558                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1559 {
1560         struct ice_flow_prof *p;
1561
1562         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1563                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1564
1565         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1566 }
1567
1568 /**
1569  * ice_flow_find_prof_id - Look up a profile with given profile ID
1570  * @hw: pointer to the HW struct
1571  * @blk: classification stage
1572  * @prof_id: unique ID to identify this flow profile
1573  */
1574 static struct ice_flow_prof *
1575 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1576 {
1577         struct ice_flow_prof *p;
1578
1579         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1580                 if (p->id == prof_id)
1581                         return p;
1582
1583         return NULL;
1584 }
1585
1586 /**
1587  * ice_dealloc_flow_entry - Deallocate flow entry memory
1588  * @hw: pointer to the HW struct
1589  * @entry: flow entry to be removed
1590  */
1591 static void
1592 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1593 {
1594         if (!entry)
1595                 return;
1596
1597         if (entry->entry)
1598                 ice_free(hw, entry->entry);
1599
1600         if (entry->range_buf) {
1601                 ice_free(hw, entry->range_buf);
1602                 entry->range_buf = NULL;
1603         }
1604
1605         if (entry->acts) {
1606                 ice_free(hw, entry->acts);
1607                 entry->acts = NULL;
1608                 entry->acts_cnt = 0;
1609         }
1610
1611         ice_free(hw, entry);
1612 }
1613
1614 #define ICE_ACL_INVALID_SCEN    0x3f
1615
1616 /**
1617  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
1618  * @hw: pointer to the hardware structure
1619  * @prof: pointer to flow profile
1620  * @buf: destination buffer the function writes the partial extraction sequence to
1621  *
1622  * Returns ICE_SUCCESS if no PF is associated with the given profile,
1623  * ICE_ERR_IN_USE if at least one PF is associated with it, or another
1624  * error code on a real failure.
1625  */
1626 static enum ice_status
1627 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1628                             struct ice_aqc_acl_prof_generic_frmt *buf)
1629 {
1630         enum ice_status status;
1631         u8 prof_id = 0;
1632
1633         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1634         if (status)
1635                 return status;
1636
1637         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1638         if (status)
1639                 return status;
1640
1641         /* If the scenarios associated with all PFs are either all 0 or all
1642          * ICE_ACL_INVALID_SCEN (63) for the given profile, then the profile
1643          * is not currently in use by any PF.
1644          */
1645         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1646             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1647             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1648             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1649                 return ICE_SUCCESS;
1650
1651         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1652             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1653             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1654             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1655             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1656             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1657             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1658             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1659                 return ICE_SUCCESS;
1660         else
1661                 return ICE_ERR_IN_USE;
1662 }
1663
1664 /**
1665  * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1666  * @hw: pointer to the hardware structure
1667  * @acts: array of actions to be performed on a match
1668  * @acts_cnt: number of actions
1669  */
1670 static enum ice_status
1671 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1672                            u8 acts_cnt)
1673 {
1674         int i;
1675
1676         for (i = 0; i < acts_cnt; i++) {
1677                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1678                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1679                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1680                         struct ice_acl_cntrs cntrs;
1681                         enum ice_status status;
1682
1683                         cntrs.bank = 0; /* Only bank0 for the moment */
1684                         cntrs.first_cntr =
1685                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1686                         cntrs.last_cntr =
1687                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1688
1689                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1690                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1691                         else
1692                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1693
1694                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1695                         if (status)
1696                                 return status;
1697                 }
1698         }
1699         return ICE_SUCCESS;
1700 }
1701
1702 /**
1703  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1704  * @hw: pointer to the hardware structure
1705  * @prof: pointer to flow profile
1706  *
1707  * Disassociate the scenario from the profile for the PF of the VSI.
1708  */
1709 static enum ice_status
1710 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1711 {
1712         struct ice_aqc_acl_prof_generic_frmt buf;
1713         enum ice_status status = ICE_SUCCESS;
1714         u8 prof_id = 0;
1715
1716         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1717
1718         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1719         if (status)
1720                 return status;
1721
1722         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1723         if (status)
1724                 return status;
1725
1726         /* Clear scenario for this PF */
1727         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1728         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1729
1730         return status;
1731 }
1732
1733 /**
1734  * ice_flow_rem_entry_sync - Remove a flow entry
1735  * @hw: pointer to the HW struct
1736  * @blk: classification stage
1737  * @entry: flow entry to be removed
1738  */
1739 static enum ice_status
1740 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1741                         struct ice_flow_entry *entry)
1742 {
1743         if (!entry)
1744                 return ICE_ERR_BAD_PTR;
1745
1746         if (blk == ICE_BLK_ACL) {
1747                 enum ice_status status;
1748
1749                 if (!entry->prof)
1750                         return ICE_ERR_BAD_PTR;
1751
1752                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1753                                            entry->scen_entry_idx);
1754                 if (status)
1755                         return status;
1756
1757                 /* Checks if we need to release an ACL counter. */
1758                 if (entry->acts_cnt && entry->acts)
1759                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1760                                                    entry->acts_cnt);
1761         }
1762
1763         LIST_DEL(&entry->l_entry);
1764
1765         ice_dealloc_flow_entry(hw, entry);
1766
1767         return ICE_SUCCESS;
1768 }
1769
1770 /**
1771  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1772  * @hw: pointer to the HW struct
1773  * @blk: classification stage
1774  * @dir: flow direction
1775  * @prof_id: unique ID to identify this flow profile
1776  * @segs: array of one or more packet segments that describe the flow
1777  * @segs_cnt: number of packet segments provided
1778  * @acts: array of default actions
1779  * @acts_cnt: number of default actions
1780  * @prof: stores the returned flow profile added
1781  *
1782  * Assumption: the caller has acquired the lock to the profile list
1783  */
1784 static enum ice_status
1785 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1786                        enum ice_flow_dir dir, u64 prof_id,
1787                        struct ice_flow_seg_info *segs, u8 segs_cnt,
1788                        struct ice_flow_action *acts, u8 acts_cnt,
1789                        struct ice_flow_prof **prof)
1790 {
1791         struct ice_flow_prof_params params;
1792         enum ice_status status;
1793         u8 i;
1794
1795         if (!prof || (acts_cnt && !acts))
1796                 return ICE_ERR_BAD_PTR;
1797
1798         ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
1799         params.prof = (struct ice_flow_prof *)
1800                 ice_malloc(hw, sizeof(*params.prof));
1801         if (!params.prof)
1802                 return ICE_ERR_NO_MEMORY;
1803
1804         /* initialize extraction sequence to all invalid (0xff) */
1805         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1806                 params.es[i].prot_id = ICE_PROT_INVALID;
1807                 params.es[i].off = ICE_FV_OFFSET_INVAL;
1808         }
1809
1810         params.blk = blk;
1811         params.prof->id = prof_id;
1812         params.prof->dir = dir;
1813         params.prof->segs_cnt = segs_cnt;
1814
1815         /* Make a copy of the segments that need to be persistent in the flow
1816          * profile instance
1817          */
1818         for (i = 0; i < segs_cnt; i++)
1819                 ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
1820                            ICE_NONDMA_TO_NONDMA);
1821
1822         /* Make a copy of the actions that need to be persistent in the flow
1823          * profile instance.
1824          */
1825         if (acts_cnt) {
1826                 params.prof->acts = (struct ice_flow_action *)
1827                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1828                                    ICE_NONDMA_TO_NONDMA);
1829
1830                 if (!params.prof->acts) {
1831                         status = ICE_ERR_NO_MEMORY;
1832                         goto out;
1833                 }
1834         }
1835
1836         status = ice_flow_proc_segs(hw, &params);
1837         if (status) {
1838                 ice_debug(hw, ICE_DBG_FLOW,
1839                           "Error processing a flow's packet segments\n");
1840                 goto out;
1841         }
1842
1843         /* Add a HW profile for this flow profile */
1844         status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1845                               params.attr, params.attr_cnt, params.es,
1846                               params.mask);
1847         if (status) {
1848                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1849                 goto out;
1850         }
1851
1852         INIT_LIST_HEAD(&params.prof->entries);
1853         ice_init_lock(&params.prof->entries_lock);
1854         *prof = params.prof;
1855
1856 out:
1857         if (status) {
1858                 if (params.prof->acts)
1859                         ice_free(hw, params.prof->acts);
1860                 ice_free(hw, params.prof);
1861         }
1862
1863         return status;
1864 }
1865
1866 /**
1867  * ice_flow_rem_prof_sync - remove a flow profile
1868  * @hw: pointer to the hardware structure
1869  * @blk: classification stage
1870  * @prof: pointer to flow profile to remove
1871  *
1872  * Assumption: the caller has acquired the lock to the profile list
1873  */
1874 static enum ice_status
1875 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1876                        struct ice_flow_prof *prof)
1877 {
1878         enum ice_status status;
1879
1880         /* Remove all remaining flow entries before removing the flow profile */
1881         if (!LIST_EMPTY(&prof->entries)) {
1882                 struct ice_flow_entry *e, *t;
1883
1884                 ice_acquire_lock(&prof->entries_lock);
1885
1886                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1887                                          l_entry) {
1888                         status = ice_flow_rem_entry_sync(hw, blk, e);
1889                         if (status)
1890                                 break;
1891                 }
1892
1893                 ice_release_lock(&prof->entries_lock);
1894         }
1895
1896         if (blk == ICE_BLK_ACL) {
1897                 struct ice_aqc_acl_profile_ranges query_rng_buf;
1898                 struct ice_aqc_acl_prof_generic_frmt buf;
1899                 u8 prof_id = 0;
1900
1901                 /* Disassociate the scenario from the profile for the PF */
1902                 status = ice_flow_acl_disassoc_scen(hw, prof);
1903                 if (status)
1904                         return status;
1905
1906                 /* Clear the range-checker if the profile ID is no longer
1907                  * used by any PF
1908                  */
1909                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1910                 if (status && status != ICE_ERR_IN_USE) {
1911                         return status;
1912                 } else if (!status) {
1913                         /* Clear the range-checker value for profile ID */
1914                         ice_memset(&query_rng_buf, 0,
1915                                    sizeof(struct ice_aqc_acl_profile_ranges),
1916                                    ICE_NONDMA_MEM);
1917
1918                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
1919                                                       &prof_id);
1920                         if (status)
1921                                 return status;
1922
1923                         status = ice_prog_acl_prof_ranges(hw, prof_id,
1924                                                           &query_rng_buf, NULL);
1925                         if (status)
1926                                 return status;
1927                 }
1928         }
1929
1930         /* Remove all hardware profiles associated with this flow profile */
1931         status = ice_rem_prof(hw, blk, prof->id);
1932         if (!status) {
1933                 LIST_DEL(&prof->l_entry);
1934                 ice_destroy_lock(&prof->entries_lock);
1935                 if (prof->acts)
1936                         ice_free(hw, prof->acts);
1937                 ice_free(hw, prof);
1938         }
1939
1940         return status;
1941 }
1942
1943 /**
1944  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1945  * @buf: destination buffer the function writes the partial extraction sequence to
1946  * @info: Info about field
1947  */
1948 static void
1949 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1950                                struct ice_flow_fld_info *info)
1951 {
1952         u16 dst, i;
1953         u8 src;
1954
1955         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1956                 info->xtrct.disp / BITS_PER_BYTE;
1957         dst = info->entry.val;
1958         for (i = 0; i < info->entry.last; i++)
1959                 /* HW stores field vector words in LE, convert words back to BE
1960                  * so constructed entries will end up in network order
1961                  */
1962                 buf->byte_selection[dst++] = src++ ^ 1;
1963 }
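/* Illustrative sketch (compiled out): the effect of the "src ^ 1" above.
 * Field vector words are stored little-endian, so XOR-ing a byte index with 1
 * swaps the two bytes of each 16-bit word, yielding big-endian (network)
 * byte order: indices 0, 1, 2, 3 select bytes 1, 0, 3, 2. Shown here as a
 * direct byte copy rather than a byte-selection table; it assumes
 * word-aligned, even-length data. Names are hypothetical.
 */
#if 0
static void example_le_words_to_be(const u8 *fv_bytes, u8 *out, u16 len)
{
	u16 i;

	for (i = 0; i < len; i++)
		out[i] = fv_bytes[i ^ 1];
}
#endif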
1964
1965 /**
1966  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1967  * @hw: pointer to the hardware structure
1968  * @prof: pointer to flow profile
1969  */
1970 static enum ice_status
1971 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
1972 {
1973         struct ice_aqc_acl_prof_generic_frmt buf;
1974         struct ice_flow_fld_info *info;
1975         enum ice_status status;
1976         u8 prof_id = 0;
1977         u16 i;
1978
1979         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1980
1981         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1982         if (status)
1983                 return status;
1984
1985         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1986         if (status && status != ICE_ERR_IN_USE)
1987                 return status;
1988
1989         if (!status) {
1990                 /* Program the profile-dependent configuration. This is done
1991                  * only once regardless of the number of PFs using that profile
1992                  */
1993                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1994
1995                 for (i = 0; i < prof->segs_cnt; i++) {
1996                         struct ice_flow_seg_info *seg = &prof->segs[i];
1997                         u64 match = seg->match;
1998                         u16 j;
1999
2000                         for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2001                                 const u64 bit = BIT_ULL(j);
2002
2003                                 if (!(match & bit))
2004                                         continue;
2005
2006                                 info = &seg->fields[j];
2007
2008                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2009                                         buf.word_selection[info->entry.val] =
2010                                                                 info->xtrct.idx;
2011                                 else
2012                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
2013                                                                        info);
2014
2015                                 match &= ~bit;
2016                         }
2017
2018                         for (j = 0; j < seg->raws_cnt; j++) {
2019                                 info = &seg->raws[j].info;
2020                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2021                         }
2022                 }
2023
2024                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2025                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2026                            ICE_NONDMA_MEM);
2027         }
2028
2029         /* Update the current PF */
2030         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2031         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
2032
2033         return status;
2034 }
2035
2036 /**
2037  * ice_flow_assoc_vsig_vsi - associate a VSI with a VSIG
2038  * @hw: pointer to the hardware structure
2039  * @blk: classification stage
2040  * @vsi_handle: software VSI handle
2041  * @vsig: target VSI group
2042  *
2043  * Assumption: the caller has already verified that the VSI to
2044  * be added has the same characteristics as the VSIG and will
2045  * thereby have access to all resources added to that VSIG.
2046  */
2047 enum ice_status
2048 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2049                         u16 vsig)
2050 {
2051         enum ice_status status;
2052
2053         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2054                 return ICE_ERR_PARAM;
2055
2056         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2057         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2058                                   vsig);
2059         ice_release_lock(&hw->fl_profs_locks[blk]);
2060
2061         return status;
2062 }
2063
2064 /**
2065  * ice_flow_assoc_prof - associate a VSI with a flow profile
2066  * @hw: pointer to the hardware structure
2067  * @blk: classification stage
2068  * @prof: pointer to flow profile
2069  * @vsi_handle: software VSI handle
2070  *
2071  * Assumption: the caller has acquired the lock to the profile list
2072  * and the software VSI handle has been validated
2073  */
2074 static enum ice_status
2075 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2076                     struct ice_flow_prof *prof, u16 vsi_handle)
2077 {
2078         enum ice_status status = ICE_SUCCESS;
2079
2080         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2081                 if (blk == ICE_BLK_ACL) {
2082                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2083                         if (status)
2084                                 return status;
2085                 }
2086                 status = ice_add_prof_id_flow(hw, blk,
2087                                               ice_get_hw_vsi_num(hw,
2088                                                                  vsi_handle),
2089                                               prof->id);
2090                 if (!status)
2091                         ice_set_bit(vsi_handle, prof->vsis);
2092                 else
2093                         ice_debug(hw, ICE_DBG_FLOW,
2094                                   "HW profile add failed, %d\n",
2095                                   status);
2096         }
2097
2098         return status;
2099 }
2100
2101 /**
2102  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2103  * @hw: pointer to the hardware structure
2104  * @blk: classification stage
2105  * @prof: pointer to flow profile
2106  * @vsi_handle: software VSI handle
2107  *
2108  * Assumption: the caller has acquired the lock to the profile list
2109  * and the software VSI handle has been validated
2110  */
2111 static enum ice_status
2112 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2113                        struct ice_flow_prof *prof, u16 vsi_handle)
2114 {
2115         enum ice_status status = ICE_SUCCESS;
2116
2117         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2118                 status = ice_rem_prof_id_flow(hw, blk,
2119                                               ice_get_hw_vsi_num(hw,
2120                                                                  vsi_handle),
2121                                               prof->id);
2122                 if (!status)
2123                         ice_clear_bit(vsi_handle, prof->vsis);
2124                 else
2125                         ice_debug(hw, ICE_DBG_FLOW,
2126                                   "HW profile remove failed, %d\n",
2127                                   status);
2128         }
2129
2130         return status;
2131 }
2132
2133 /**
2134  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2135  * @hw: pointer to the HW struct
2136  * @blk: classification stage
2137  * @dir: flow direction
2138  * @prof_id: unique ID to identify this flow profile
2139  * @segs: array of one or more packet segments that describe the flow
2140  * @segs_cnt: number of packet segments provided
2141  * @acts: array of default actions
2142  * @acts_cnt: number of default actions
2143  * @prof: stores the returned flow profile added
2144  */
2145 enum ice_status
2146 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2147                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2148                   struct ice_flow_action *acts, u8 acts_cnt,
2149                   struct ice_flow_prof **prof)
2150 {
2151         enum ice_status status;
2152
2153         if (segs_cnt > ICE_FLOW_SEG_MAX)
2154                 return ICE_ERR_MAX_LIMIT;
2155
2156         if (!segs_cnt)
2157                 return ICE_ERR_PARAM;
2158
2159         if (!segs)
2160                 return ICE_ERR_BAD_PTR;
2161
2162         status = ice_flow_val_hdrs(segs, segs_cnt);
2163         if (status)
2164                 return status;
2165
2166         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2167
2168         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2169                                         acts, acts_cnt, prof);
2170         if (!status)
2171                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2172
2173         ice_release_lock(&hw->fl_profs_locks[blk]);
2174
2175         return status;
2176 }
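/* Illustrative sketch (compiled out): a minimal call sequence for adding and
 * later removing a flow profile that matches IPv4 source/destination
 * addresses on the flow director block. This is heavily simplified; real
 * callers populate the segment through the field-setup helpers elsewhere in
 * this file. The profile ID value and function name are hypothetical.
 */
#if 0
static enum ice_status example_add_ipv4_prof(struct ice_hw *hw)
{
	struct ice_flow_seg_info segs[1];
	struct ice_flow_prof *prof;
	enum ice_status status;

	ice_memset(segs, 0, sizeof(segs), ICE_NONDMA_MEM);
	segs[0].hdrs = ICE_FLOW_SEG_HDR_IPV4;
	segs[0].match = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
			BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA);

	status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, 0x1234ULL,
				   segs, 1, NULL, 0, &prof);
	if (status)
		return status;

	/* ... associate VSIs and add flow entries here ... */

	return ice_flow_rem_prof(hw, ICE_BLK_FD, 0x1234ULL);
}
#endif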
2177
2178 /**
2179  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2180  * @hw: pointer to the HW struct
2181  * @blk: the block for which the flow profile is to be removed
2182  * @prof_id: unique ID of the flow profile to be removed
2183  */
2184 enum ice_status
2185 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2186 {
2187         struct ice_flow_prof *prof;
2188         enum ice_status status;
2189
2190         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2191
2192         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2193         if (!prof) {
2194                 status = ICE_ERR_DOES_NOT_EXIST;
2195                 goto out;
2196         }
2197
2198         /* prof becomes invalid after the call */
2199         status = ice_flow_rem_prof_sync(hw, blk, prof);
2200
2201 out:
2202         ice_release_lock(&hw->fl_profs_locks[blk]);
2203
2204         return status;
2205 }
2206
2207 /**
2208  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2209  * @hw: pointer to the HW struct
2210  * @blk: classification stage
2211  * @prof_id: the profile ID handle
2212  * @hw_prof_id: pointer to variable to receive the HW profile ID
2213  */
2214 enum ice_status
2215 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2216                      u8 *hw_prof_id)
2217 {
2218         struct ice_prof_map *map;
2219
2220         map = ice_search_prof_id(hw, blk, prof_id);
2221         if (map) {
2222                 *hw_prof_id = map->prof_id;
2223                 return ICE_SUCCESS;
2224         }
2225
2226         return ICE_ERR_DOES_NOT_EXIST;
2227 }
2228
2229 /**
2230  * ice_flow_find_entry - look for a flow entry using its unique ID
2231  * @hw: pointer to the HW struct
2232  * @blk: classification stage
2233  * @entry_id: unique ID to identify this flow entry
2234  *
2235  * This function looks for the flow entry with the specified unique ID in all
2236  * flow profiles of the specified classification stage. If the entry is found,
2237  * it returns the handle to the flow entry. Otherwise, it returns
2238  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2239  */
2240 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2241 {
2242         struct ice_flow_entry *found = NULL;
2243         struct ice_flow_prof *p;
2244
2245         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2246
2247         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2248                 struct ice_flow_entry *e;
2249
2250                 ice_acquire_lock(&p->entries_lock);
2251                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2252                         if (e->id == entry_id) {
2253                                 found = e;
2254                                 break;
2255                         }
2256                 ice_release_lock(&p->entries_lock);
2257
2258                 if (found)
2259                         break;
2260         }
2261
2262         ice_release_lock(&hw->fl_profs_locks[blk]);
2263
2264         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2265 }
2266
2267 /**
2268  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2269  * @hw: pointer to the hardware structure
2270  * @acts: array of actions to be performed on a match
2271  * @acts_cnt: number of actions
2272  * @cnt_alloc: indicates if an ACL counter has been allocated.
2273  */
2274 static enum ice_status
2275 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2276                            u8 acts_cnt, bool *cnt_alloc)
2277 {
2278         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2279         int i;
2280
2281         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2282         *cnt_alloc = false;
2283
2284         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2285                 return ICE_ERR_OUT_OF_RANGE;
2286
2287         for (i = 0; i < acts_cnt; i++) {
2288                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2289                     acts[i].type != ICE_FLOW_ACT_DROP &&
2290                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2291                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2292                         return ICE_ERR_CFG;
2293
2294                 /* If the caller wants to add two actions of the same type, then
2295                  * it is considered an invalid configuration.
2296                  */
2297                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2298                         return ICE_ERR_PARAM;
2299         }
2300
2301         /* Checks if ACL counters are needed. */
2302         for (i = 0; i < acts_cnt; i++) {
2303                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2304                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2305                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2306                         struct ice_acl_cntrs cntrs;
2307                         enum ice_status status;
2308
2309                         cntrs.amount = 1;
2310                         cntrs.bank = 0; /* Only bank0 for the moment */
2311
2312                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2313                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2314                         else
2315                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2316
2317                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2318                         if (status)
2319                                 return status;
2320                         /* Counter index within the bank */
2321                         acts[i].data.acl_act.value =
2322                                                 CPU_TO_LE16(cntrs.first_cntr);
2323                         *cnt_alloc = true;
2324                 }
2325         }
2326
2327         return ICE_SUCCESS;
2328 }
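/* Illustrative sketch (compiled out): the duplicate-detection pattern used
 * above. Each action type sets one bit in a scratch bitmap; seeing the same
 * bit twice means the caller passed two actions of the same type. Names are
 * hypothetical.
 */
#if 0
static bool example_has_dup_types(const struct ice_flow_action *acts,
				  u8 acts_cnt)
{
	ice_declare_bitmap(seen, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	u8 i;

	ice_zero_bitmap(seen, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);

	for (i = 0; i < acts_cnt; i++)
		if (ice_test_and_set_bit(acts[i].type, seen))
			return true;

	return false;
}
#endif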
2329
2330 /**
2331  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2332  * @fld: number of the given field
2333  * @info: info about field
2334  * @range_buf: range checker configuration buffer
2335  * @data: pointer to a data buffer containing flow entry's match values/masks
2336  * @range: Input/output param indicating which range checkers are being used
2337  */
2338 static void
2339 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2340                               struct ice_aqc_acl_profile_ranges *range_buf,
2341                               u8 *data, u8 *range)
2342 {
2343         u16 new_mask;
2344
2345         /* If not specified, default mask is all bits in field */
2346         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2347                     BIT(ice_flds_info[fld].size) - 1 :
2348                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2349
2350         /* If the mask is 0, then we don't need to worry about this input
2351          * range checker value.
2352          */
2353         if (new_mask) {
2354                 u16 new_high =
2355                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2356                 u16 new_low =
2357                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2358                 u8 range_idx = info->entry.val;
2359
2360                 range_buf->checker_cfg[range_idx].low_boundary =
2361                         CPU_TO_BE16(new_low);
2362                 range_buf->checker_cfg[range_idx].high_boundary =
2363                         CPU_TO_BE16(new_high);
2364                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2365
2366                 /* Indicate which range checker is being used */
2367                 *range |= BIT(range_idx);
2368         }
2369 }
2370
2371 /**
2372  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2373  * @fld: number of the given field
2374  * @info: info about the field
2375  * @buf: buffer containing the entry
2376  * @dontcare: buffer containing don't care mask for entry
2377  * @data: pointer to a data buffer containing flow entry's match values/masks
2378  */
2379 static void
2380 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2381                             u8 *dontcare, u8 *data)
2382 {
2383         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2384         bool use_mask = false;
2385         u8 disp;
2386
2387         src = info->src.val;
2388         mask = info->src.mask;
2389         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2390         disp = info->xtrct.disp % BITS_PER_BYTE;
2391
2392         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2393                 use_mask = true;
2394
2395         for (k = 0; k < info->entry.last; k++, dst++) {
2396                 /* Add overflow bits from previous byte */
2397                 buf[dst] = (tmp_s & 0xff00) >> 8;
2398
2399                 /* If the mask is not valid, tmp_m is always zero, so this just
2400                  * sets dontcare to 0 (no masked bits). If the mask is valid, this
2401                  * pulls in the overflow bits of the mask from the previous byte.
2402                  */
2403                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2404
2405                 /* If there is displacement, the last byte only contains
2406                  * displaced data and there is no more data to read from the
2407                  * user buffer, so skip it to avoid reading beyond the end of
2408                  * the user buffer
2409                  */
2410                 if (!disp || k < info->entry.last - 1) {
2411                         /* Store shifted data to use in next byte */
2412                         tmp_s = data[src++] << disp;
2413
2414                         /* Add current (shifted) byte */
2415                         buf[dst] |= tmp_s & 0xff;
2416
2417                         /* Handle mask if valid */
2418                         if (use_mask) {
2419                                 tmp_m = (~data[mask++] & 0xff) << disp;
2420                                 dontcare[dst] |= tmp_m & 0xff;
2421                         }
2422                 }
2423         }
2424
2425         /* Fill in don't care bits at beginning of field */
2426         if (disp) {
2427                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2428                 for (k = 0; k < disp; k++)
2429                         dontcare[dst] |= BIT(k);
2430         }
2431
2432         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2433
2434         /* Fill in don't care bits at end of field */
2435         if (end_disp) {
2436                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2437                       info->entry.last - 1;
2438                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2439                         dontcare[dst] |= BIT(k);
2440         }
2441 }
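/* Illustrative sketch (compiled out): the shift-with-carry scheme used above,
 * without the mask handling. Each source byte is shifted left by the bit
 * displacement; the low byte of the result lands in the current destination
 * byte and the overflow is carried into the next one. The destination is
 * assumed to have room for one extra byte when disp is non-zero. Names are
 * hypothetical.
 */
#if 0
static void example_shift_bytes(const u8 *src, u8 *dst, u16 len, u8 disp)
{
	u16 carry = 0;
	u16 i;

	for (i = 0; i < len; i++) {
		u16 tmp = ((u16)src[i] << disp) | carry;

		dst[i] = tmp & 0xff;
		carry = (tmp & 0xff00) >> 8;
	}
	/* Final overflow bits, if any, spill into one extra byte */
	if (disp)
		dst[len] = carry & 0xff;
}
#endif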
2442
2443 /**
2444  * ice_flow_acl_frmt_entry - Format ACL entry
2445  * @hw: pointer to the hardware structure
2446  * @prof: pointer to flow profile
2447  * @e: pointer to the flow entry
2448  * @data: pointer to a data buffer containing flow entry's match values/masks
2449  * @acts: array of actions to be performed on a match
2450  * @acts_cnt: number of actions
2451  *
2452  * Formats the key (and key_inverse) to be matched from the data passed in,
2453  * along with data from the flow profile. This key/key_inverse pair makes up
2454  * the 'entry' for an ACL flow entry.
2455  */
2456 static enum ice_status
2457 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2458                         struct ice_flow_entry *e, u8 *data,
2459                         struct ice_flow_action *acts, u8 acts_cnt)
2460 {
2461         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2462         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2463         enum ice_status status;
2464         bool cnt_alloc;
2465         u8 prof_id = 0;
2466         u16 i, buf_sz;
2467
2468         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2469         if (status)
2470                 return status;
2471
2472         /* Format the result action */
2473
2474         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2475         if (status)
2476                 return status;
2477
2478         status = ICE_ERR_NO_MEMORY;
2479
2480         e->acts = (struct ice_flow_action *)
2481                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2482                            ICE_NONDMA_TO_NONDMA);
2483
2484         if (!e->acts)
2485                 goto out;
2486
2487         e->acts_cnt = acts_cnt;
2488
2489         /* Format the matching data */
2490         buf_sz = prof->cfg.scen->width;
2491         buf = (u8 *)ice_malloc(hw, buf_sz);
2492         if (!buf)
2493                 goto out;
2494
2495         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2496         if (!dontcare)
2497                 goto out;
2498
2499         /* 'key' buffer will store both key and key_inverse, so must be twice
2500          * size of buf
2501          */
2502         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2503         if (!key)
2504                 goto out;
2505
2506         range_buf = (struct ice_aqc_acl_profile_ranges *)
2507                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2508         if (!range_buf)
2509                 goto out;
2510
2511         /* Set don't care mask to all 1's to start, will zero out used bytes */
2512         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2513
2514         for (i = 0; i < prof->segs_cnt; i++) {
2515                 struct ice_flow_seg_info *seg = &prof->segs[i];
2516                 u64 match = seg->match;
2517                 u16 j;
2518
2519                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2520                         struct ice_flow_fld_info *info;
2521                         const u64 bit = BIT_ULL(j);
2522
2523                         if (!(match & bit))
2524                                 continue;
2525
2526                         info = &seg->fields[j];
2527
2528                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2529                                 ice_flow_acl_frmt_entry_range(j, info,
2530                                                               range_buf, data,
2531                                                               &range);
2532                         else
2533                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2534                                                             dontcare, data);
2535
2536                         match &= ~bit;
2537                 }
2538
2539                 for (j = 0; j < seg->raws_cnt; j++) {
2540                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2541                         u16 dst, src, mask, k;
2542                         bool use_mask = false;
2543
2544                         src = info->src.val;
2545                         dst = info->entry.val -
2546                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2547                         mask = info->src.mask;
2548
2549                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2550                                 use_mask = true;
2551
2552                         for (k = 0; k < info->entry.last; k++, dst++) {
2553                                 buf[dst] = data[src++];
2554                                 if (use_mask)
2555                                         dontcare[dst] = ~data[mask++];
2556                                 else
2557                                         dontcare[dst] = 0;
2558                         }
2559                 }
2560         }
2561
2562         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2563         dontcare[prof->cfg.scen->pid_idx] = 0;
2564
2565         /* Format the buffer for direction flags */
2566         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2567
2568         if (prof->dir == ICE_FLOW_RX)
2569                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2570
2571         if (range) {
2572                 buf[prof->cfg.scen->rng_chk_idx] = range;
2573                 /* Mark any unused range checkers as don't care */
2574                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2575                 e->range_buf = range_buf;
2576         } else {
2577                 ice_free(hw, range_buf);
2578         }
2579
2580         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2581                              buf_sz);
2582         if (status)
2583                 goto out;
2584
2585         e->entry = key;
2586         e->entry_sz = buf_sz * 2;
2587
2588 out:
2589         if (buf)
2590                 ice_free(hw, buf);
2591
2592         if (dontcare)
2593                 ice_free(hw, dontcare);
2594
2595         if (status && key)
2596                 ice_free(hw, key);
2597
2598         if (status && range_buf) {
2599                 ice_free(hw, range_buf);
2600                 e->range_buf = NULL;
2601         }
2602
2603         if (status && e->acts) {
2604                 ice_free(hw, e->acts);
2605                 e->acts = NULL;
2606                 e->acts_cnt = 0;
2607         }
2608
2609         if (status && cnt_alloc)
2610                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2611
2612         return status;
2613 }
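
/* Layout of the formatted 'entry' buffer produced above, as implied by
 * ice_flow_acl_frmt_entry() and its consumer ice_flow_acl_add_scen_entry_sync()
 * (summary added for illustration, no new behavior implied):
 *
 *   e->entry ->  [ key         : prof->cfg.scen->width bytes ]
 *                [ key inverse : prof->cfg.scen->width bytes ]
 *
 *   e->entry_sz = 2 * prof->cfg.scen->width
 */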
2614
2615 /**
2616  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2617  *                                     the compared data.
2618  * @prof: pointer to flow profile
2619  * @e: pointer to the comparing flow entry
2620  * @do_chg_action: decide if we want to change the ACL action
2621  * @do_add_entry: decide if we want to add the new ACL entry
2622  * @do_rem_entry: decide if we want to remove the current ACL entry
2623  *
2624  * Find an ACL scenario entry that matches the compared data. At the same
2625  * time, this function also figures out:
2626  * a/ If we want to change the ACL action
2627  * b/ If we want to add the new ACL entry
2628  * c/ If we want to remove the current ACL entry
2629  */
2630 static struct ice_flow_entry *
2631 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2632                                   struct ice_flow_entry *e, bool *do_chg_action,
2633                                   bool *do_add_entry, bool *do_rem_entry)
2634 {
2635         struct ice_flow_entry *p, *return_entry = NULL;
2636         u8 i, j;
2637
2638         /* Check if:
2639          * a/ There exists an entry with the same matching data but a
2640          *    different priority: remove the existing ACL entry, then add
2641          *    the new entry to the ACL scenario.
2642          * b/ There exists an entry with the same matching data, priority,
2643          *    and result action: do nothing.
2644          * c/ There exists an entry with the same matching data and
2645          *    priority but a different action: only change the entry's
2646          *    action.
2647          * d/ Otherwise, add this new entry to the ACL scenario.
2648          */
2648         *do_chg_action = false;
2649         *do_add_entry = true;
2650         *do_rem_entry = false;
2651         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2652                 if (memcmp(p->entry, e->entry, p->entry_sz))
2653                         continue;
2654
2655                 /* From this point, we have the same matching_data. */
2656                 *do_add_entry = false;
2657                 return_entry = p;
2658
2659                 if (p->priority != e->priority) {
2660                         /* matching data && !priority */
2661                         *do_add_entry = true;
2662                         *do_rem_entry = true;
2663                         break;
2664                 }
2665
2666                 /* From this point, we will have matching_data && priority */
2667                 if (p->acts_cnt != e->acts_cnt)
2668                         *do_chg_action = true;
2669                 for (i = 0; i < p->acts_cnt; i++) {
2670                         bool found_not_match = false;
2671
2672                         for (j = 0; j < e->acts_cnt; j++)
2673                                 if (memcmp(&p->acts[i], &e->acts[j],
2674                                            sizeof(struct ice_flow_action))) {
2675                                         found_not_match = true;
2676                                         break;
2677                                 }
2678
2679                         if (found_not_match) {
2680                                 *do_chg_action = true;
2681                                 break;
2682                         }
2683                 }
2684
2685                 /* (do_chg_action = true) means :
2686                  *    matching_data && priority && !result_action
2687                  * (do_chg_action = false) means :
2688                  *    matching_data && priority && result_action
2689                  */
2690                 break;
2691         }
2692
2693         return return_entry;
2694 }
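
/* Decision summary for ice_flow_acl_find_scen_entry_cond(), restating the
 * cases documented above (no new behavior implied):
 *
 *   same match data | same priority | same actions | add | remove | chg action
 *   ----------------+---------------+--------------+-----+--------+-----------
 *         no        |       -       |      -       | yes |   no   |     no
 *         yes       |      no       |      -       | yes |  yes   |     no
 *         yes       |      yes      |     yes      | no  |   no   |     no
 *         yes       |      yes      |      no      | no  |   no   |    yes
 */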
2695
2696 /**
2697  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2698  * @p: flow priority
2699  */
2700 static enum ice_acl_entry_prior
2701 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2702 {
2703         enum ice_acl_entry_prior acl_prior;
2704
2705         switch (p) {
2706         case ICE_FLOW_PRIO_LOW:
2707                 acl_prior = ICE_LOW;
2708                 break;
2709         case ICE_FLOW_PRIO_NORMAL:
2710                 acl_prior = ICE_NORMAL;
2711                 break;
2712         case ICE_FLOW_PRIO_HIGH:
2713                 acl_prior = ICE_HIGH;
2714                 break;
2715         default:
2716                 acl_prior = ICE_NORMAL;
2717                 break;
2718         }
2719
2720         return acl_prior;
2721 }
2722
2723 /**
2724  * ice_flow_acl_union_rng_chk - Perform a union operation between two
2725  *                              range checker buffers
2726  * @dst_buf: pointer to destination range checker buffer
2727  * @src_buf: pointer to source range checker buffer
2728  *
2729  * Compute the union of the dst_buf and src_buf range checker buffers
2730  * and save the result back to dst_buf
2731  */
2732 static enum ice_status
2733 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2734                            struct ice_aqc_acl_profile_ranges *src_buf)
2735 {
2736         u8 i, j;
2737
2738         if (!dst_buf || !src_buf)
2739                 return ICE_ERR_BAD_PTR;
2740
2741         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2742                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2743                 bool will_populate = false;
2744
2745                 in_data = &src_buf->checker_cfg[i];
2746
2747                 if (!in_data->mask)
2748                         break;
2749
2750                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2751                         cfg_data = &dst_buf->checker_cfg[j];
2752
2753                         if (!cfg_data->mask ||
2754                             !memcmp(cfg_data, in_data,
2755                                     sizeof(struct ice_acl_rng_data))) {
2756                                 will_populate = true;
2757                                 break;
2758                         }
2759                 }
2760
2761                 if (will_populate) {
2762                         ice_memcpy(cfg_data, in_data,
2763                                    sizeof(struct ice_acl_rng_data),
2764                                    ICE_NONDMA_TO_NONDMA);
2765                 } else {
2766                         /* No available slot left to program range checker */
2767                         return ICE_ERR_MAX_LIMIT;
2768                 }
2769         }
2770
2771         return ICE_SUCCESS;
2772 }
2773
2774 /**
2775  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2776  * @hw: pointer to the hardware structure
2777  * @prof: pointer to flow profile
2778  * @entry: double pointer to the flow entry
2779  *
2780  * Look at the entries already added to the corresponding ACL scenario.
2781  * Then, apply the matching logic to decide whether to add, modify, or
2782  * do nothing with this new entry.
2783  */
2784 static enum ice_status
2785 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2786                                  struct ice_flow_entry **entry)
2787 {
2788         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2789         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2790         struct ice_acl_act_entry *acts = NULL;
2791         struct ice_flow_entry *exist;
2792         enum ice_status status = ICE_SUCCESS;
2793         struct ice_flow_entry *e;
2794         u8 i;
2795
2796         if (!entry || !(*entry) || !prof)
2797                 return ICE_ERR_BAD_PTR;
2798
2799         e = *(entry);
2800
2801         do_chg_rng_chk = false;
2802         if (e->range_buf) {
2803                 u8 prof_id = 0;
2804
2805                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2806                                               &prof_id);
2807                 if (status)
2808                         return status;
2809
2810                 /* Query the current range-checker value in FW */
2811                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2812                                                    NULL);
2813                 if (status)
2814                         return status;
2815                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2816                            sizeof(struct ice_aqc_acl_profile_ranges),
2817                            ICE_NONDMA_TO_NONDMA);
2818
2819                 /* Generate the new range-checker value */
2820                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2821                 if (status)
2822                         return status;
2823
2824                 /* Reconfigure the range check if the buffer is changed. */
2825                 do_chg_rng_chk = false;
2826                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2827                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2828                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2829                                                           &cfg_rng_buf, NULL);
2830                         if (status)
2831                                 return status;
2832
2833                         do_chg_rng_chk = true;
2834                 }
2835         }
2836
2837         /* Figure out if we want to change the ACL action, and/or add the
2838          * new ACL entry, and/or remove the current ACL entry.
2839          */
2840         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2841                                                   &do_add_entry, &do_rem_entry);
2842
2843         if (do_rem_entry) {
2844                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2845                 if (status)
2846                         return status;
2847         }
2848
2849         /* Prepare the result action buffer */
2850         acts = (struct ice_acl_act_entry *)ice_calloc
2851                 (hw, e->acts_cnt, sizeof(struct ice_acl_act_entry));
             if (!acts)
                     return ICE_ERR_NO_MEMORY;
2852         for (i = 0; i < e->acts_cnt; i++)
2853                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2854                            sizeof(struct ice_acl_act_entry),
2855                            ICE_NONDMA_TO_NONDMA);
2856
2857         if (do_add_entry) {
2858                 enum ice_acl_entry_prior prior;
2859                 u8 *keys, *inverts;
2860                 u16 entry_idx;
2861
2862                 keys = (u8 *)e->entry;
2863                 inverts = keys + (e->entry_sz / 2);
2864                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2865
2866                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2867                                            inverts, acts, e->acts_cnt,
2868                                            &entry_idx);
2869                 if (status)
2870                         goto out;
2871
2872                 e->scen_entry_idx = entry_idx;
2873                 LIST_ADD(&e->l_entry, &prof->entries);
2874         } else {
2875                 if (do_chg_action) {
2876                         /* Update the SW copy of the existing entry with
2877                          * e's action memory info
2878                          */
2879                         ice_free(hw, exist->acts);
2880                         exist->acts_cnt = e->acts_cnt;
2881                         exist->acts = (struct ice_flow_action *)
2882                                 ice_calloc(hw, exist->acts_cnt,
2883                                            sizeof(struct ice_flow_action));
2884
2885                         if (!exist->acts) {
2886                                 status = ICE_ERR_NO_MEMORY;
2887                                 goto out;
2888                         }
2889
2890                         ice_memcpy(exist->acts, e->acts,
2891                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2892                                    ICE_NONDMA_TO_NONDMA);
2893
2894                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2895                                                   e->acts_cnt,
2896                                                   exist->scen_entry_idx);
2897                         if (status)
2898                                 goto out;
2899                 }
2900
2901                 if (do_chg_rng_chk) {
2902                         /* In this case, we want to update the range checker
2903                          * information of the existing entry
2904                          */
2905                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2906                                                             e->range_buf);
2907                         if (status)
2908                                 goto out;
2909                 }
2910
2911                 /* Since we don't add the new entry to our SW DB, free its
2912                  * memory and return the existing entry to the caller
2913                  */
2914                 ice_dealloc_flow_entry(hw, e);
2915                 *(entry) = exist;
2916         }
2917 out:
2918         if (acts)
2919                 ice_free(hw, acts);
2920
2921         return status;
2922 }
2923
2924 /**
2925  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2926  * @hw: pointer to the hardware structure
2927  * @prof: pointer to flow profile
2928  * @e: double pointer to the flow entry
2929  */
2930 static enum ice_status
2931 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2932                             struct ice_flow_entry **e)
2933 {
2934         enum ice_status status;
2935
2936         ice_acquire_lock(&prof->entries_lock);
2937         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2938         ice_release_lock(&prof->entries_lock);
2939
2940         return status;
2941 }
2942
2943 /**
2944  * ice_flow_add_entry - Add a flow entry
2945  * @hw: pointer to the HW struct
2946  * @blk: classification stage
2947  * @prof_id: ID of the profile to add a new flow entry to
2948  * @entry_id: unique ID to identify this flow entry
2949  * @vsi_handle: software VSI handle for the flow entry
2950  * @prio: priority of the flow entry
2951  * @data: pointer to a data buffer containing flow entry's match values/masks
2952  * @acts: array of actions to be performed on a match
2953  * @acts_cnt: number of actions
2954  * @entry_h: pointer to buffer that receives the new flow entry's handle
2955  */
2956 enum ice_status
2957 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2958                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2959                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
2960                    u64 *entry_h)
2961 {
2962         struct ice_flow_entry *e = NULL;
2963         struct ice_flow_prof *prof;
2964         enum ice_status status = ICE_SUCCESS;
2965
2966         /* ACL entries must indicate an action */
2967         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2968                 return ICE_ERR_PARAM;
2969
2970         /* No flow entry data is expected for RSS */
2971         if (!entry_h || (!data && blk != ICE_BLK_RSS))
2972                 return ICE_ERR_BAD_PTR;
2973
2974         if (!ice_is_vsi_valid(hw, vsi_handle))
2975                 return ICE_ERR_PARAM;
2976
2977         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2978
2979         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2980         if (!prof) {
2981                 status = ICE_ERR_DOES_NOT_EXIST;
2982         } else {
2983                 /* Allocate memory for the entry being added and associate
2984                  * the VSI to the found flow profile
2985                  */
2986                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2987                 if (!e)
2988                         status = ICE_ERR_NO_MEMORY;
2989                 else
2990                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2991         }
2992
2993         ice_release_lock(&hw->fl_profs_locks[blk]);
2994         if (status)
2995                 goto out;
2996
2997         e->id = entry_id;
2998         e->vsi_handle = vsi_handle;
2999         e->prof = prof;
3000         e->priority = prio;
3001
3002         switch (blk) {
3003         case ICE_BLK_FD:
3004         case ICE_BLK_RSS:
3005                 break;
3006         case ICE_BLK_ACL:
3007                 /* ACL will handle the entry management */
3008                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3009                                                  acts_cnt);
3010                 if (status)
3011                         goto out;
3012
3013                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3014                 if (status)
3015                         goto out;
3016
3017                 break;
3018         default:
3019                 status = ICE_ERR_NOT_IMPL;
3020                 goto out;
3021         }
3022
3023         if (blk != ICE_BLK_ACL) {
3024                 /* ACL will handle the entry management */
3025                 ice_acquire_lock(&prof->entries_lock);
3026                 LIST_ADD(&e->l_entry, &prof->entries);
3027                 ice_release_lock(&prof->entries_lock);
3028         }
3029
3030         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3031
3032 out:
3033         if (status && e) {
3034                 if (e->entry)
3035                         ice_free(hw, e->entry);
3036                 ice_free(hw, e);
3037         }
3038
3039         return status;
3040 }
3041
3042 /**
3043  * ice_flow_rem_entry - Remove a flow entry
3044  * @hw: pointer to the HW struct
3045  * @blk: classification stage
3046  * @entry_h: handle to the flow entry to be removed
3047  */
3048 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3049                                    u64 entry_h)
3050 {
3051         struct ice_flow_entry *entry;
3052         struct ice_flow_prof *prof;
3053         enum ice_status status = ICE_SUCCESS;
3054
3055         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3056                 return ICE_ERR_PARAM;
3057
3058         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
3059
3060         /* Retain the pointer to the flow profile as the entry will be freed */
3061         prof = entry->prof;
3062
3063         if (prof) {
3064                 ice_acquire_lock(&prof->entries_lock);
3065                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3066                 ice_release_lock(&prof->entries_lock);
3067         }
3068
3069         return status;
3070 }
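
/* Usage sketch (illustration only, not part of the driver build): create a
 * single-segment profile that matches the TCP destination port, add one flow
 * entry to it and remove the entry again.  The profile ID (0x1234), entry ID,
 * input-buffer layout and the choice of the FD block are assumptions of this
 * example, and the actions a real FD rule would carry are omitted.
 */
static enum ice_status
example_fd_entry_lifecycle(struct ice_hw *hw, u16 vsi_handle)
{
        struct ice_flow_seg_info seg = { 0 };
        struct ice_flow_prof *prof;
        enum ice_status status;
        u64 entry_h;
        /* value to match at byte 0, mask at byte 2 of the input buffer */
        u8 match_data[4] = { 0x1f, 0x90, 0xff, 0xff };

        /* Describe the packet segment: TCP over IPv4, match on dst port */
        ICE_FLOW_SET_HDRS(&seg, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
        ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 2,
                         ICE_FLOW_FLD_OFF_INVAL, false);

        status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, 0x1234, &seg,
                                   1, NULL, 0, &prof);
        if (status)
                return status;

        status = ice_flow_add_entry(hw, ICE_BLK_FD, 0x1234, 1, vsi_handle,
                                    ICE_FLOW_PRIO_NORMAL, match_data, NULL, 0,
                                    &entry_h);
        if (status)
                return status;

        return ice_flow_rem_entry(hw, ICE_BLK_FD, entry_h);
}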
3071
3072 /**
3073  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3074  * @seg: packet segment the field being set belongs to
3075  * @fld: field to be set
3076  * @field_type: type of the field
3077  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3078  *           entry's input buffer
3079  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3080  *            input buffer
3081  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3082  *            entry's input buffer
3083  *
3084  * This helper function stores information about a field being matched, including
3085  * the type of the field and the locations of the value to match, the mask, and
3086  * the upper-bound value in the start of the input buffer for a flow entry.
3087  * This function should only be used for fixed-size data structures.
3088  *
3089  * This function also opportunistically determines the protocol headers to be
3090  * present based on the fields being set. Some fields cannot be used alone to
3091  * determine the protocol headers present. Sometimes, fields for particular
3092  * protocol headers are not matched. In those cases, the protocol headers
3093  * must be explicitly set.
3094  */
3095 static void
3096 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3097                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3098                      u16 mask_loc, u16 last_loc)
3099 {
3100         u64 bit = BIT_ULL(fld);
3101
3102         seg->match |= bit;
3103         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3104                 seg->range |= bit;
3105
3106         seg->fields[fld].type = field_type;
3107         seg->fields[fld].src.val = val_loc;
3108         seg->fields[fld].src.mask = mask_loc;
3109         seg->fields[fld].src.last = last_loc;
3110
3111         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3112 }
3113
3114 /**
3115  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3116  * @seg: packet segment the field being set belongs to
3117  * @fld: field to be set
3118  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3119  *           entry's input buffer
3120  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3121  *            input buffer
3122  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3123  *            entry's input buffer
3124  * @range: indicate if field being matched is to be in a range
3125  *
3126  * This function specifies the locations, in the form of byte offsets from the
3127  * start of the input buffer for a flow entry, from where the value to match,
3128  * the mask value, and upper value can be extracted. These locations are then
3129  * stored in the flow profile. When adding a flow entry associated with the
3130  * flow profile, these locations will be used to quickly extract the values and
3131  * create the content of a match entry. This function should only be used for
3132  * fixed-size data structures.
3133  */
3134 void
3135 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3136                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3137 {
3138         enum ice_flow_fld_match_type t = range ?
3139                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3140
3141         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3142 }
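
/* Usage sketch (illustration only, not part of the driver build): program a
 * match on the TCP destination port.  The byte offsets 0 and 2 refer to the
 * caller's flow-entry input buffer (the "data" argument later passed to
 * ice_flow_add_entry()); the buffer layout itself is this example's
 * assumption, not a driver requirement.
 */
static void example_seg_match_tcp_dport(struct ice_flow_seg_info *seg)
{
        /* value to match at byte 0, mask at byte 2, no upper/range value */
        ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 2,
                         ICE_FLOW_FLD_OFF_INVAL, false);
}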
3143
3144 /**
3145  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3146  * @seg: packet segment the field being set belongs to
3147  * @fld: field to be set
3148  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3149  *           entry's input buffer
3150  * @pref_loc: location of prefix value from entry's input buffer
3151  * @pref_sz: size of the location holding the prefix value
3152  *
3153  * This function specifies the locations, in the form of byte offsets from the
3154  * start of the input buffer for a flow entry, from where the value to match
3155  * and the IPv4 prefix value can be extracted. These locations are then stored
3156  * in the flow profile. When adding flow entries to the associated flow profile,
3157  * these locations can be used to quickly extract the values to create the
3158  * content of a match entry. This function should only be used for fixed-size
3159  * data structures.
3160  */
3161 void
3162 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3163                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3164 {
3165         /* For this type of field, the "mask" location is for the prefix value's
3166          * location and the "last" location is for the size of the location of
3167          * the prefix value.
3168          */
3169         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3170                              pref_loc, (u16)pref_sz);
3171 }
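
/* Usage sketch (illustration only, not part of the driver build): match on an
 * IPv4 source address prefix.  The address value is assumed to sit at byte 0
 * of the entry's input buffer and the 4-byte location holding the prefix
 * value at byte 4; the offsets and prefix size are placeholders chosen for
 * this example.
 */
static void example_seg_match_ipv4_sa_prefix(struct ice_flow_seg_info *seg)
{
        ice_flow_set_fld_prefix(seg, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 4, 4);
}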
3172
3173 /**
3174  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3175  * @seg: packet segment the field being set belongs to
3176  * @off: offset of the raw field from the beginning of the segment in bytes
3177  * @len: length of the raw pattern to be matched
3178  * @val_loc: location of the value to match from entry's input buffer
3179  * @mask_loc: location of mask value from entry's input buffer
3180  *
3181  * This function specifies the offset of the raw field to be matched from the
3182  * beginning of the specified packet segment, and the locations, in the form of
3183  * byte offsets from the start of the input buffer for a flow entry, from where
3184  * the value to match and the mask value can be extracted. These locations are
3185  * then stored in the flow profile. When adding flow entries to the associated
3186  * flow profile, these locations can be used to quickly extract the values to
3187  * create the content of a match entry. This function should only be used for
3188  * fixed-size data structures.
3189  */
3190 void
3191 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3192                      u16 val_loc, u16 mask_loc)
3193 {
3194         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3195                 seg->raws[seg->raws_cnt].off = off;
3196                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3197                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3198                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3199                 /* The "last" field is used to store the length of the field */
3200                 seg->raws[seg->raws_cnt].info.src.last = len;
3201         }
3202
3203         /* Overflows of "raws" will be handled as an error condition later in
3204          * the flow when this information is processed.
3205          */
3206         seg->raws_cnt++;
3207 }
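
/* Usage sketch (illustration only, not part of the driver build): match two
 * raw bytes located 6 bytes into the packet segment, e.g. a header field that
 * has no named ICE_FLOW_FIELD_IDX_* entry.  The match value is assumed to sit
 * at byte 0 of the entry's input buffer and the mask at byte 2.
 */
static void example_seg_match_raw_bytes(struct ice_flow_seg_info *seg)
{
        ice_flow_add_fld_raw(seg, 6, 2, 0, 2);
}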
3208
3209 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3210         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3211
3212 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3213         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3214
3215 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3216         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3217          ICE_FLOW_SEG_HDR_SCTP)
3218
3219 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3220         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3221          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3222          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3223
3224 /**
3225  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3226  * @segs: pointer to the flow field segment(s)
3227  * @hash_fields: fields to be hashed on for the segment(s)
3228  * @flow_hdr: protocol header fields within a packet segment
3229  *
3230  * Helper function to extract fields from the hash bitmap and use the flow
3231  * header value to set up a flow field segment for later use when adding or
3232  * removing a flow profile entry.
3233  */
3234 static enum ice_status
3235 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3236                           u32 flow_hdr)
3237 {
3238         u64 val = hash_fields;
3239         u8 i;
3240
3241         for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3242                 u64 bit = BIT_ULL(i);
3243
3244                 if (val & bit) {
3245                         ice_flow_set_fld(segs, (enum ice_flow_field)i,
3246                                          ICE_FLOW_FLD_OFF_INVAL,
3247                                          ICE_FLOW_FLD_OFF_INVAL,
3248                                          ICE_FLOW_FLD_OFF_INVAL, false);
3249                         val &= ~bit;
3250                 }
3251         }
3252         ICE_FLOW_SET_HDRS(segs, flow_hdr);
3253
3254         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3255             ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
3256                 return ICE_ERR_PARAM;
3257
3258         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3259         if (val && !ice_is_pow2(val))
3260                 return ICE_ERR_CFG;
3261
3262         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3263         if (val && !ice_is_pow2(val))
3264                 return ICE_ERR_CFG;
3265
3266         return ICE_SUCCESS;
3267 }
3268
3269 /**
3270  * ice_rem_vsi_rss_list - remove VSI from RSS list
3271  * @hw: pointer to the hardware structure
3272  * @vsi_handle: software VSI handle
3273  *
3274  * Remove the VSI from all RSS configurations in the list.
3275  */
3276 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3277 {
3278         struct ice_rss_cfg *r, *tmp;
3279
3280         if (LIST_EMPTY(&hw->rss_list_head))
3281                 return;
3282
3283         ice_acquire_lock(&hw->rss_locks);
3284         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3285                                  ice_rss_cfg, l_entry)
3286                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3287                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3288                                 LIST_DEL(&r->l_entry);
3289                                 ice_free(hw, r);
3290                         }
3291         ice_release_lock(&hw->rss_locks);
3292 }
3293
3294 /**
3295  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3296  * @hw: pointer to the hardware structure
3297  * @vsi_handle: software VSI handle
3298  *
3299  * This function will iterate through all flow profiles and disassociate
3300  * the VSI from any profile it is associated with. If a flow profile has
3301  * no VSIs left, it will be removed.
3302  */
3303 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3304 {
3305         const enum ice_block blk = ICE_BLK_RSS;
3306         struct ice_flow_prof *p, *t;
3307         enum ice_status status = ICE_SUCCESS;
3308
3309         if (!ice_is_vsi_valid(hw, vsi_handle))
3310                 return ICE_ERR_PARAM;
3311
3312         if (LIST_EMPTY(&hw->fl_profs[blk]))
3313                 return ICE_SUCCESS;
3314
3315         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3316         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3317                                  l_entry)
3318                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3319                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3320                         if (status)
3321                                 break;
3322
3323                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3324                                 status = ice_flow_rem_prof_sync(hw, blk, p);
3325                                 if (status)
3326                                         break;
3327                         }
3328                 }
3329         ice_release_lock(&hw->fl_profs_locks[blk]);
3330
3331         return status;
3332 }
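
/* Usage sketch (illustration only, not part of the driver build): one
 * plausible order for tearing down the RSS state of a VSI - first remove the
 * hardware RSS flow profiles for the VSI, then drop it from the software RSS
 * bookkeeping list.  The actual call sites live outside this file.
 */
static void example_vsi_rss_teardown(struct ice_hw *hw, u16 vsi_handle)
{
        ice_rem_vsi_rss_cfg(hw, vsi_handle);
        ice_rem_vsi_rss_list(hw, vsi_handle);
}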
3333
3334 /**
3335  * ice_rem_rss_list - remove RSS configuration from list
3336  * @hw: pointer to the hardware structure
3337  * @vsi_handle: software VSI handle
3338  * @prof: pointer to flow profile
3339  *
3340  * Assumption: lock has already been acquired for RSS list
3341  */
3342 static void
3343 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3344 {
3345         struct ice_rss_cfg *r, *tmp;
3346
3347         /* Search for RSS hash fields associated with the VSI that match the
3348          * hash configuration associated with the flow profile. If found,
3349          * clear the VSI from the entry and delete it once no VSIs remain.
3350          */
3351         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3352                                  ice_rss_cfg, l_entry)
3353                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3354                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3355                         ice_clear_bit(vsi_handle, r->vsis);
3356                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3357                                 LIST_DEL(&r->l_entry);
3358                                 ice_free(hw, r);
3359                         }
3360                         return;
3361                 }
3362 }
3363
3364 /**
3365  * ice_add_rss_list - add RSS configuration to list
3366  * @hw: pointer to the hardware structure
3367  * @vsi_handle: software VSI handle
3368  * @prof: pointer to flow profile
3369  *
3370  * Assumption: lock has already been acquired for RSS list
3371  */
3372 static enum ice_status
3373 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3374 {
3375         struct ice_rss_cfg *r, *rss_cfg;
3376
3377         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3378                             ice_rss_cfg, l_entry)
3379                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3380                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3381                         ice_set_bit(vsi_handle, r->vsis);
3382                         return ICE_SUCCESS;
3383                 }
3384
3385         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3386         if (!rss_cfg)
3387                 return ICE_ERR_NO_MEMORY;
3388
3389         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3390         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3391         rss_cfg->symm = prof->cfg.symm;
3392         ice_set_bit(vsi_handle, rss_cfg->vsis);
3393
3394         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3395
3396         return ICE_SUCCESS;
3397 }
3398
3399 #define ICE_FLOW_PROF_HASH_S    0
3400 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3401 #define ICE_FLOW_PROF_HDR_S     32
3402 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3403 #define ICE_FLOW_PROF_ENCAP_S   63
3404 #define ICE_FLOW_PROF_ENCAP_M   (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
3405
3406 #define ICE_RSS_OUTER_HEADERS   1
3407 #define ICE_RSS_INNER_HEADERS   2
3408
3409 /* Flow profile ID format:
3410  * [0:31] - Packet match fields
3411  * [32:61] - Protocol header
3412  * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
3413  */
3414 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3415         (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3416               (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3417               ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3418
3419 static void
3420 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3421 {
3422         u32 s = ((src % 4) << 3); /* byte shift */
3423         u32 v = dst | 0x80; /* value to program */
3424         u8 i = src / 4; /* register index */
3425         u32 reg;
3426
3427         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3428         reg = (reg & ~(0xff << s)) | (v << s);
3429         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3430 }
3431
3432 static void
3433 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3434 {
3435         int fv_last_word =
3436                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3437         int i;
3438
3439         for (i = 0; i < len; i++) {
3440                 ice_rss_config_xor_word(hw, prof_id,
3441                                         /* Yes, the field vector in GLQF_HSYMM
3442                                          * and GLQF_HINSET is reversed!
3443                                          */
3444                                         fv_last_word - (src + i),
3445                                         fv_last_word - (dst + i));
3446                 ice_rss_config_xor_word(hw, prof_id,
3447                                         fv_last_word - (dst + i),
3448                                         fv_last_word - (src + i));
3449         }
3450 }
3451
3452 static void
3453 ice_rss_update_symm(struct ice_hw *hw,
3454                     struct ice_flow_prof *prof)
3455 {
3456         struct ice_prof_map *map;
3457         u8 prof_id, m;
3458
3459         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
             if (!map)
                     return;
3460         prof_id = map->prof_id;
3461
3462         /* clear to default */
3463         for (m = 0; m < 6; m++)
3464                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3465         if (prof->cfg.symm) {
3466                 struct ice_flow_seg_info *seg =
3467                         &prof->segs[prof->segs_cnt - 1];
3468
3469                 struct ice_flow_seg_xtrct *ipv4_src =
3470                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3471                 struct ice_flow_seg_xtrct *ipv4_dst =
3472                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3473                 struct ice_flow_seg_xtrct *ipv6_src =
3474                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3475                 struct ice_flow_seg_xtrct *ipv6_dst =
3476                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3477
3478                 struct ice_flow_seg_xtrct *tcp_src =
3479                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3480                 struct ice_flow_seg_xtrct *tcp_dst =
3481                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3482
3483                 struct ice_flow_seg_xtrct *udp_src =
3484                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3485                 struct ice_flow_seg_xtrct *udp_dst =
3486                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3487
3488                 struct ice_flow_seg_xtrct *sctp_src =
3489                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3490                 struct ice_flow_seg_xtrct *sctp_dst =
3491                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3492
3493                 /* xor IPv4 */
3494                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3495                         ice_rss_config_xor(hw, prof_id,
3496                                            ipv4_src->idx, ipv4_dst->idx, 2);
3497
3498                 /* xor IPv6 */
3499                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3500                         ice_rss_config_xor(hw, prof_id,
3501                                            ipv6_src->idx, ipv6_dst->idx, 8);
3502
3503                 /* xor TCP */
3504                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3505                         ice_rss_config_xor(hw, prof_id,
3506                                            tcp_src->idx, tcp_dst->idx, 1);
3507
3508                 /* xor UDP */
3509                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3510                         ice_rss_config_xor(hw, prof_id,
3511                                            udp_src->idx, udp_dst->idx, 1);
3512
3513                 /* xor SCTP */
3514                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3515                         ice_rss_config_xor(hw, prof_id,
3516                                            sctp_src->idx, sctp_dst->idx, 1);
3517         }
3518 }
3519
3520 /**
3521  * ice_add_rss_cfg_sync - add an RSS configuration
3522  * @hw: pointer to the hardware structure
3523  * @vsi_handle: software VSI handle
3524  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3525  * @addl_hdrs: protocol header fields
3526  * @segs_cnt: packet segment count
3527  * @symm: symmetric hash enable/disable
3528  *
3529  * Assumption: lock has already been acquired for RSS list
3530  */
3531 static enum ice_status
3532 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3533                      u32 addl_hdrs, u8 segs_cnt, bool symm)
3534 {
3535         const enum ice_block blk = ICE_BLK_RSS;
3536         struct ice_flow_prof *prof = NULL;
3537         struct ice_flow_seg_info *segs;
3538         enum ice_status status;
3539
3540         if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3541                 return ICE_ERR_PARAM;
3542
3543         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3544                                                       sizeof(*segs));
3545         if (!segs)
3546                 return ICE_ERR_NO_MEMORY;
3547
3548         /* Construct the packet segment info from the hashed fields */
3549         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3550                                            addl_hdrs);
3551         if (status)
3552                 goto exit;
3553
3554         /* Search for a flow profile that has matching headers, hash fields,
3555          * and has the input VSI associated with it. If found, no further
3556          * operations are required, so exit.
3557          */
3558         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3559                                         vsi_handle,
3560                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
3561                                         ICE_FLOW_FIND_PROF_CHK_VSI);
3562         if (prof) {
3563                 if (prof->cfg.symm == symm)
3564                         goto exit;
3565                 prof->cfg.symm = symm;
3566                 goto update_symm;
3567         }
3568
3569         /* Check if a flow profile exists with the same protocol headers and
3570          * is associated with the input VSI. If so, disassociate the VSI from
3571          * this profile. The VSI will be added to a new profile created with
3572          * the protocol headers and the new hash field configuration.
3573          */
3574         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3575                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3576         if (prof) {
3577                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3578                 if (!status)
3579                         ice_rem_rss_list(hw, vsi_handle, prof);
3580                 else
3581                         goto exit;
3582
3583                 /* Remove profile if it has no VSIs associated */
3584                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3585                         status = ice_flow_rem_prof(hw, blk, prof->id);
3586                         if (status)
3587                                 goto exit;
3588                 }
3589         }
3590
3591         /* Search for a profile that has only the same match fields. If one
3592          * exists, associate the VSI with this profile.
3593          */
3594         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3595                                         vsi_handle,
3596                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3597         if (prof) {
3598                 if (prof->cfg.symm == symm) {
3599                         status = ice_flow_assoc_prof(hw, blk, prof,
3600                                                      vsi_handle);
3601                         if (!status)
3602                                 status = ice_add_rss_list(hw, vsi_handle,
3603                                                           prof);
3604                 } else {
3605                         /* if a profile exists but with a different symmetric
3606                          * hash requirement, just return an error.
3607                          */
3608                         status = ICE_ERR_NOT_SUPPORTED;
3609                 }
3610                 goto exit;
3611         }
3612
3613         /* Create a new flow profile with the generated profile ID and packet
3614          * segment information.
3615          */
3616         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3617                                    ICE_FLOW_GEN_PROFID(hashed_flds,
3618                                                        segs[segs_cnt - 1].hdrs,
3619                                                        segs_cnt),
3620                                    segs, segs_cnt, NULL, 0, &prof);
3621         if (status)
3622                 goto exit;
3623
3624         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3625         /* If association with the new flow profile failed, the profile can
3626          * be removed.
3627          */
3628         if (status) {
3629                 ice_flow_rem_prof(hw, blk, prof->id);
3630                 goto exit;
3631         }
3632
3633         status = ice_add_rss_list(hw, vsi_handle, prof);
3634
3635         prof->cfg.symm = symm;
3636
3637 update_symm:
3638         ice_rss_update_symm(hw, prof);
3639
3640 exit:
3641         ice_free(hw, segs);
3642         return status;
3643 }
3644
3645 /**
3646  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3647  * @hw: pointer to the hardware structure
3648  * @vsi_handle: software VSI handle
3649  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3650  * @addl_hdrs: protocol header fields
3651  * @symm: symmetric hash enable/disable
3652  *
3653  * This function will generate a flow profile based on the input fields to
3654  * hash on and the flow type, and will use the VSI number to add a flow
3655  * entry to the profile.
3656  */
3657 enum ice_status
3658 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3659                 u32 addl_hdrs, bool symm)
3660 {
3661         enum ice_status status;
3662
3663         if (hashed_flds == ICE_HASH_INVALID ||
3664             !ice_is_vsi_valid(hw, vsi_handle))
3665                 return ICE_ERR_PARAM;
3666
3667         ice_acquire_lock(&hw->rss_locks);
3668         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3669                                       ICE_RSS_OUTER_HEADERS, symm);
3670         if (!status)
3671                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3672                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3673                                               symm);
3674         ice_release_lock(&hw->rss_locks);
3675
3676         return status;
3677 }
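
/* Usage sketch (illustration only, not part of the driver build): enable
 * symmetric RSS hashing on the IPv4 source/destination addresses and the TCP
 * ports of a VSI.  The hash bitmap is built here from ICE_FLOW_FIELD_IDX_*
 * bits; callers typically use the ICE_FLOW_HASH_* helpers referenced in the
 * comments above instead.  Note that addl_hdrs may carry at most one L3 and
 * one L4 header bit (enforced by ice_flow_set_rss_seg_info()).
 */
static enum ice_status
example_enable_ipv4_tcp_rss(struct ice_hw *hw, u16 vsi_handle)
{
        u64 hashed_flds = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
                          BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
                          BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) |
                          BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT);

        return ice_add_rss_cfg(hw, vsi_handle, hashed_flds,
                               ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
                               true);
}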
3678
3679 /**
3680  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3681  * @hw: pointer to the hardware structure
3682  * @vsi_handle: software VSI handle
3683  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3684  * @addl_hdrs: Protocol header fields within a packet segment
3685  * @segs_cnt: packet segment count
3686  *
3687  * Assumption: lock has already been acquired for RSS list
3688  */
3689 static enum ice_status
3690 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3691                      u32 addl_hdrs, u8 segs_cnt)
3692 {
3693         const enum ice_block blk = ICE_BLK_RSS;
3694         struct ice_flow_seg_info *segs;
3695         struct ice_flow_prof *prof;
3696         enum ice_status status;
3697
3698         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3699                                                       sizeof(*segs));
3700         if (!segs)
3701                 return ICE_ERR_NO_MEMORY;
3702
3703         /* Construct the packet segment info from the hashed fields */
3704         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3705                                            addl_hdrs);
3706         if (status)
3707                 goto out;
3708
3709         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3710                                         vsi_handle,
3711                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3712         if (!prof) {
3713                 status = ICE_ERR_DOES_NOT_EXIST;
3714                 goto out;
3715         }
3716
3717         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3718         if (status)
3719                 goto out;
3720
3721         /* Remove RSS configuration from VSI context before deleting
3722          * the flow profile.
3723          */
3724         ice_rem_rss_list(hw, vsi_handle, prof);
3725
3726         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3727                 status = ice_flow_rem_prof(hw, blk, prof->id);
3728
3729 out:
3730         ice_free(hw, segs);
3731         return status;
3732 }
3733
3734 /**
3735  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3736  * @hw: pointer to the hardware structure
3737  * @vsi_handle: software VSI handle
3738  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3739  * @addl_hdrs: Protocol header fields within a packet segment
3740  *
3741  * This function will look up the flow profile based on the input
3742  * hash field bitmap, iterate through the entry list of that
3743  * profile and find the entry associated with the input VSI to be
3744  * removed. Calls are made to the underlying flow APIs, which in
3745  * turn build or update buffers for the RSS XLT1 section.
3746  */
3747 enum ice_status
3748 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3749                 u32 addl_hdrs)
3750 {
3751         enum ice_status status;
3752
3753         if (hashed_flds == ICE_HASH_INVALID ||
3754             !ice_is_vsi_valid(hw, vsi_handle))
3755                 return ICE_ERR_PARAM;
3756
3757         ice_acquire_lock(&hw->rss_locks);
3758         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3759                                       ICE_RSS_OUTER_HEADERS);
3760         if (!status)
3761                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3762                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3763         ice_release_lock(&hw->rss_locks);
3764
3765         return status;
3766 }
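
/* Usage sketch (illustration only, not part of the driver build): remove the
 * IPv4/TCP RSS configuration added in the sketch following ice_add_rss_cfg()
 * by passing the same hash fields and protocol headers.
 */
static enum ice_status
example_disable_ipv4_tcp_rss(struct ice_hw *hw, u16 vsi_handle)
{
        u64 hashed_flds = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
                          BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
                          BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) |
                          BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT);

        return ice_rem_rss_cfg(hw, vsi_handle, hashed_flds,
                               ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
}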
3767
3768 /**
3769  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3770  * @hw: pointer to the hardware structure
3771  * @vsi_handle: software VSI handle
3772  */
3773 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3774 {
3775         enum ice_status status = ICE_SUCCESS;
3776         struct ice_rss_cfg *r;
3777
3778         if (!ice_is_vsi_valid(hw, vsi_handle))
3779                 return ICE_ERR_PARAM;
3780
3781         ice_acquire_lock(&hw->rss_locks);
3782         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3783                             ice_rss_cfg, l_entry) {
3784                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3785                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3786                                                       r->hashed_flds,
3787                                                       r->packet_hdr,
3788                                                       ICE_RSS_OUTER_HEADERS,
3789                                                       r->symm);
3790                         if (status)
3791                                 break;
3792                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3793                                                       r->hashed_flds,
3794                                                       r->packet_hdr,
3795                                                       ICE_RSS_INNER_HEADERS,
3796                                                       r->symm);
3797                         if (status)
3798                                 break;
3799                 }
3800         }
3801         ice_release_lock(&hw->rss_locks);
3802
3803         return status;
3804 }
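
/* Usage sketch (illustration only, not part of the driver build): after a
 * device reset, replay the saved RSS configuration for every valid VSI.  The
 * loop over all VSI handles is an assumption of this example; real callers
 * replay only the VSIs they own.
 */
static void example_replay_all_rss(struct ice_hw *hw)
{
        u16 vsi_handle;

        for (vsi_handle = 0; vsi_handle < ICE_MAX_VSI; vsi_handle++) {
                if (!ice_is_vsi_valid(hw, vsi_handle))
                        continue;
                if (ice_replay_rss_cfg(hw, vsi_handle))
                        break; /* stop on the first failure */
        }
}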
3805
3806 /**
3807  * ice_get_rss_cfg - returns hashed fields for the given header types
3808  * @hw: pointer to the hardware structure
3809  * @vsi_handle: software VSI handle
3810  * @hdrs: protocol header type
3811  *
3812  * This function will return the match fields of the first flow profile
3813  * instance that has the given header types and contains the input VSI
3814  */
3815 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3816 {
3817         struct ice_rss_cfg *r, *rss_cfg = NULL;
3818
3819         /* verify that the protocol header is non-zero and the VSI is valid */
3820         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3821                 return ICE_HASH_INVALID;
3822
3823         ice_acquire_lock(&hw->rss_locks);
3824         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3825                             ice_rss_cfg, l_entry)
3826                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3827                     r->packet_hdr == hdrs) {
3828                         rss_cfg = r;
3829                         break;
3830                 }
3831         ice_release_lock(&hw->rss_locks);
3832
3833         return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
3834 }
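
/* Usage sketch (illustration only, not part of the driver build): query which
 * fields are currently hashed for IPv4/TCP traffic on a VSI and check whether
 * the TCP destination port is part of the hash.
 */
static bool example_is_tcp_dport_hashed(struct ice_hw *hw, u16 vsi_handle)
{
        u64 flds = ice_get_rss_cfg(hw, vsi_handle,
                                   ICE_FLOW_SEG_HDR_IPV4 |
                                   ICE_FLOW_SEG_HDR_TCP);

        if (flds == ICE_HASH_INVALID)
                return false;

        return (flds & BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)) != 0;
}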