/* net/ice/base: avoid PPPoE IPv4 overlap
 * [dpdk.git] / drivers / net / ice / base / ice_flow.c
 */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
/* Size of known protocol header fields, in bytes. The ICE_FLOW_FLD_INFO*
 * macros below convert these byte counts to bits.
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE        2
#define ICE_FLOW_FLD_SZ_VLAN            2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
#define ICE_FLOW_FLD_SZ_IP_DSCP         1
#define ICE_FLOW_FLD_SZ_IP_TTL          1
#define ICE_FLOW_FLD_SZ_IP_PROT         1
#define ICE_FLOW_FLD_SZ_PORT            2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
#define ICE_FLOW_FLD_SZ_ICMP_CODE       1
#define ICE_FLOW_FLD_SZ_ARP_OPER        2
#define ICE_FLOW_FLD_SZ_GRE_KEYID       4
#define ICE_FLOW_FLD_SZ_GTP_TEID        4
#define ICE_FLOW_FLD_SZ_GTP_QFI         2
#define ICE_FLOW_FLD_SZ_PFCP_SEID 8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
#define ICE_FLOW_FLD_SZ_ESP_SPI 4
#define ICE_FLOW_FLD_SZ_AH_SPI  4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
33
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;	/* header the field belongs to */
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
	u16 mask;	/* 16-bit mask for field; 0 means whole field */
};

/* Initialize an ice_flow_field_info entry. Offset and size are given in
 * bytes and converted to bits here; no sub-field mask is applied.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = 0, \
}

/* Same as ICE_FLOW_FLD_INFO but with an explicit 16-bit field mask */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
55
/* Table containing properties of supported protocol header fields.
 * Entries are indexed by (and must remain in the order of) the
 * ICE_FLOW_FIELD_IDX_* values named in the per-entry comments below.
 */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* Ether */
	/* ICE_FLOW_FIELD_IDX_ETH_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ETH_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_S_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_C_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
	/* IPv4 / IPv6 */
	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x00fc),
	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x0ff0),
	/* TTL and PROT entries use ICE_FLOW_SEG_HDR_NONE with a byte mask:
	 * both single-byte fields share one 16-bit extraction word.
	 * NOTE(review): the actual L3 header appears to be resolved elsewhere
	 * at profile-creation time - confirm against the extraction logic.
	 */
	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* IPv6 address-prefix entries: same offsets as the full SA/DA above,
	 * but only the leading 32/48/64 bits of the address are used.
	 */
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
	/* Transport */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS - flags byte at offset 13 of TCP hdr */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
	/* ARP */
	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_OP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
	/* ICMP */
	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
	/* GTP */
	/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI - QFI is a 6-bit field (mask) */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
			      ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
	/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* PPPOE */
	/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
			  ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
	/* PFCP */
	/* ICE_FLOW_FIELD_IDX_PFCP_SEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
			  ICE_FLOW_FLD_SZ_PFCP_SEID),
	/* L2TPV3 */
	/* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
			  ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
	/* ESP */
	/* ICE_FLOW_FIELD_IDX_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
			  ICE_FLOW_FLD_SZ_ESP_SPI),
	/* AH */
	/* ICE_FLOW_FIELD_IDX_AH_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
			  ICE_FLOW_FLD_SZ_AH_SPI),
	/* NAT_T_ESP */
	/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI - ESP SPI after the 8-byte UDP
	 * encapsulation header
	 */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
			  ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
};
193
/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Each bitmap spans ICE_FLOW_PTYPE_MAX packet types; a set bit marks a
 * hardware packet type (PTYPE) index that carries the given header
 * (bit i of 32-bit word w presumably maps to PTYPE w * 32 + i).
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000155, 0x00000000, 0x00000000,
	0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x001FF800, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
	0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
	0x00000800, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
280
/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00410000, 0x90842000, 0x00000007,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00820000, 0x21084000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x01040000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
	0x10000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
	0x00000000, 0x02040408, 0x40000102, 0x08101020,
	0x00000408, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x42108000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
354
/* Packet types for packets with an Innermost/Last MAC header.
 * Currently empty: no PTYPE carries an inner MAC header.
 */
static const u32 ice_ptypes_mac_il[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC */
static const u32 ice_ptypes_gtpc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000180, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC with TEID */
static const u32 ice_ptypes_gtpc_tid[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000060, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
390
/* Packet types for GTPU */

/* GTP-U PTYPEs tagged with the PDU-session extension header attribute */
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
};

/* Same GTP-U PTYPE set, tagged with the downlink attribute */
static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
};

/* Same GTP-U PTYPE set, tagged with the uplink attribute */
static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
};
460
/* Packet types for GTPU (all GTP-U PTYPEs, regardless of attribute) */
static const u32 ice_ptypes_gtpu[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for pppoe */
static const u32 ice_ptypes_pppoe[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with PFCP NODE header */
static const u32 ice_ptypes_pfcp_node[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x80000000, 0x00000002,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with PFCP SESSION header */
static const u32 ice_ptypes_pfcp_session[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000005,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for l2tpv3 */
static const u32 ice_ptypes_l2tpv3[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000300,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for esp */
static const u32 ice_ptypes_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000003, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for ah */
static const u32 ice_ptypes_ah[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x0000000C, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with NAT_T ESP header (UDP-encapsulated ESP) */
static const u32 ice_ptypes_nat_t_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000030, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types with an outer MAC header but no IP payload */
static const u32 ice_ptypes_mac_non_ip_ofos[] = {
	0x00000846, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
566
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk;	/* HW block the profile is built for */
	u16 entry_length; /* # of bytes formatted entry will require */
	u8 es_cnt;	/* # of es[] entries populated so far (presumably) */
	struct ice_flow_prof *prof;	/* profile being constructed */

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];
	/* attributes can be used to add attributes to a particular PTYPE */
	const struct ice_ptype_attributes *attr;
	u16 attr_cnt;	/* number of entries in attr[] */

	/* per-word masks; NOTE(review): appears to parallel es[] - confirm */
	u16 mask[ICE_MAX_FV_WORDS];
	ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
};
585
/* Tunnel/session protocol headers that select the inner header set for RSS */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	ICE_FLOW_SEG_HDR_NAT_T_ESP)

/* All L2 header flags */
#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* All L3 header flags - at most one may be set per segment */
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
/* All L4 header flags - at most one may be set per segment */
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
601
602 /**
603  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
604  * @segs: array of one or more packet segments that describe the flow
605  * @segs_cnt: number of packet segments provided
606  */
607 static enum ice_status
608 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
609 {
610         u8 i;
611
612         for (i = 0; i < segs_cnt; i++) {
613                 /* Multiple L3 headers */
614                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
615                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
616                         return ICE_ERR_PARAM;
617
618                 /* Multiple L4 headers */
619                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
620                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
621                         return ICE_ERR_PARAM;
622         }
623
624         return ICE_SUCCESS;
625 }
626
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC        14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4       20
#define ICE_FLOW_PROT_HDR_SZ_IPV6       40
#define ICE_FLOW_PROT_HDR_SZ_ARP        28
#define ICE_FLOW_PROT_HDR_SZ_ICMP       8
#define ICE_FLOW_PROT_HDR_SZ_TCP        20
#define ICE_FLOW_PROT_HDR_SZ_UDP        8
#define ICE_FLOW_PROT_HDR_SZ_SCTP       12
637
638 /**
639  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
640  * @params: information about the flow to be processed
641  * @seg: index of packet segment whose header size is to be determined
642  */
643 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
644 {
645         u16 sz;
646
647         /* L2 headers */
648         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
649                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
650
651         /* L3 headers */
652         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
653                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
654         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
655                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
656         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
657                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
658         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
659                 /* A L3 header is required if L4 is specified */
660                 return 0;
661
662         /* L4 headers */
663         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
664                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
665         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
666                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
667         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
668                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
669         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
670                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
671
672         return sz;
673 }
674
675 /**
676  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
677  * @params: information about the flow to be processed
678  *
679  * This function identifies the packet types associated with the protocol
680  * headers being present in packet segments of the specified flow profile.
681  */
682 static enum ice_status
683 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
684 {
685         struct ice_flow_prof *prof;
686         u8 i;
687
688         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
689                    ICE_NONDMA_MEM);
690
691         prof = params->prof;
692
693         for (i = 0; i < params->prof->segs_cnt; i++) {
694                 const ice_bitmap_t *src;
695                 u32 hdrs;
696
697                 hdrs = prof->segs[i].hdrs;
698
699                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
700                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
701                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
702                         ice_and_bitmap(params->ptypes, params->ptypes, src,
703                                        ICE_FLOW_PTYPE_MAX);
704                 }
705
706                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
707                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
708                         ice_and_bitmap(params->ptypes, params->ptypes, src,
709                                        ICE_FLOW_PTYPE_MAX);
710                 }
711
712                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
713                         ice_and_bitmap(params->ptypes, params->ptypes,
714                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
715                                        ICE_FLOW_PTYPE_MAX);
716                 }
717
718                 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
719                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
720                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
721                         ice_and_bitmap(params->ptypes, params->ptypes, src,
722                                        ICE_FLOW_PTYPE_MAX);
723                         if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
724                                 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
725                                 ice_and_bitmap(params->ptypes,
726                                                 params->ptypes, src,
727                                                ICE_FLOW_PTYPE_MAX);
728                         } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
729                                 ice_and_bitmap(params->ptypes, params->ptypes,
730                                                (const ice_bitmap_t *)
731                                                ice_ptypes_tcp_il,
732                                                ICE_FLOW_PTYPE_MAX);
733                         } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
734                                 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
735                                 ice_and_bitmap(params->ptypes, params->ptypes,
736                                                src, ICE_FLOW_PTYPE_MAX);
737                         }
738                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
739                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
740                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
741                         ice_and_bitmap(params->ptypes, params->ptypes, src,
742                                        ICE_FLOW_PTYPE_MAX);
743                         if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
744                                 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
745                                 ice_and_bitmap(params->ptypes,
746                                                 params->ptypes, src,
747                                                ICE_FLOW_PTYPE_MAX);
748                         } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
749                                 ice_and_bitmap(params->ptypes, params->ptypes,
750                                                (const ice_bitmap_t *)
751                                                ice_ptypes_tcp_il,
752                                                ICE_FLOW_PTYPE_MAX);
753                         } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
754                                 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
755                                 ice_and_bitmap(params->ptypes, params->ptypes,
756                                                src, ICE_FLOW_PTYPE_MAX);
757                         }
758                 }
759
760                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
761                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
762                         ice_and_bitmap(params->ptypes, params->ptypes,
763                                        src, ICE_FLOW_PTYPE_MAX);
764                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
765                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
766                         ice_and_bitmap(params->ptypes, params->ptypes, src,
767                                        ICE_FLOW_PTYPE_MAX);
768                 } else {
769                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
770                         ice_andnot_bitmap(params->ptypes, params->ptypes, src,
771                                           ICE_FLOW_PTYPE_MAX);
772                 }
773
774                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
775                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
776                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
777                         ice_and_bitmap(params->ptypes, params->ptypes, src,
778                                        ICE_FLOW_PTYPE_MAX);
779                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
780                         if (!i) {
781                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
782                                 ice_and_bitmap(params->ptypes, params->ptypes,
783                                                src, ICE_FLOW_PTYPE_MAX);
784                         }
785                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
786                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
787                         ice_and_bitmap(params->ptypes, params->ptypes,
788                                        src, ICE_FLOW_PTYPE_MAX);
789                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
790                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
791                         ice_and_bitmap(params->ptypes, params->ptypes,
792                                        src, ICE_FLOW_PTYPE_MAX);
793                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
794                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
795                         ice_and_bitmap(params->ptypes, params->ptypes,
796                                        src, ICE_FLOW_PTYPE_MAX);
797
798                         /* Attributes for GTP packet with downlink */
799                         params->attr = ice_attr_gtpu_down;
800                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
801                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
802                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
803                         ice_and_bitmap(params->ptypes, params->ptypes,
804                                        src, ICE_FLOW_PTYPE_MAX);
805
806                         /* Attributes for GTP packet with uplink */
807                         params->attr = ice_attr_gtpu_up;
808                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
809                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
810                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
811                         ice_and_bitmap(params->ptypes, params->ptypes,
812                                        src, ICE_FLOW_PTYPE_MAX);
813
814                         /* Attributes for GTP packet with Extension Header */
815                         params->attr = ice_attr_gtpu_eh;
816                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
817                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
818                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
819                         ice_and_bitmap(params->ptypes, params->ptypes,
820                                        src, ICE_FLOW_PTYPE_MAX);
821                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
822                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
823                         ice_and_bitmap(params->ptypes, params->ptypes,
824                                        src, ICE_FLOW_PTYPE_MAX);
825                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
826                         src = (const ice_bitmap_t *)ice_ptypes_esp;
827                         ice_and_bitmap(params->ptypes, params->ptypes,
828                                        src, ICE_FLOW_PTYPE_MAX);
829                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
830                         src = (const ice_bitmap_t *)ice_ptypes_ah;
831                         ice_and_bitmap(params->ptypes, params->ptypes,
832                                        src, ICE_FLOW_PTYPE_MAX);
833                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
834                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
835                         ice_and_bitmap(params->ptypes, params->ptypes,
836                                        src, ICE_FLOW_PTYPE_MAX);
837                 }
838
839                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
840                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
841                                 src =
842                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
843                         else
844                                 src =
845                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
846
847                         ice_and_bitmap(params->ptypes, params->ptypes,
848                                        src, ICE_FLOW_PTYPE_MAX);
849                 } else {
850                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
851                         ice_andnot_bitmap(params->ptypes, params->ptypes,
852                                           src, ICE_FLOW_PTYPE_MAX);
853
854                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
855                         ice_andnot_bitmap(params->ptypes, params->ptypes,
856                                           src, ICE_FLOW_PTYPE_MAX);
857                 }
858         }
859
860         return ICE_SUCCESS;
861 }
862
863 /**
864  * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
865  * @hw: pointer to the HW struct
866  * @params: information about the flow to be processed
867  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
868  *
869  * This function will allocate an extraction sequence entries for a DWORD size
870  * chunk of the packet flags.
871  */
872 static enum ice_status
873 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
874                           struct ice_flow_prof_params *params,
875                           enum ice_flex_mdid_pkt_flags flags)
876 {
877         u8 fv_words = hw->blk[params->blk].es.fvw;
878         u8 idx;
879
880         /* Make sure the number of extraction sequence entries required does not
881          * exceed the block's capacity.
882          */
883         if (params->es_cnt >= fv_words)
884                 return ICE_ERR_MAX_LIMIT;
885
886         /* some blocks require a reversed field vector layout */
887         if (hw->blk[params->blk].es.reverse)
888                 idx = fv_words - params->es_cnt - 1;
889         else
890                 idx = params->es_cnt;
891
892         params->es[idx].prot_id = ICE_PROT_META_ID;
893         params->es[idx].off = flags;
894         params->es_cnt++;
895
896         return ICE_SUCCESS;
897 }
898
899 /**
900  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
901  * @hw: pointer to the HW struct
902  * @params: information about the flow to be processed
903  * @seg: packet segment index of the field to be extracted
904  * @fld: ID of field to be extracted
905  * @match: bitfield of all fields
906  *
907  * This function determines the protocol ID, offset, and size of the given
908  * field. It then allocates one or more extraction sequence entries for the
909  * given field, and fill the entries with protocol ID and offset information.
910  */
911 static enum ice_status
912 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
913                     u8 seg, enum ice_flow_field fld, u64 match)
914 {
915         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
916         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
917         u8 fv_words = hw->blk[params->blk].es.fvw;
918         struct ice_flow_fld_info *flds;
919         u16 cnt, ese_bits, i;
920         u16 sib_mask = 0;
921         u16 mask;
922         u16 off;
923
924         flds = params->prof->segs[seg].fields;
925
926         switch (fld) {
927         case ICE_FLOW_FIELD_IDX_ETH_DA:
928         case ICE_FLOW_FIELD_IDX_ETH_SA:
929         case ICE_FLOW_FIELD_IDX_S_VLAN:
930         case ICE_FLOW_FIELD_IDX_C_VLAN:
931                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
932                 break;
933         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
934                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
935                 break;
936         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
937                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
938                 break;
939         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
940                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
941                 break;
942         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
943         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
944                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
945
946                 /* TTL and PROT share the same extraction seq. entry.
947                  * Each is considered a sibling to the other in terms of sharing
948                  * the same extraction sequence entry.
949                  */
950                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
951                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
952                 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
953                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
954
955                 /* If the sibling field is also included, that field's
956                  * mask needs to be included.
957                  */
958                 if (match & BIT(sib))
959                         sib_mask = ice_flds_info[sib].mask;
960                 break;
961         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
962         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
963                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
964
965                 /* TTL and PROT share the same extraction seq. entry.
966                  * Each is considered a sibling to the other in terms of sharing
967                  * the same extraction sequence entry.
968                  */
969                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
970                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
971                 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
972                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
973
974                 /* If the sibling field is also included, that field's
975                  * mask needs to be included.
976                  */
977                 if (match & BIT(sib))
978                         sib_mask = ice_flds_info[sib].mask;
979                 break;
980         case ICE_FLOW_FIELD_IDX_IPV4_SA:
981         case ICE_FLOW_FIELD_IDX_IPV4_DA:
982                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
983                 break;
984         case ICE_FLOW_FIELD_IDX_IPV6_SA:
985         case ICE_FLOW_FIELD_IDX_IPV6_DA:
986         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
987         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
988         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
989         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
990         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
991         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
992                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
993                 break;
994         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
995         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
996         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
997                 prot_id = ICE_PROT_TCP_IL;
998                 break;
999         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1000         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1001                 prot_id = ICE_PROT_UDP_IL_OR_S;
1002                 break;
1003         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1004         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1005                 prot_id = ICE_PROT_SCTP_IL;
1006                 break;
1007         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1008         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1009         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1010         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1011         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1012         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1013                 /* GTP is accessed through UDP OF protocol */
1014                 prot_id = ICE_PROT_UDP_OF;
1015                 break;
1016         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1017                 prot_id = ICE_PROT_PPPOE;
1018                 break;
1019         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1020                 prot_id = ICE_PROT_UDP_IL_OR_S;
1021                 break;
1022         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1023                 prot_id = ICE_PROT_L2TPV3;
1024                 break;
1025         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1026                 prot_id = ICE_PROT_ESP_F;
1027                 break;
1028         case ICE_FLOW_FIELD_IDX_AH_SPI:
1029                 prot_id = ICE_PROT_ESP_2;
1030                 break;
1031         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1032                 prot_id = ICE_PROT_UDP_IL_OR_S;
1033                 break;
1034         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1035         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1036         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1037         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1038         case ICE_FLOW_FIELD_IDX_ARP_OP:
1039                 prot_id = ICE_PROT_ARP_OF;
1040                 break;
1041         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1042         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1043                 /* ICMP type and code share the same extraction seq. entry */
1044                 prot_id = (params->prof->segs[seg].hdrs &
1045                            ICE_FLOW_SEG_HDR_IPV4) ?
1046                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1047                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1048                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1049                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1050                 break;
1051         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1052                 prot_id = ICE_PROT_GRE_OF;
1053                 break;
1054         default:
1055                 return ICE_ERR_NOT_IMPL;
1056         }
1057
1058         /* Each extraction sequence entry is a word in size, and extracts a
1059          * word-aligned offset from a protocol header.
1060          */
1061         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1062
1063         flds[fld].xtrct.prot_id = prot_id;
1064         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1065                 ICE_FLOW_FV_EXTRACT_SZ;
1066         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1067         flds[fld].xtrct.idx = params->es_cnt;
1068         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1069
1070         /* Adjust the next field-entry index after accommodating the number of
1071          * entries this field consumes
1072          */
1073         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1074                                   ice_flds_info[fld].size, ese_bits);
1075
1076         /* Fill in the extraction sequence entries needed for this field */
1077         off = flds[fld].xtrct.off;
1078         mask = flds[fld].xtrct.mask;
1079         for (i = 0; i < cnt; i++) {
1080                 /* Only consume an extraction sequence entry if there is no
1081                  * sibling field associated with this field or the sibling entry
1082                  * already extracts the word shared with this field.
1083                  */
1084                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1085                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1086                     flds[sib].xtrct.off != off) {
1087                         u8 idx;
1088
1089                         /* Make sure the number of extraction sequence required
1090                          * does not exceed the block's capability
1091                          */
1092                         if (params->es_cnt >= fv_words)
1093                                 return ICE_ERR_MAX_LIMIT;
1094
1095                         /* some blocks require a reversed field vector layout */
1096                         if (hw->blk[params->blk].es.reverse)
1097                                 idx = fv_words - params->es_cnt - 1;
1098                         else
1099                                 idx = params->es_cnt;
1100
1101                         params->es[idx].prot_id = prot_id;
1102                         params->es[idx].off = off;
1103                         params->mask[idx] = mask | sib_mask;
1104                         params->es_cnt++;
1105                 }
1106
1107                 off += ICE_FLOW_FV_EXTRACT_SZ;
1108         }
1109
1110         return ICE_SUCCESS;
1111 }
1112
1113 /**
1114  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1115  * @hw: pointer to the HW struct
1116  * @params: information about the flow to be processed
1117  * @seg: index of packet segment whose raw fields are to be be extracted
1118  */
1119 static enum ice_status
1120 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1121                      u8 seg)
1122 {
1123         u16 fv_words;
1124         u16 hdrs_sz;
1125         u8 i;
1126
1127         if (!params->prof->segs[seg].raws_cnt)
1128                 return ICE_SUCCESS;
1129
1130         if (params->prof->segs[seg].raws_cnt >
1131             ARRAY_SIZE(params->prof->segs[seg].raws))
1132                 return ICE_ERR_MAX_LIMIT;
1133
1134         /* Offsets within the segment headers are not supported */
1135         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1136         if (!hdrs_sz)
1137                 return ICE_ERR_PARAM;
1138
1139         fv_words = hw->blk[params->blk].es.fvw;
1140
1141         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1142                 struct ice_flow_seg_fld_raw *raw;
1143                 u16 off, cnt, j;
1144
1145                 raw = &params->prof->segs[seg].raws[i];
1146
1147                 /* Storing extraction information */
1148                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1149                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1150                         ICE_FLOW_FV_EXTRACT_SZ;
1151                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1152                         BITS_PER_BYTE;
1153                 raw->info.xtrct.idx = params->es_cnt;
1154
1155                 /* Determine the number of field vector entries this raw field
1156                  * consumes.
1157                  */
1158                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1159                                           (raw->info.src.last * BITS_PER_BYTE),
1160                                           (ICE_FLOW_FV_EXTRACT_SZ *
1161                                            BITS_PER_BYTE));
1162                 off = raw->info.xtrct.off;
1163                 for (j = 0; j < cnt; j++) {
1164                         u16 idx;
1165
1166                         /* Make sure the number of extraction sequence required
1167                          * does not exceed the block's capability
1168                          */
1169                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1170                             params->es_cnt >= ICE_MAX_FV_WORDS)
1171                                 return ICE_ERR_MAX_LIMIT;
1172
1173                         /* some blocks require a reversed field vector layout */
1174                         if (hw->blk[params->blk].es.reverse)
1175                                 idx = fv_words - params->es_cnt - 1;
1176                         else
1177                                 idx = params->es_cnt;
1178
1179                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1180                         params->es[idx].off = off;
1181                         params->es_cnt++;
1182                         off += ICE_FLOW_FV_EXTRACT_SZ;
1183                 }
1184         }
1185
1186         return ICE_SUCCESS;
1187 }
1188
1189 /**
1190  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1191  * @hw: pointer to the HW struct
1192  * @params: information about the flow to be processed
1193  *
1194  * This function iterates through all matched fields in the given segments, and
1195  * creates an extraction sequence for the fields.
1196  */
1197 static enum ice_status
1198 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1199                           struct ice_flow_prof_params *params)
1200 {
1201         enum ice_status status = ICE_SUCCESS;
1202         u8 i;
1203
1204         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1205          * packet flags
1206          */
1207         if (params->blk == ICE_BLK_ACL) {
1208                 status = ice_flow_xtract_pkt_flags(hw, params,
1209                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1210                 if (status)
1211                         return status;
1212         }
1213
1214         for (i = 0; i < params->prof->segs_cnt; i++) {
1215                 u64 match = params->prof->segs[i].match;
1216                 enum ice_flow_field j;
1217
1218                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1219                         const u64 bit = BIT_ULL(j);
1220
1221                         if (match & bit) {
1222                                 status = ice_flow_xtract_fld(hw, params, i, j,
1223                                                              match);
1224                                 if (status)
1225                                         return status;
1226                                 match &= ~bit;
1227                         }
1228                 }
1229
1230                 /* Process raw matching bytes */
1231                 status = ice_flow_xtract_raws(hw, params, i);
1232                 if (status)
1233                         return status;
1234         }
1235
1236         return status;
1237 }
1238
1239 /**
1240  * ice_flow_sel_acl_scen - returns the specific scenario
1241  * @hw: pointer to the hardware structure
1242  * @params: information about the flow to be processed
1243  *
1244  * This function will return the specific scenario based on the
1245  * params passed to it
1246  */
1247 static enum ice_status
1248 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1249 {
1250         /* Find the best-fit scenario for the provided match width */
1251         struct ice_acl_scen *cand_scen = NULL, *scen;
1252
1253         if (!hw->acl_tbl)
1254                 return ICE_ERR_DOES_NOT_EXIST;
1255
1256         /* Loop through each scenario and match against the scenario width
1257          * to select the specific scenario
1258          */
1259         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1260                 if (scen->eff_width >= params->entry_length &&
1261                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1262                         cand_scen = scen;
1263         if (!cand_scen)
1264                 return ICE_ERR_DOES_NOT_EXIST;
1265
1266         params->prof->cfg.scen = cand_scen;
1267
1268         return ICE_SUCCESS;
1269 }
1270
/**
 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
 * @params: information about the flow to be processed
 *
 * Walks every matched field and raw match of each packet segment and assigns
 * it a position within the ACL entry: range-checked fields consume one of the
 * range-checker slots, while all other fields (and raws) are packed
 * byte-by-byte starting at ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX.  On success,
 * params->entry_length holds the number of bytes one entry will occupy.
 *
 * Returns ICE_ERR_PARAM when a range field spans more than one word, a range
 * is missing its low/high value, the packed entry exceeds the byte-selection
 * capacity, or more range checkers are needed than the profile supports.
 */
static enum ice_status
ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
{
	u16 index, i, range_idx = 0;

	/* Byte-selection indices start after the reserved leading elements */
	index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		struct ice_flow_seg_info *seg = &params->prof->segs[i];
		u64 match = seg->match;
		u8 j;

		/* Visit only the field bits still set in 'match' */
		for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
			struct ice_flow_fld_info *fld;
			const u64 bit = BIT_ULL(j);

			if (!(match & bit))
				continue;

			fld = &seg->fields[j];
			fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;

			if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
				fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;

				/* Range checking only supported for single
				 * words
				 */
				if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
							fld->xtrct.disp,
							BITS_PER_BYTE * 2) > 1)
					return ICE_ERR_PARAM;

				/* Ranges must define low and high values */
				if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
				    fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
					return ICE_ERR_PARAM;

				/* Consume the next range-checker slot */
				fld->entry.val = range_idx++;
			} else {
				/* Store adjusted byte-length of field for later
				 * use, taking into account potential
				 * non-byte-aligned displacement
				 */
				fld->entry.last = DIVIDE_AND_ROUND_UP
					(ice_flds_info[j].size +
					 (fld->xtrct.disp % BITS_PER_BYTE),
					 BITS_PER_BYTE);
				fld->entry.val = index;
				index += fld->entry.last;
			}

			match &= ~bit;
		}

		/* Raw matches are always packed after the named fields */
		for (j = 0; j < seg->raws_cnt; j++) {
			struct ice_flow_seg_fld_raw *raw = &seg->raws[j];

			raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
			raw->info.entry.val = index;
			raw->info.entry.last = raw->info.src.last;
			index += raw->info.entry.last;
		}
	}

	/* Currently only support using the byte selection base, which only
	 * allows for an effective entry size of 30 bytes. Reject anything
	 * larger.
	 */
	if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
		return ICE_ERR_PARAM;

	/* Only 8 range checkers per profile, reject anything trying to use
	 * more
	 */
	if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
		return ICE_ERR_PARAM;

	/* Store # bytes required for entry for later use */
	params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	return ICE_SUCCESS;
}
1358
1359 /**
1360  * ice_flow_proc_segs - process all packet segments associated with a profile
1361  * @hw: pointer to the HW struct
1362  * @params: information about the flow to be processed
1363  */
1364 static enum ice_status
1365 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1366 {
1367         enum ice_status status;
1368
1369         status = ice_flow_proc_seg_hdrs(params);
1370         if (status)
1371                 return status;
1372
1373         status = ice_flow_create_xtrct_seq(hw, params);
1374         if (status)
1375                 return status;
1376
1377         switch (params->blk) {
1378         case ICE_BLK_FD:
1379         case ICE_BLK_RSS:
1380                 status = ICE_SUCCESS;
1381                 break;
1382         case ICE_BLK_ACL:
1383                 status = ice_flow_acl_def_entry_frmt(params);
1384                 if (status)
1385                         return status;
1386                 status = ice_flow_sel_acl_scen(hw, params);
1387                 if (status)
1388                         return status;
1389                 break;
1390         default:
1391                 return ICE_ERR_NOT_IMPL;
1392         }
1393
1394         return status;
1395 }
1396
1397 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1398 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1399 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1400
1401 /**
1402  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1403  * @hw: pointer to the HW struct
1404  * @blk: classification stage
1405  * @dir: flow direction
1406  * @segs: array of one or more packet segments that describe the flow
1407  * @segs_cnt: number of packet segments provided
1408  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1409  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1410  */
1411 static struct ice_flow_prof *
1412 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1413                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1414                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1415 {
1416         struct ice_flow_prof *p, *prof = NULL;
1417
1418         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1419         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1420                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1421                     segs_cnt && segs_cnt == p->segs_cnt) {
1422                         u8 i;
1423
1424                         /* Check for profile-VSI association if specified */
1425                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1426                             ice_is_vsi_valid(hw, vsi_handle) &&
1427                             !ice_is_bit_set(p->vsis, vsi_handle))
1428                                 continue;
1429
1430                         /* Protocol headers must be checked. Matched fields are
1431                          * checked if specified.
1432                          */
1433                         for (i = 0; i < segs_cnt; i++)
1434                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1435                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1436                                      segs[i].match != p->segs[i].match))
1437                                         break;
1438
1439                         /* A match is found if all segments are matched */
1440                         if (i == segs_cnt) {
1441                                 prof = p;
1442                                 break;
1443                         }
1444                 }
1445         ice_release_lock(&hw->fl_profs_locks[blk]);
1446
1447         return prof;
1448 }
1449
1450 /**
1451  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1452  * @hw: pointer to the HW struct
1453  * @blk: classification stage
1454  * @dir: flow direction
1455  * @segs: array of one or more packet segments that describe the flow
1456  * @segs_cnt: number of packet segments provided
1457  */
1458 u64
1459 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1460                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1461 {
1462         struct ice_flow_prof *p;
1463
1464         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1465                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1466
1467         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1468 }
1469
1470 /**
1471  * ice_flow_find_prof_id - Look up a profile with given profile ID
1472  * @hw: pointer to the HW struct
1473  * @blk: classification stage
1474  * @prof_id: unique ID to identify this flow profile
1475  */
1476 static struct ice_flow_prof *
1477 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1478 {
1479         struct ice_flow_prof *p;
1480
1481         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1482                 if (p->id == prof_id)
1483                         return p;
1484
1485         return NULL;
1486 }
1487
/**
 * ice_dealloc_flow_entry - Deallocate flow entry memory
 * @hw: pointer to the HW struct
 * @entry: flow entry to be removed
 *
 * Frees the entry's buffers (extracted-data buffer, range-checker buffer,
 * action array) and finally the entry structure itself.  Safe to call with a
 * NULL entry.
 */
static void
ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
{
	if (!entry)
		return;

	if (entry->entry)
		ice_free(hw, entry->entry);

	if (entry->range_buf) {
		ice_free(hw, entry->range_buf);
		entry->range_buf = NULL;
	}

	if (entry->acts) {
		ice_free(hw, entry->acts);
		entry->acts = NULL;
		entry->acts_cnt = 0;
	}

	/* Release the entry last; the pointer is dangling afterwards */
	ice_free(hw, entry);
}
1515
1516 #define ICE_ACL_INVALID_SCEN    0x3f
1517
1518 /**
1519  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1520  * @hw: pointer to the hardware structure
1521  * @prof: pointer to flow profile
1522  * @buf: destination buffer function writes partial extraction sequence to
1523  *
1524  * returns ICE_SUCCESS if no PF is associated to the given profile
1525  * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1526  * returns other error code for real error
1527  */
1528 static enum ice_status
1529 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1530                             struct ice_aqc_acl_prof_generic_frmt *buf)
1531 {
1532         enum ice_status status;
1533         u8 prof_id = 0;
1534
1535         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1536         if (status)
1537                 return status;
1538
1539         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1540         if (status)
1541                 return status;
1542
1543         /* If all PF's associated scenarios are all 0 or all
1544          * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1545          * not been configured yet.
1546          */
1547         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1548             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1549             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1550             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1551                 return ICE_SUCCESS;
1552
1553         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1554             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1555             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1556             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1557             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1558             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1559             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1560             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1561                 return ICE_SUCCESS;
1562         else
1563                 return ICE_ERR_IN_USE;
1564 }
1565
1566 /**
1567  * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1568  * @hw: pointer to the hardware structure
1569  * @acts: array of actions to be performed on a match
1570  * @acts_cnt: number of actions
1571  */
1572 static enum ice_status
1573 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1574                            u8 acts_cnt)
1575 {
1576         int i;
1577
1578         for (i = 0; i < acts_cnt; i++) {
1579                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1580                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1581                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1582                         struct ice_acl_cntrs cntrs;
1583                         enum ice_status status;
1584
1585                         cntrs.bank = 0; /* Only bank0 for the moment */
1586                         cntrs.first_cntr =
1587                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1588                         cntrs.last_cntr =
1589                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1590
1591                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1592                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1593                         else
1594                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1595
1596                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1597                         if (status)
1598                                 return status;
1599                 }
1600         }
1601         return ICE_SUCCESS;
1602 }
1603
1604 /**
1605  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1606  * @hw: pointer to the hardware structure
1607  * @prof: pointer to flow profile
1608  *
1609  * Disassociate the scenario from the profile for the PF of the VSI.
1610  */
1611 static enum ice_status
1612 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1613 {
1614         struct ice_aqc_acl_prof_generic_frmt buf;
1615         enum ice_status status = ICE_SUCCESS;
1616         u8 prof_id = 0;
1617
1618         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1619
1620         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1621         if (status)
1622                 return status;
1623
1624         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1625         if (status)
1626                 return status;
1627
1628         /* Clear scenario for this PF */
1629         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1630         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1631
1632         return status;
1633 }
1634
1635 /**
1636  * ice_flow_rem_entry_sync - Remove a flow entry
1637  * @hw: pointer to the HW struct
1638  * @blk: classification stage
1639  * @entry: flow entry to be removed
1640  */
1641 static enum ice_status
1642 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1643                         struct ice_flow_entry *entry)
1644 {
1645         if (!entry)
1646                 return ICE_ERR_BAD_PTR;
1647
1648         if (blk == ICE_BLK_ACL) {
1649                 enum ice_status status;
1650
1651                 if (!entry->prof)
1652                         return ICE_ERR_BAD_PTR;
1653
1654                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1655                                            entry->scen_entry_idx);
1656                 if (status)
1657                         return status;
1658
1659                 /* Checks if we need to release an ACL counter. */
1660                 if (entry->acts_cnt && entry->acts)
1661                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1662                                                    entry->acts_cnt);
1663         }
1664
1665         LIST_DEL(&entry->l_entry);
1666
1667         ice_dealloc_flow_entry(hw, entry);
1668
1669         return ICE_SUCCESS;
1670 }
1671
/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @acts: array of default actions
 * @acts_cnt: number of default actions
 * @prof: stores the returned flow profile added
 *
 * Allocates a profile instance, copies segments and actions into it, derives
 * the extraction sequence and programs the matching HW profile.  On any
 * failure the allocated instance (and its action copy) is freed.
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_action *acts, u8 acts_cnt,
		       struct ice_flow_prof **prof)
{
	struct ice_flow_prof_params params;
	enum ice_status status;
	u8 i;

	/* Actions are optional, but a non-zero count requires a valid array */
	if (!prof || (acts_cnt && !acts))
		return ICE_ERR_BAD_PTR;

	ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
	params.prof = (struct ice_flow_prof *)
		ice_malloc(hw, sizeof(*params.prof));
	if (!params.prof)
		return ICE_ERR_NO_MEMORY;

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params.es[i].prot_id = ICE_PROT_INVALID;
		params.es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params.blk = blk;
	params.prof->id = prof_id;
	params.prof->dir = dir;
	params.prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
			   ICE_NONDMA_TO_NONDMA);

	/* Make a copy of the actions that need to be persistent in the flow
	 * profile instance.
	 */
	if (acts_cnt) {
		params.prof->acts = (struct ice_flow_action *)
			ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
				   ICE_NONDMA_TO_NONDMA);

		if (!params.prof->acts) {
			status = ICE_ERR_NO_MEMORY;
			goto out;
		}
	}

	/* Derive ptypes/attributes/extraction sequence for all segments */
	status = ice_flow_proc_segs(hw, &params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW,
			  "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
			      params.attr, params.attr_cnt, params.es,
			      params.mask);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params.prof->entries);
	ice_init_lock(&params.prof->entries_lock);
	*prof = params.prof;

out:
	/* On failure, release the profile copy and its action array */
	if (status) {
		if (params.prof->acts)
			ice_free(hw, params.prof->acts);
		ice_free(hw, params.prof);
	}

	return status;
}
1767
/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Removes all entries still attached to the profile, performs ACL-specific
 * teardown (scenario disassociation and range-checker clearing when no PF
 * still uses the profile), then removes the HW profiles and frees the
 * instance on success.
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the flow profile */
	if (!LIST_EMPTY(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		ice_acquire_lock(&prof->entries_lock);

		/* NOTE(review): a failure here stops the loop but 'status' is
		 * overwritten by later assignments — appears to be best-effort
		 * cleanup; confirm that is intended
		 */
		LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
					 l_entry) {
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		ice_release_lock(&prof->entries_lock);
	}

	if (blk == ICE_BLK_ACL) {
		struct ice_aqc_acl_profile_ranges query_rng_buf;
		struct ice_aqc_acl_prof_generic_frmt buf;
		u8 prof_id = 0;

		/* Disassociate the scenario from the profile for the PF */
		status = ice_flow_acl_disassoc_scen(hw, prof);
		if (status)
			return status;

		/* Clear the range-checker if the profile ID is no longer
		 * used by any PF
		 */
		status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
		if (status && status != ICE_ERR_IN_USE) {
			return status;
		} else if (!status) {
			/* Clear the range-checker value for profile ID */
			ice_memset(&query_rng_buf, 0,
				   sizeof(struct ice_aqc_acl_profile_ranges),
				   ICE_NONDMA_MEM);

			status = ice_flow_get_hw_prof(hw, blk, prof->id,
						      &prof_id);
			if (status)
				return status;

			/* Program an all-zero range configuration */
			status = ice_prog_acl_prof_ranges(hw, prof_id,
							  &query_rng_buf, NULL);
			if (status)
				return status;
		}
	}

	/* Remove all hardware profiles associated with this flow profile */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		/* 'prof' is fully released here; callers must not touch it */
		LIST_DEL(&prof->l_entry);
		ice_destroy_lock(&prof->entries_lock);
		if (prof->acts)
			ice_free(hw, prof->acts);
		ice_free(hw, prof);
	}

	return status;
}
1844
1845 /**
1846  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1847  * @buf: Destination buffer function writes partial xtrct sequence to
1848  * @info: Info about field
1849  */
1850 static void
1851 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1852                                struct ice_flow_fld_info *info)
1853 {
1854         u16 dst, i;
1855         u8 src;
1856
1857         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1858                 info->xtrct.disp / BITS_PER_BYTE;
1859         dst = info->entry.val;
1860         for (i = 0; i < info->entry.last; i++)
1861                 /* HW stores field vector words in LE, convert words back to BE
1862                  * so constructed entries will end up in network order
1863                  */
1864                 buf->byte_selection[dst++] = src++ ^ 1;
1865 }
1866
/**
 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
 * @hw: pointer to the hardware structure
 * @prof: pointer to flow profile
 *
 * Programs the profile's extraction layout (word selections for ranged
 * fields, byte selections for everything else) and then claims a scenario
 * slot for the current PF.  The profile-wide portion is written only the
 * first time any PF uses the profile.
 */
static enum ice_status
ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
{
	struct ice_aqc_acl_prof_generic_frmt buf;
	struct ice_flow_fld_info *info;
	enum ice_status status;
	u8 prof_id = 0;
	u16 i;

	ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

	status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
	if (status)
		return status;

	/* ICE_SUCCESS: no PF uses the profile yet, so build the shared part;
	 * ICE_ERR_IN_USE: shared part already programmed and 'buf' holds it
	 */
	status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
	if (status && status != ICE_ERR_IN_USE)
		return status;

	if (!status) {
		/* Program the profile dependent configuration. This is done
		 * only once regardless of the number of PFs using that profile
		 */
		ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

		for (i = 0; i < prof->segs_cnt; i++) {
			struct ice_flow_seg_info *seg = &prof->segs[i];
			u64 match = seg->match;
			u16 j;

			/* Visit each matched field exactly once */
			for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
				const u64 bit = BIT_ULL(j);

				if (!(match & bit))
					continue;

				info = &seg->fields[j];

				/* Ranged fields use word selection; all
				 * others go through the byte selection
				 */
				if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
					buf.word_selection[info->entry.val] =
								info->xtrct.idx;
				else
					ice_flow_acl_set_xtrct_seq_fld(&buf,
								       info);

				match &= ~bit;
			}

			for (j = 0; j < seg->raws_cnt; j++) {
				info = &seg->raws[j].info;
				ice_flow_acl_set_xtrct_seq_fld(&buf, info);
			}
		}

		/* Start with every PF slot marked unused */
		ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
			   ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
			   ICE_NONDMA_MEM);
	}

	/* Update the current PF */
	buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
	status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);

	return status;
}
1937
1938 /**
1939  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1940  * @hw: pointer to the hardware structure
1941  * @blk: classification stage
1942  * @vsi_handle: software VSI handle
1943  * @vsig: target VSI group
1944  *
1945  * Assumption: the caller has already verified that the VSI to
1946  * be added has the same characteristics as the VSIG and will
1947  * thereby have access to all resources added to that VSIG.
1948  */
1949 enum ice_status
1950 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1951                         u16 vsig)
1952 {
1953         enum ice_status status;
1954
1955         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1956                 return ICE_ERR_PARAM;
1957
1958         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1959         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1960                                   vsig);
1961         ice_release_lock(&hw->fl_profs_locks[blk]);
1962
1963         return status;
1964 }
1965
1966 /**
1967  * ice_flow_assoc_prof - associate a VSI with a flow profile
1968  * @hw: pointer to the hardware structure
1969  * @blk: classification stage
1970  * @prof: pointer to flow profile
1971  * @vsi_handle: software VSI handle
1972  *
1973  * Assumption: the caller has acquired the lock to the profile list
1974  * and the software VSI handle has been validated
1975  */
1976 static enum ice_status
1977 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1978                     struct ice_flow_prof *prof, u16 vsi_handle)
1979 {
1980         enum ice_status status = ICE_SUCCESS;
1981
1982         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1983                 if (blk == ICE_BLK_ACL) {
1984                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
1985                         if (status)
1986                                 return status;
1987                 }
1988                 status = ice_add_prof_id_flow(hw, blk,
1989                                               ice_get_hw_vsi_num(hw,
1990                                                                  vsi_handle),
1991                                               prof->id);
1992                 if (!status)
1993                         ice_set_bit(vsi_handle, prof->vsis);
1994                 else
1995                         ice_debug(hw, ICE_DBG_FLOW,
1996                                   "HW profile add failed, %d\n",
1997                                   status);
1998         }
1999
2000         return status;
2001 }
2002
2003 /**
2004  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2005  * @hw: pointer to the hardware structure
2006  * @blk: classification stage
2007  * @prof: pointer to flow profile
2008  * @vsi_handle: software VSI handle
2009  *
2010  * Assumption: the caller has acquired the lock to the profile list
2011  * and the software VSI handle has been validated
2012  */
2013 static enum ice_status
2014 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2015                        struct ice_flow_prof *prof, u16 vsi_handle)
2016 {
2017         enum ice_status status = ICE_SUCCESS;
2018
2019         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2020                 status = ice_rem_prof_id_flow(hw, blk,
2021                                               ice_get_hw_vsi_num(hw,
2022                                                                  vsi_handle),
2023                                               prof->id);
2024                 if (!status)
2025                         ice_clear_bit(vsi_handle, prof->vsis);
2026                 else
2027                         ice_debug(hw, ICE_DBG_FLOW,
2028                                   "HW profile remove failed, %d\n",
2029                                   status);
2030         }
2031
2032         return status;
2033 }
2034
2035 /**
2036  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2037  * @hw: pointer to the HW struct
2038  * @blk: classification stage
2039  * @dir: flow direction
2040  * @prof_id: unique ID to identify this flow profile
2041  * @segs: array of one or more packet segments that describe the flow
2042  * @segs_cnt: number of packet segments provided
2043  * @acts: array of default actions
2044  * @acts_cnt: number of default actions
2045  * @prof: stores the returned flow profile added
2046  */
2047 enum ice_status
2048 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2049                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2050                   struct ice_flow_action *acts, u8 acts_cnt,
2051                   struct ice_flow_prof **prof)
2052 {
2053         enum ice_status status;
2054
2055         if (segs_cnt > ICE_FLOW_SEG_MAX)
2056                 return ICE_ERR_MAX_LIMIT;
2057
2058         if (!segs_cnt)
2059                 return ICE_ERR_PARAM;
2060
2061         if (!segs)
2062                 return ICE_ERR_BAD_PTR;
2063
2064         status = ice_flow_val_hdrs(segs, segs_cnt);
2065         if (status)
2066                 return status;
2067
2068         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2069
2070         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2071                                         acts, acts_cnt, prof);
2072         if (!status)
2073                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2074
2075         ice_release_lock(&hw->fl_profs_locks[blk]);
2076
2077         return status;
2078 }
2079
2080 /**
2081  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2082  * @hw: pointer to the HW struct
2083  * @blk: the block for which the flow profile is to be removed
2084  * @prof_id: unique ID of the flow profile to be removed
2085  */
2086 enum ice_status
2087 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2088 {
2089         struct ice_flow_prof *prof;
2090         enum ice_status status;
2091
2092         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2093
2094         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2095         if (!prof) {
2096                 status = ICE_ERR_DOES_NOT_EXIST;
2097                 goto out;
2098         }
2099
2100         /* prof becomes invalid after the call */
2101         status = ice_flow_rem_prof_sync(hw, blk, prof);
2102
2103 out:
2104         ice_release_lock(&hw->fl_profs_locks[blk]);
2105
2106         return status;
2107 }
2108
2109 /**
2110  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2111  * @hw: pointer to the HW struct
2112  * @blk: classification stage
2113  * @prof_id: the profile ID handle
2114  * @hw_prof_id: pointer to variable to receive the HW profile ID
2115  */
2116 enum ice_status
2117 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2118                      u8 *hw_prof_id)
2119 {
2120         struct ice_prof_map *map;
2121
2122         map = ice_search_prof_id(hw, blk, prof_id);
2123         if (map) {
2124                 *hw_prof_id = map->prof_id;
2125                 return ICE_SUCCESS;
2126         }
2127
2128         return ICE_ERR_DOES_NOT_EXIST;
2129 }
2130
2131 /**
2132  * ice_flow_find_entry - look for a flow entry using its unique ID
2133  * @hw: pointer to the HW struct
2134  * @blk: classification stage
2135  * @entry_id: unique ID to identify this flow entry
2136  *
2137  * This function looks for the flow entry with the specified unique ID in all
2138  * flow profiles of the specified classification stage. If the entry is found,
2139  * and it returns the handle to the flow entry. Otherwise, it returns
2140  * ICE_FLOW_ENTRY_ID_INVAL.
2141  */
2142 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2143 {
2144         struct ice_flow_entry *found = NULL;
2145         struct ice_flow_prof *p;
2146
2147         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2148
2149         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2150                 struct ice_flow_entry *e;
2151
2152                 ice_acquire_lock(&p->entries_lock);
2153                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2154                         if (e->id == entry_id) {
2155                                 found = e;
2156                                 break;
2157                         }
2158                 ice_release_lock(&p->entries_lock);
2159
2160                 if (found)
2161                         break;
2162         }
2163
2164         ice_release_lock(&hw->fl_profs_locks[blk]);
2165
2166         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2167 }
2168
/**
 * ice_flow_acl_check_actions - Checks the ACL rule's actions
 * @hw: pointer to the hardware structure
 * @acts: array of actions to be performed on a match
 * @acts_cnt: number of actions
 * @cnt_alloc: indicates if an ACL counter has been allocated.
 *
 * Validates that each action type is supported for ACL rules and that no
 * action type is used twice. For counter actions, allocates a HW counter in
 * bank 0 and stores the counter index into the action's ACL data. On return,
 * *@cnt_alloc tells the caller whether any counter was allocated (and hence
 * must be freed on a later failure).
 */
static enum ice_status
ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
			   u8 acts_cnt, bool *cnt_alloc)
{
	/* One bit per action type, used for duplicate detection */
	ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	int i;

	ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	*cnt_alloc = false;

	if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
		return ICE_ERR_OUT_OF_RANGE;

	for (i = 0; i < acts_cnt; i++) {
		/* Only these four action types are accepted for ACL rules */
		if (acts[i].type != ICE_FLOW_ACT_NOP &&
		    acts[i].type != ICE_FLOW_ACT_DROP &&
		    acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
		    acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
			return ICE_ERR_CFG;

		/* If the caller want to add two actions of the same type, then
		 * it is considered invalid configuration.
		 */
		if (ice_test_and_set_bit(acts[i].type, dup_check))
			return ICE_ERR_PARAM;
	}

	/* Checks if ACL counters are needed. */
	for (i = 0; i < acts_cnt; i++) {
		/* NOTE(review): CNTR_BYTES and CNTR_PKT_BYTES are rejected by
		 * the validation loop above, so only the CNTR_PKT arm appears
		 * reachable here -- confirm against the action-type enum
		 * before relying on the DUAL counter path.
		 */
		if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
		    acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
		    acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
			struct ice_acl_cntrs cntrs;
			enum ice_status status;

			cntrs.amount = 1;
			cntrs.bank = 0; /* Only bank0 for the moment */

			if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
				cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
			else
				cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;

			status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
			if (status)
				return status;
			/* Counter index within the bank */
			acts[i].data.acl_act.value =
						CPU_TO_LE16(cntrs.first_cntr);
			*cnt_alloc = true;
		}
	}

	return ICE_SUCCESS;
}
2231
/**
 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
 * @fld: number of the given field
 * @info: info about field
 * @range_buf: range checker configuration buffer
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @range: Input/output param indicating which range checkers are being used
 *
 * Programs one range-checker slot (low boundary, high boundary and mask) from
 * the caller's entry data and records the used slot index in the @range
 * bitmap. All values are shifted by the extraction displacement so they line
 * up with the word the profile extracts.
 */
static void
ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
			      struct ice_aqc_acl_profile_ranges *range_buf,
			      u8 *data, u8 *range)
{
	u16 new_mask;

	/* If not specified, default mask is all bits in field */
	new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
		    BIT(ice_flds_info[fld].size) - 1 :
		    (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;

	/* If the mask is 0, then we don't need to worry about this input
	 * range checker value.
	 */
	if (new_mask) {
		/* src.last holds the upper bound, src.val the lower bound */
		u16 new_high =
			(*(u16 *)(data + info->src.last)) << info->xtrct.disp;
		u16 new_low =
			(*(u16 *)(data + info->src.val)) << info->xtrct.disp;
		u8 range_idx = info->entry.val;

		/* Boundaries and mask are stored big-endian for HW */
		range_buf->checker_cfg[range_idx].low_boundary =
			CPU_TO_BE16(new_low);
		range_buf->checker_cfg[range_idx].high_boundary =
			CPU_TO_BE16(new_high);
		range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);

		/* Indicate which range checker is being used */
		*range |= BIT(range_idx);
	}
}
2272
/**
 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
 * @fld: number of the given field
 * @info: info about the field
 * @buf: buffer containing the entry
 * @dontcare: buffer containing don't care mask for entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 *
 * Copies the field's match value (and optional mask) from the caller's data
 * buffer into the ACL key buffer, left-shifting by the sub-byte extraction
 * displacement so the bits land where the profile extracted them. Bits set in
 * @dontcare are excluded from the match. Leading/trailing bits of the first
 * and last bytes that fall outside the field are marked don't-care.
 */
static void
ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
			    u8 *dontcare, u8 *data)
{
	u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
	bool use_mask = false;
	u8 disp;

	src = info->src.val;
	mask = info->src.mask;
	/* entry.val is a byte-select index; rebase it to buffer offset 0 */
	dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
	disp = info->xtrct.disp % BITS_PER_BYTE;

	if (mask != ICE_FLOW_FLD_OFF_INVAL)
		use_mask = true;

	for (k = 0; k < info->entry.last; k++, dst++) {
		/* Add overflow bits from previous byte */
		buf[dst] = (tmp_s & 0xff00) >> 8;

		/* If mask is not valid, tmp_m is always zero, so just setting
		 * dontcare to 0 (no masked bits). If mask is valid, pulls in
		 * overflow bits of mask from prev byte
		 */
		dontcare[dst] = (tmp_m & 0xff00) >> 8;

		/* If there is displacement, last byte will only contain
		 * displaced data, but there is no more data to read from user
		 * buffer, so skip so as not to potentially read beyond end of
		 * user buffer
		 */
		if (!disp || k < info->entry.last - 1) {
			/* Store shifted data to use in next byte */
			tmp_s = data[src++] << disp;

			/* Add current (shifted) byte */
			buf[dst] |= tmp_s & 0xff;

			/* Handle mask if valid */
			if (use_mask) {
				/* Mask is inverted here: a 1 bit in dontcare
				 * means the bit is ignored by the match
				 */
				tmp_m = (~data[mask++] & 0xff) << disp;
				dontcare[dst] |= tmp_m & 0xff;
			}
		}
	}

	/* Fill in don't care bits at beginning of field */
	if (disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
		for (k = 0; k < disp; k++)
			dontcare[dst] |= BIT(k);
	}

	/* Bit position just past the field's last valid bit within its
	 * final byte (0 means the field ends on a byte boundary)
	 */
	end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;

	/* Fill in don't care bits at end of field */
	if (end_disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
		      info->entry.last - 1;
		for (k = end_disp; k < BITS_PER_BYTE; k++)
			dontcare[dst] |= BIT(k);
	}
}
2344
/**
 * ice_flow_acl_frmt_entry - Format ACL entry
 * @hw: pointer to the hardware structure
 * @prof: pointer to flow profile
 * @e: pointer to the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @acts: array of actions to be performed on a match
 * @acts_cnt: number of actions
 *
 * Formats the key (and key_inverse) to be matched from the data passed in,
 * along with data from the flow profile. This key/key_inverse pair makes up
 * the 'entry' for an ACL flow entry.
 *
 * On success, ownership of the formatted key (e->entry), the duplicated
 * actions (e->acts) and, when range checkers are used, the range buffer
 * (e->range_buf) is transferred to @e. On failure, all of those are freed
 * here and any allocated ACL counters are released.
 */
static enum ice_status
ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
			struct ice_flow_entry *e, u8 *data,
			struct ice_flow_action *acts, u8 acts_cnt)
{
	u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
	struct ice_aqc_acl_profile_ranges *range_buf = NULL;
	enum ice_status status;
	bool cnt_alloc;
	u8 prof_id = 0;
	u16 i, buf_sz;

	status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
	if (status)
		return status;

	/* Format the result action */

	/* May allocate HW counters; cnt_alloc tells the error path below
	 * whether they must be released
	 */
	status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
	if (status)
		return status;

	status = ICE_ERR_NO_MEMORY;

	e->acts = (struct ice_flow_action *)
		ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
			   ICE_NONDMA_TO_NONDMA);

	if (!e->acts)
		goto out;

	e->acts_cnt = acts_cnt;

	/* Format the matching data */
	buf_sz = prof->cfg.scen->width;
	buf = (u8 *)ice_malloc(hw, buf_sz);
	if (!buf)
		goto out;

	dontcare = (u8 *)ice_malloc(hw, buf_sz);
	if (!dontcare)
		goto out;

	/* 'key' buffer will store both key and key_inverse, so must be twice
	 * size of buf
	 */
	key = (u8 *)ice_malloc(hw, buf_sz * 2);
	if (!key)
		goto out;

	range_buf = (struct ice_aqc_acl_profile_ranges *)
		ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
	if (!range_buf)
		goto out;

	/* Set don't care mask to all 1's to start, will zero out used bytes */
	ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);

	for (i = 0; i < prof->segs_cnt; i++) {
		struct ice_flow_seg_info *seg = &prof->segs[i];
		u64 match = seg->match;
		u16 j;

		/* Format every matched field of this segment; each set bit in
		 * 'match' names one field index
		 */
		for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
			struct ice_flow_fld_info *info;
			const u64 bit = BIT_ULL(j);

			if (!(match & bit))
				continue;

			info = &seg->fields[j];

			/* Range fields go to a range-checker slot; all others
			 * are written directly into the key buffer
			 */
			if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
				ice_flow_acl_frmt_entry_range(j, info,
							      range_buf, data,
							      &range);
			else
				ice_flow_acl_frmt_entry_fld(j, info, buf,
							    dontcare, data);

			match &= ~bit;
		}

		/* Raw (offset-based) matches are byte copies, no bit shift */
		for (j = 0; j < seg->raws_cnt; j++) {
			struct ice_flow_fld_info *info = &seg->raws[j].info;
			u16 dst, src, mask, k;
			bool use_mask = false;

			src = info->src.val;
			dst = info->entry.val -
					ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
			mask = info->src.mask;

			if (mask != ICE_FLOW_FLD_OFF_INVAL)
				use_mask = true;

			for (k = 0; k < info->entry.last; k++, dst++) {
				buf[dst] = data[src++];
				if (use_mask)
					dontcare[dst] = ~data[mask++];
				else
					dontcare[dst] = 0;
			}
		}
	}

	/* The profile ID byte always participates in the match */
	buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
	dontcare[prof->cfg.scen->pid_idx] = 0;

	/* Format the buffer for direction flags */
	dir_flag_msk = BIT(ICE_FLG_PKT_DIR);

	if (prof->dir == ICE_FLOW_RX)
		buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;

	if (range) {
		buf[prof->cfg.scen->rng_chk_idx] = range;
		/* Mark any unused range checkers as don't care */
		dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
		/* Ownership of range_buf passes to the entry */
		e->range_buf = range_buf;
	} else {
		ice_free(hw, range_buf);
	}

	/* Build key/key_inverse from the value and don't-care buffers */
	status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
			     buf_sz);
	if (status)
		goto out;

	e->entry = key;
	e->entry_sz = buf_sz * 2;

out:
	/* buf/dontcare are scratch and always freed; key, range_buf and
	 * e->acts are only freed when formatting failed
	 */
	if (buf)
		ice_free(hw, buf);

	if (dontcare)
		ice_free(hw, dontcare);

	if (status && key)
		ice_free(hw, key);

	if (status && range_buf) {
		ice_free(hw, range_buf);
		e->range_buf = NULL;
	}

	if (status && e->acts) {
		ice_free(hw, e->acts);
		e->acts = NULL;
		e->acts_cnt = 0;
	}

	if (status && cnt_alloc)
		ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);

	return status;
}
2516
/**
 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
 *                                     the compared data.
 * @prof: pointer to flow profile
 * @e: pointer to the comparing flow entry
 * @do_chg_action: decide if we want to change the ACL action
 * @do_add_entry: decide if we want to add the new ACL entry
 * @do_rem_entry: decide if we want to remove the current ACL entry
 *
 * Find an ACL scenario entry that matches the compared data. In the same time,
 * this function also figures out:
 * a/ If we want to change the ACL action
 * b/ If we want to add the new ACL entry
 * c/ If we want to remove the current ACL entry
 *
 * Returns the matching existing entry, or NULL when no entry has the same
 * matching data (in which case only *do_add_entry is set).
 */
static struct ice_flow_entry *
ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
				  struct ice_flow_entry *e, bool *do_chg_action,
				  bool *do_add_entry, bool *do_rem_entry)
{
	struct ice_flow_entry *p, *return_entry = NULL;
	u8 i, j;

	/* Check if:
	 * a/ There exists an entry with same matching data, but different
	 *    priority, then we remove this existing ACL entry. Then, we
	 *    will add the new entry to the ACL scenario.
	 * b/ There exists an entry with same matching data, priority, and
	 *    result action, then we do nothing
	 * c/ There exists an entry with same matching data, priority, but
	 *    different, action, then do only change the action's entry.
	 * d/ Else, we add this new entry to the ACL scenario.
	 */
	*do_chg_action = false;
	*do_add_entry = true;
	*do_rem_entry = false;
	LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
		/* Keys differ: not the entry we are looking for */
		if (memcmp(p->entry, e->entry, p->entry_sz))
			continue;

		/* From this point, we have the same matching_data. */
		*do_add_entry = false;
		return_entry = p;

		if (p->priority != e->priority) {
			/* matching data && !priority */
			*do_add_entry = true;
			*do_rem_entry = true;
			break;
		}

		/* From this point, we will have matching_data && priority */
		if (p->acts_cnt != e->acts_cnt)
			*do_chg_action = true;
		/* Compare each existing action against every new action;
		 * any existing action without a byte-identical counterpart
		 * means the action set changed
		 */
		for (i = 0; i < p->acts_cnt; i++) {
			bool found_not_match = false;

			for (j = 0; j < e->acts_cnt; j++)
				if (memcmp(&p->acts[i], &e->acts[j],
					   sizeof(struct ice_flow_action))) {
					found_not_match = true;
					break;
				}

			if (found_not_match) {
				*do_chg_action = true;
				break;
			}
		}

		/* (do_chg_action = true) means :
		 *    matching_data && priority && !result_action
		 * (do_chg_action = false) means :
		 *    matching_data && priority && result_action
		 */
		break;
	}

	return return_entry;
}
2597
2598 /**
2599  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2600  * @p: flow priority
2601  */
2602 static enum ice_acl_entry_prior
2603 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2604 {
2605         enum ice_acl_entry_prior acl_prior;
2606
2607         switch (p) {
2608         case ICE_FLOW_PRIO_LOW:
2609                 acl_prior = ICE_LOW;
2610                 break;
2611         case ICE_FLOW_PRIO_NORMAL:
2612                 acl_prior = ICE_NORMAL;
2613                 break;
2614         case ICE_FLOW_PRIO_HIGH:
2615                 acl_prior = ICE_HIGH;
2616                 break;
2617         default:
2618                 acl_prior = ICE_NORMAL;
2619                 break;
2620         }
2621
2622         return acl_prior;
2623 }
2624
/**
 * ice_flow_acl_union_rng_chk - Perform union operation between two
 *                              range-range checker buffers
 * @dst_buf: pointer to destination range checker buffer
 * @src_buf: pointer to source range checker buffer
 *
 * For this function, we do the union between dst_buf and src_buf
 * range checker buffer, and we will save the result back to dst_buf.
 * A slot with a zero mask is treated as unused/end-of-list. Returns
 * ICE_ERR_MAX_LIMIT when a source checker cannot be placed because no
 * free or identical destination slot remains.
 */
static enum ice_status
ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
			   struct ice_aqc_acl_profile_ranges *src_buf)
{
	u8 i, j;

	if (!dst_buf || !src_buf)
		return ICE_ERR_BAD_PTR;

	for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
		struct ice_acl_rng_data *cfg_data = NULL, *in_data;
		bool will_populate = false;

		in_data = &src_buf->checker_cfg[i];

		/* Zero mask marks the end of used source slots */
		if (!in_data->mask)
			break;

		/* Find either an unused destination slot or one that already
		 * holds an identical configuration (idempotent placement)
		 */
		for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
			cfg_data = &dst_buf->checker_cfg[j];

			if (!cfg_data->mask ||
			    !memcmp(cfg_data, in_data,
				    sizeof(struct ice_acl_rng_data))) {
				will_populate = true;
				break;
			}
		}

		if (will_populate) {
			/* cfg_data points at the slot found by the loop */
			ice_memcpy(cfg_data, in_data,
				   sizeof(struct ice_acl_rng_data),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			/* No available slot left to program range checker */
			return ICE_ERR_MAX_LIMIT;
		}
	}

	return ICE_SUCCESS;
}
2675
2676 /**
2677  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2678  * @hw: pointer to the hardware structure
2679  * @prof: pointer to flow profile
2680  * @entry: double pointer to the flow entry
2681  *
2682  * For this function, we will look at the current added entries in the
2683  * corresponding ACL scenario. Then, we will perform matching logic to
2684  * see if we want to add/modify/do nothing with this new entry.
2685  */
2686 static enum ice_status
2687 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2688                                  struct ice_flow_entry **entry)
2689 {
2690         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2691         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2692         struct ice_acl_act_entry *acts = NULL;
2693         struct ice_flow_entry *exist;
2694         enum ice_status status = ICE_SUCCESS;
2695         struct ice_flow_entry *e;
2696         u8 i;
2697
2698         if (!entry || !(*entry) || !prof)
2699                 return ICE_ERR_BAD_PTR;
2700
2701         e = *(entry);
2702
2703         do_chg_rng_chk = false;
2704         if (e->range_buf) {
2705                 u8 prof_id = 0;
2706
2707                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2708                                               &prof_id);
2709                 if (status)
2710                         return status;
2711
2712                 /* Query the current range-checker value in FW */
2713                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2714                                                    NULL);
2715                 if (status)
2716                         return status;
2717                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2718                            sizeof(struct ice_aqc_acl_profile_ranges),
2719                            ICE_NONDMA_TO_NONDMA);
2720
2721                 /* Generate the new range-checker value */
2722                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2723                 if (status)
2724                         return status;
2725
2726                 /* Reconfigure the range check if the buffer is changed. */
2727                 do_chg_rng_chk = false;
2728                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2729                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2730                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2731                                                           &cfg_rng_buf, NULL);
2732                         if (status)
2733                                 return status;
2734
2735                         do_chg_rng_chk = true;
2736                 }
2737         }
2738
2739         /* Figure out if we want to (change the ACL action) and/or
2740          * (Add the new ACL entry) and/or (Remove the current ACL entry)
2741          */
2742         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2743                                                   &do_add_entry, &do_rem_entry);
2744
2745         if (do_rem_entry) {
2746                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2747                 if (status)
2748                         return status;
2749         }
2750
2751         /* Prepare the result action buffer */
2752         acts = (struct ice_acl_act_entry *)ice_calloc
2753                 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2754         for (i = 0; i < e->acts_cnt; i++)
2755                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2756                            sizeof(struct ice_acl_act_entry),
2757                            ICE_NONDMA_TO_NONDMA);
2758
2759         if (do_add_entry) {
2760                 enum ice_acl_entry_prior prior;
2761                 u8 *keys, *inverts;
2762                 u16 entry_idx;
2763
2764                 keys = (u8 *)e->entry;
2765                 inverts = keys + (e->entry_sz / 2);
2766                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2767
2768                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2769                                            inverts, acts, e->acts_cnt,
2770                                            &entry_idx);
2771                 if (status)
2772                         goto out;
2773
2774                 e->scen_entry_idx = entry_idx;
2775                 LIST_ADD(&e->l_entry, &prof->entries);
2776         } else {
2777                 if (do_chg_action) {
2778                         /* For the action memory info, update the SW's copy of
2779                          * exist entry with e's action memory info
2780                          */
2781                         ice_free(hw, exist->acts);
2782                         exist->acts_cnt = e->acts_cnt;
2783                         exist->acts = (struct ice_flow_action *)
2784                                 ice_calloc(hw, exist->acts_cnt,
2785                                            sizeof(struct ice_flow_action));
2786
2787                         if (!exist->acts) {
2788                                 status = ICE_ERR_NO_MEMORY;
2789                                 goto out;
2790                         }
2791
2792                         ice_memcpy(exist->acts, e->acts,
2793                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2794                                    ICE_NONDMA_TO_NONDMA);
2795
2796                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2797                                                   e->acts_cnt,
2798                                                   exist->scen_entry_idx);
2799                         if (status)
2800                                 goto out;
2801                 }
2802
2803                 if (do_chg_rng_chk) {
2804                         /* In this case, we want to update the range checker
2805                          * information of the exist entry
2806                          */
2807                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2808                                                             e->range_buf);
2809                         if (status)
2810                                 goto out;
2811                 }
2812
2813                 /* As we don't add the new entry to our SW DB, deallocate its
2814                  * memories, and return the exist entry to the caller
2815                  */
2816                 ice_dealloc_flow_entry(hw, e);
2817                 *(entry) = exist;
2818         }
2819 out:
2820         if (acts)
2821                 ice_free(hw, acts);
2822
2823         return status;
2824 }
2825
2826 /**
2827  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2828  * @hw: pointer to the hardware structure
2829  * @prof: pointer to flow profile
2830  * @e: double pointer to the flow entry
2831  */
2832 static enum ice_status
2833 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2834                             struct ice_flow_entry **e)
2835 {
2836         enum ice_status status;
2837
2838         ice_acquire_lock(&prof->entries_lock);
2839         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2840         ice_release_lock(&prof->entries_lock);
2841
2842         return status;
2843 }
2844
/**
 * ice_flow_add_entry - Add a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: ID of the profile to add a new flow entry to
 * @entry_id: unique ID to identify this flow entry
 * @vsi_handle: software VSI handle for the flow entry
 * @prio: priority of the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @acts: arrays of actions to be performed on a match
 * @acts_cnt: number of actions
 * @entry_h: pointer to buffer that receives the new flow entry's handle
 *
 * Validates the inputs, looks up the profile, allocates an entry, associates
 * the VSI with the profile, and (for ACL) formats and installs the entry
 * into its scenario. On success *entry_h receives an opaque handle for the
 * new entry; on failure any partially built entry is freed here.
 */
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, struct ice_flow_action *acts, u8 acts_cnt,
		   u64 *entry_h)
{
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status = ICE_SUCCESS;

	/* ACL entries must indicate an action */
	if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
		return ICE_ERR_PARAM;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Hold the per-block profile-list lock only while finding the
	 * profile, allocating the entry, and associating the VSI.
	 */
	ice_acquire_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
	} else {
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile
		 */
		e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
		if (!e)
			status = ICE_ERR_NO_MEMORY;
		else
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	}

	ice_release_lock(&hw->fl_profs_locks[blk]);
	if (status)
		goto out;

	e->id = entry_id;
	e->vsi_handle = vsi_handle;
	e->prof = prof;
	e->priority = prio;

	switch (blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		break;
	case ICE_BLK_ACL:
		/* ACL will handle the entry management */
		status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
						 acts_cnt);
		if (status)
			goto out;

		/* NOTE: may replace 'e' with an already-existing equivalent
		 * entry (the duplicate is freed inside the call).
		 */
		status = ice_flow_acl_add_scen_entry(hw, prof, &e);
		if (status)
			goto out;

		break;
	default:
		status = ICE_ERR_NOT_IMPL;
		goto out;
	}

	if (blk != ICE_BLK_ACL) {
		/* ACL will handle the entry management */
		ice_acquire_lock(&prof->entries_lock);
		LIST_ADD(&e->l_entry, &prof->entries);
		ice_release_lock(&prof->entries_lock);
	}

	*entry_h = ICE_FLOW_ENTRY_HNDL(e);

out:
	/* On failure, free the entry and any formatted buffer it owns */
	if (status && e) {
		if (e->entry)
			ice_free(hw, e->entry);
		ice_free(hw, e);
	}

	return status;
}
2943
2944 /**
2945  * ice_flow_rem_entry - Remove a flow entry
2946  * @hw: pointer to the HW struct
2947  * @blk: classification stage
2948  * @entry_h: handle to the flow entry to be removed
2949  */
2950 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2951                                    u64 entry_h)
2952 {
2953         struct ice_flow_entry *entry;
2954         struct ice_flow_prof *prof;
2955         enum ice_status status = ICE_SUCCESS;
2956
2957         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2958                 return ICE_ERR_PARAM;
2959
2960         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2961
2962         /* Retain the pointer to the flow profile as the entry will be freed */
2963         prof = entry->prof;
2964
2965         if (prof) {
2966                 ice_acquire_lock(&prof->entries_lock);
2967                 status = ice_flow_rem_entry_sync(hw, blk, entry);
2968                 ice_release_lock(&prof->entries_lock);
2969         }
2970
2971         return status;
2972 }
2973
2974 /**
2975  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2976  * @seg: packet segment the field being set belongs to
2977  * @fld: field to be set
2978  * @field_type: type of the field
2979  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2980  *           entry's input buffer
2981  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2982  *            input buffer
2983  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2984  *            entry's input buffer
2985  *
2986  * This helper function stores information of a field being matched, including
2987  * the type of the field and the locations of the value to match, the mask, and
2988  * and the upper-bound value in the start of the input buffer for a flow entry.
2989  * This function should only be used for fixed-size data structures.
2990  *
2991  * This function also opportunistically determines the protocol headers to be
2992  * present based on the fields being set. Some fields cannot be used alone to
2993  * determine the protocol headers present. Sometimes, fields for particular
2994  * protocol headers are not matched. In those cases, the protocol headers
2995  * must be explicitly set.
2996  */
2997 static void
2998 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2999                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3000                      u16 mask_loc, u16 last_loc)
3001 {
3002         u64 bit = BIT_ULL(fld);
3003
3004         seg->match |= bit;
3005         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3006                 seg->range |= bit;
3007
3008         seg->fields[fld].type = field_type;
3009         seg->fields[fld].src.val = val_loc;
3010         seg->fields[fld].src.mask = mask_loc;
3011         seg->fields[fld].src.last = last_loc;
3012
3013         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3014 }
3015
3016 /**
3017  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3018  * @seg: packet segment the field being set belongs to
3019  * @fld: field to be set
3020  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3021  *           entry's input buffer
3022  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3023  *            input buffer
3024  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3025  *            entry's input buffer
3026  * @range: indicate if field being matched is to be in a range
3027  *
3028  * This function specifies the locations, in the form of byte offsets from the
3029  * start of the input buffer for a flow entry, from where the value to match,
3030  * the mask value, and upper value can be extracted. These locations are then
3031  * stored in the flow profile. When adding a flow entry associated with the
3032  * flow profile, these locations will be used to quickly extract the values and
3033  * create the content of a match entry. This function should only be used for
3034  * fixed-size data structures.
3035  */
3036 void
3037 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3038                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3039 {
3040         enum ice_flow_fld_match_type t = range ?
3041                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3042
3043         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3044 }
3045
/**
 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 *           entry's input buffer
 * @pref_loc: location of prefix value from entry's input buffer
 * @pref_sz: size of the location holding the prefix value
 *
 * This function specifies the locations, in the form of byte offsets from the
 * start of the input buffer for a flow entry, from where the value to match
 * and the IPv4 prefix value can be extracted. These locations are then stored
 * in the flow profile. When adding flow entries to the associated flow profile,
 * these locations can be used to quickly extract the values to create the
 * content of a match entry. This function should only be used for fixed-size
 * data structures.
 */
void
ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
			u16 val_loc, u16 pref_loc, u8 pref_sz)
{
	/* For this type of field, the "mask" location is for the prefix value's
	 * location and the "last" location is for the size of the location of
	 * the prefix value.
	 */
	ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
			     pref_loc, (u16)pref_sz);
}
3074
3075 /**
3076  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3077  * @seg: packet segment the field being set belongs to
3078  * @off: offset of the raw field from the beginning of the segment in bytes
3079  * @len: length of the raw pattern to be matched
3080  * @val_loc: location of the value to match from entry's input buffer
3081  * @mask_loc: location of mask value from entry's input buffer
3082  *
3083  * This function specifies the offset of the raw field to be match from the
3084  * beginning of the specified packet segment, and the locations, in the form of
3085  * byte offsets from the start of the input buffer for a flow entry, from where
3086  * the value to match and the mask value to be extracted. These locations are
3087  * then stored in the flow profile. When adding flow entries to the associated
3088  * flow profile, these locations can be used to quickly extract the values to
3089  * create the content of a match entry. This function should only be used for
3090  * fixed-size data structures.
3091  */
3092 void
3093 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3094                      u16 val_loc, u16 mask_loc)
3095 {
3096         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3097                 seg->raws[seg->raws_cnt].off = off;
3098                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3099                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3100                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3101                 /* The "last" field is used to store the length of the field */
3102                 seg->raws[seg->raws_cnt].info.src.last = len;
3103         }
3104
3105         /* Overflows of "raws" will be handled as an error condition later in
3106          * the flow when this information is processed.
3107          */
3108         seg->raws_cnt++;
3109 }
3110
/* L2 header types (Ethernet, VLAN) valid for RSS segment setup */
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)

/* L3 header types valid for RSS; at most one may be selected per segment */
#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

/* L4 header types valid for RSS; at most one may be selected per segment */
#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)

/* Union of all header bits a caller may pass for RSS segment setup */
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3125
3126 /**
3127  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3128  * @segs: pointer to the flow field segment(s)
3129  * @hash_fields: fields to be hashed on for the segment(s)
3130  * @flow_hdr: protocol header fields within a packet segment
3131  *
3132  * Helper function to extract fields from hash bitmap and use flow
3133  * header value to set flow field segment for further use in flow
3134  * profile entry or removal.
3135  */
3136 static enum ice_status
3137 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3138                           u32 flow_hdr)
3139 {
3140         u64 val = hash_fields;
3141         u8 i;
3142
3143         for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3144                 u64 bit = BIT_ULL(i);
3145
3146                 if (val & bit) {
3147                         ice_flow_set_fld(segs, (enum ice_flow_field)i,
3148                                          ICE_FLOW_FLD_OFF_INVAL,
3149                                          ICE_FLOW_FLD_OFF_INVAL,
3150                                          ICE_FLOW_FLD_OFF_INVAL, false);
3151                         val &= ~bit;
3152                 }
3153         }
3154         ICE_FLOW_SET_HDRS(segs, flow_hdr);
3155
3156         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3157             ~ICE_FLOW_RSS_HDRS_INNER_MASK)
3158                 return ICE_ERR_PARAM;
3159
3160         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3161         if (val && !ice_is_pow2(val))
3162                 return ICE_ERR_CFG;
3163
3164         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3165         if (val && !ice_is_pow2(val))
3166                 return ICE_ERR_CFG;
3167
3168         return ICE_SUCCESS;
3169 }
3170
3171 /**
3172  * ice_rem_vsi_rss_list - remove VSI from RSS list
3173  * @hw: pointer to the hardware structure
3174  * @vsi_handle: software VSI handle
3175  *
3176  * Remove the VSI from all RSS configurations in the list.
3177  */
3178 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3179 {
3180         struct ice_rss_cfg *r, *tmp;
3181
3182         if (LIST_EMPTY(&hw->rss_list_head))
3183                 return;
3184
3185         ice_acquire_lock(&hw->rss_locks);
3186         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3187                                  ice_rss_cfg, l_entry)
3188                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3189                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3190                                 LIST_DEL(&r->l_entry);
3191                                 ice_free(hw, r);
3192                         }
3193         ice_release_lock(&hw->rss_locks);
3194 }
3195
3196 /**
3197  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3198  * @hw: pointer to the hardware structure
3199  * @vsi_handle: software VSI handle
3200  *
3201  * This function will iterate through all flow profiles and disassociate
3202  * the VSI from that profile. If the flow profile has no VSIs it will
3203  * be removed.
3204  */
3205 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3206 {
3207         const enum ice_block blk = ICE_BLK_RSS;
3208         struct ice_flow_prof *p, *t;
3209         enum ice_status status = ICE_SUCCESS;
3210
3211         if (!ice_is_vsi_valid(hw, vsi_handle))
3212                 return ICE_ERR_PARAM;
3213
3214         if (LIST_EMPTY(&hw->fl_profs[blk]))
3215                 return ICE_SUCCESS;
3216
3217         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3218         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3219                                  l_entry)
3220                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3221                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3222                         if (status)
3223                                 break;
3224
3225                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3226                                 status = ice_flow_rem_prof_sync(hw, blk, p);
3227                                 if (status)
3228                                         break;
3229                         }
3230                 }
3231         ice_release_lock(&hw->fl_profs_locks[blk]);
3232
3233         return status;
3234 }
3235
3236 /**
3237  * ice_rem_rss_list - remove RSS configuration from list
3238  * @hw: pointer to the hardware structure
3239  * @vsi_handle: software VSI handle
3240  * @prof: pointer to flow profile
3241  *
3242  * Assumption: lock has already been acquired for RSS list
3243  */
3244 static void
3245 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3246 {
3247         struct ice_rss_cfg *r, *tmp;
3248
3249         /* Search for RSS hash fields associated to the VSI that match the
3250          * hash configurations associated to the flow profile. If found
3251          * remove from the RSS entry list of the VSI context and delete entry.
3252          */
3253         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3254                                  ice_rss_cfg, l_entry)
3255                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3256                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3257                         ice_clear_bit(vsi_handle, r->vsis);
3258                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3259                                 LIST_DEL(&r->l_entry);
3260                                 ice_free(hw, r);
3261                         }
3262                         return;
3263                 }
3264 }
3265
3266 /**
3267  * ice_add_rss_list - add RSS configuration to list
3268  * @hw: pointer to the hardware structure
3269  * @vsi_handle: software VSI handle
3270  * @prof: pointer to flow profile
3271  *
3272  * Assumption: lock has already been acquired for RSS list
3273  */
3274 static enum ice_status
3275 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3276 {
3277         struct ice_rss_cfg *r, *rss_cfg;
3278
3279         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3280                             ice_rss_cfg, l_entry)
3281                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3282                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3283                         ice_set_bit(vsi_handle, r->vsis);
3284                         return ICE_SUCCESS;
3285                 }
3286
3287         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3288         if (!rss_cfg)
3289                 return ICE_ERR_NO_MEMORY;
3290
3291         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3292         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3293         rss_cfg->symm = prof->cfg.symm;
3294         ice_set_bit(vsi_handle, rss_cfg->vsis);
3295
3296         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3297
3298         return ICE_SUCCESS;
3299 }
3300
/* Bit layout of the 64-bit generated RSS profile ID (see comment below) */
#define ICE_FLOW_PROF_HASH_S    0
#define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S     32
#define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S   63
#define ICE_FLOW_PROF_ENCAP_M   (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

/* Segment counts used to distinguish outer-only vs tunneled RSS setup */
#define ICE_RSS_OUTER_HEADERS   1
#define ICE_RSS_INNER_HEADERS   2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3320
3321 static void
3322 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3323 {
3324         u32 s = ((src % 4) << 3); /* byte shift */
3325         u32 v = dst | 0x80; /* value to program */
3326         u8 i = src / 4; /* register index */
3327         u32 reg;
3328
3329         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3330         reg = (reg & ~(0xff << s)) | (v << s);
3331         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3332 }
3333
3334 static void
3335 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3336 {
3337         int fv_last_word =
3338                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3339         int i;
3340
3341         for (i = 0; i < len; i++) {
3342                 ice_rss_config_xor_word(hw, prof_id,
3343                                         /* Yes, field vector in GLQF_HSYMM and
3344                                          * GLQF_HINSET is inversed!
3345                                          */
3346                                         fv_last_word - (src + i),
3347                                         fv_last_word - (dst + i));
3348                 ice_rss_config_xor_word(hw, prof_id,
3349                                         fv_last_word - (dst + i),
3350                                         fv_last_word - (src + i));
3351         }
3352 }
3353
3354 static void
3355 ice_rss_update_symm(struct ice_hw *hw,
3356                     struct ice_flow_prof *prof)
3357 {
3358         struct ice_prof_map *map;
3359         u8 prof_id, m;
3360
3361         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3362         prof_id = map->prof_id;
3363
3364         /* clear to default */
3365         for (m = 0; m < 6; m++)
3366                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3367         if (prof->cfg.symm) {
3368                 struct ice_flow_seg_info *seg =
3369                         &prof->segs[prof->segs_cnt - 1];
3370
3371                 struct ice_flow_seg_xtrct *ipv4_src =
3372                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3373                 struct ice_flow_seg_xtrct *ipv4_dst =
3374                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3375                 struct ice_flow_seg_xtrct *ipv6_src =
3376                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3377                 struct ice_flow_seg_xtrct *ipv6_dst =
3378                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3379
3380                 struct ice_flow_seg_xtrct *tcp_src =
3381                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3382                 struct ice_flow_seg_xtrct *tcp_dst =
3383                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3384
3385                 struct ice_flow_seg_xtrct *udp_src =
3386                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3387                 struct ice_flow_seg_xtrct *udp_dst =
3388                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3389
3390                 struct ice_flow_seg_xtrct *sctp_src =
3391                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3392                 struct ice_flow_seg_xtrct *sctp_dst =
3393                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3394
3395                 /* xor IPv4 */
3396                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3397                         ice_rss_config_xor(hw, prof_id,
3398                                            ipv4_src->idx, ipv4_dst->idx, 2);
3399
3400                 /* xor IPv6 */
3401                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3402                         ice_rss_config_xor(hw, prof_id,
3403                                            ipv6_src->idx, ipv6_dst->idx, 8);
3404
3405                 /* xor TCP */
3406                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3407                         ice_rss_config_xor(hw, prof_id,
3408                                            tcp_src->idx, tcp_dst->idx, 1);
3409
3410                 /* xor UDP */
3411                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3412                         ice_rss_config_xor(hw, prof_id,
3413                                            udp_src->idx, udp_dst->idx, 1);
3414
3415                 /* xor SCTP */
3416                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3417                         ice_rss_config_xor(hw, prof_id,
3418                                            sctp_src->idx, sctp_dst->idx, 1);
3419         }
3420 }
3421
/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 * @segs_cnt: packet segment count
 * @symm: symmetric hash enable/disable
 *
 * Builds the packet-segment description for the hash request and reconciles
 * it with existing RSS flow profiles: reuses an exact match, migrates the
 * VSI off a header-only match, joins a fields-only match, or creates a new
 * profile. Hardware symmetric-hash registers are reprogrammed whenever the
 * symmetry setting changes.
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt, bool symm)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_PARAM;

	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
						      sizeof(*segs));
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto exit;

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		/* Exact match: only the symmetry flag may need refreshing */
		if (prof->cfg.symm == symm)
			goto exit;
		prof->cfg.symm = symm;
		goto update_symm;
	}

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		if (prof->cfg.symm == symm) {
			status = ice_flow_assoc_prof(hw, blk, prof,
						     vsi_handle);
			if (!status)
				status = ice_add_rss_list(hw, vsi_handle,
							  prof);
		} else {
			/* if a profile exist but with different symmetric
			 * requirement, just return error.
			 */
			status = ICE_ERR_NOT_SUPPORTED;
		}
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(hashed_flds,
						       segs[segs_cnt - 1].hdrs,
						       segs_cnt),
				   segs, segs_cnt, NULL, 0, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

	prof->cfg.symm = symm;

update_symm:
	ice_rss_update_symm(hw, prof);

exit:
	ice_free(hw, segs);
	return status;
}
3546
3547 /**
3548  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3549  * @hw: pointer to the hardware structure
3550  * @vsi_handle: software VSI handle
3551  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3552  * @addl_hdrs: protocol header fields
3553  * @symm: symmetric hash enable/disable
3554  *
3555  * This function will generate a flow profile based on fields associated with
3556  * the input fields to hash on, the flow type and use the VSI number to add
3557  * a flow entry to the profile.
3558  */
3559 enum ice_status
3560 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3561                 u32 addl_hdrs, bool symm)
3562 {
3563         enum ice_status status;
3564
3565         if (hashed_flds == ICE_HASH_INVALID ||
3566             !ice_is_vsi_valid(hw, vsi_handle))
3567                 return ICE_ERR_PARAM;
3568
3569         ice_acquire_lock(&hw->rss_locks);
3570         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3571                                       ICE_RSS_OUTER_HEADERS, symm);
3572         if (!status)
3573                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3574                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3575                                               symm);
3576         ice_release_lock(&hw->rss_locks);
3577
3578         return status;
3579 }
3580
/**
 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
 * @addl_hdrs: Protocol header fields within a packet segment
 * @segs_cnt: packet segment count
 *
 * Rebuilds the segment description for the hash request, finds the matching
 * flow profile, disassociates the VSI, prunes the software RSS list, and
 * removes the profile once no VSI references it.
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_seg_info *segs;
	struct ice_flow_prof *prof;
	enum ice_status status;

	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
						      sizeof(*segs));
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto out;

	/* Match on hash fields only: the profile owning this hash config */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto out;
	}

	status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
	if (status)
		goto out;

	/* Remove RSS configuration from VSI context before deleting
	 * the flow profile.
	 */
	ice_rem_rss_list(hw, vsi_handle, prof);

	if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
		status = ice_flow_rem_prof(hw, blk, prof->id);

out:
	ice_free(hw, segs);
	return status;
}
3635
3636 /**
3637  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3638  * @hw: pointer to the hardware structure
3639  * @vsi_handle: software VSI handle
3640  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3641  * @addl_hdrs: Protocol header fields within a packet segment
3642  *
3643  * This function will lookup the flow profile based on the input
3644  * hash field bitmap, iterate through the profile entry list of
3645  * that profile and find entry associated with input VSI to be
3646  * removed. Calls are made to underlying flow apis which will in
3647  * turn build or update buffers for RSS XLT1 section.
3648  */
3649 enum ice_status
3650 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3651                 u32 addl_hdrs)
3652 {
3653         enum ice_status status;
3654
3655         if (hashed_flds == ICE_HASH_INVALID ||
3656             !ice_is_vsi_valid(hw, vsi_handle))
3657                 return ICE_ERR_PARAM;
3658
3659         ice_acquire_lock(&hw->rss_locks);
3660         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3661                                       ICE_RSS_OUTER_HEADERS);
3662         if (!status)
3663                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3664                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3665         ice_release_lock(&hw->rss_locks);
3666
3667         return status;
3668 }
3669
3670 /**
3671  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3672  * @hw: pointer to the hardware structure
3673  * @vsi_handle: software VSI handle
3674  */
3675 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3676 {
3677         enum ice_status status = ICE_SUCCESS;
3678         struct ice_rss_cfg *r;
3679
3680         if (!ice_is_vsi_valid(hw, vsi_handle))
3681                 return ICE_ERR_PARAM;
3682
3683         ice_acquire_lock(&hw->rss_locks);
3684         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3685                             ice_rss_cfg, l_entry) {
3686                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3687                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3688                                                       r->hashed_flds,
3689                                                       r->packet_hdr,
3690                                                       ICE_RSS_OUTER_HEADERS,
3691                                                       r->symm);
3692                         if (status)
3693                                 break;
3694                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3695                                                       r->hashed_flds,
3696                                                       r->packet_hdr,
3697                                                       ICE_RSS_INNER_HEADERS,
3698                                                       r->symm);
3699                         if (status)
3700                                 break;
3701                 }
3702         }
3703         ice_release_lock(&hw->rss_locks);
3704
3705         return status;
3706 }
3707
3708 /**
3709  * ice_get_rss_cfg - returns hashed fields for the given header types
3710  * @hw: pointer to the hardware structure
3711  * @vsi_handle: software VSI handle
3712  * @hdrs: protocol header type
3713  *
3714  * This function will return the match fields of the first instance of flow
3715  * profile having the given header types and containing input VSI
3716  */
3717 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3718 {
3719         struct ice_rss_cfg *r, *rss_cfg = NULL;
3720
3721         /* verify if the protocol header is non zero and VSI is valid */
3722         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3723                 return ICE_HASH_INVALID;
3724
3725         ice_acquire_lock(&hw->rss_locks);
3726         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3727                             ice_rss_cfg, l_entry)
3728                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3729                     r->packet_hdr == hdrs) {
3730                         rss_cfg = r;
3731                         break;
3732                 }
3733         ice_release_lock(&hw->rss_locks);
3734
3735         return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
3736 }