net/ice/base: remove unnecessary case branches
[dpdk.git] / drivers / net / ice / base / ice_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
/* Size of known protocol header fields, in bytes */
#define ICE_FLOW_FLD_SZ_ETH_TYPE        2
#define ICE_FLOW_FLD_SZ_VLAN            2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
#define ICE_FLOW_FLD_SZ_IP_DSCP         1
#define ICE_FLOW_FLD_SZ_IP_TTL          1
#define ICE_FLOW_FLD_SZ_IP_PROT         1
#define ICE_FLOW_FLD_SZ_PORT            2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
#define ICE_FLOW_FLD_SZ_ICMP_CODE       1
#define ICE_FLOW_FLD_SZ_ARP_OPER        2
#define ICE_FLOW_FLD_SZ_GRE_KEYID       4
#define ICE_FLOW_FLD_SZ_GTP_TEID        4
#define ICE_FLOW_FLD_SZ_GTP_QFI         2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
#define ICE_FLOW_FLD_SZ_PFCP_SEID       8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
#define ICE_FLOW_FLD_SZ_ESP_SPI         4
#define ICE_FLOW_FLD_SZ_AH_SPI          4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
33
34 /* Describe properties of a protocol header field */
35 struct ice_flow_field_info {
36         enum ice_flow_seg_hdr hdr;
37         s16 off;        /* Offset from start of a protocol header, in bits */
38         u16 size;       /* Size of fields in bits */
39         u16 mask;       /* 16-bit mask for field */
40 };
41
42 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
43         .hdr = _hdr, \
44         .off = (_offset_bytes) * BITS_PER_BYTE, \
45         .size = (_size_bytes) * BITS_PER_BYTE, \
46         .mask = 0, \
47 }
48
49 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
50         .hdr = _hdr, \
51         .off = (_offset_bytes) * BITS_PER_BYTE, \
52         .size = (_size_bytes) * BITS_PER_BYTE, \
53         .mask = _mask, \
54 }
55
/* Table containing properties of supported protocol header fields.
 * Entries are indexed by the ICE_FLOW_FIELD_IDX_* value noted in the
 * comment above each one.  Offsets/sizes are written here in bytes and
 * converted to bits by the ICE_FLOW_FLD_INFO* initializers.
 */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* Ether */
	/* ICE_FLOW_FIELD_IDX_ETH_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ETH_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_S_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_C_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
	/* IPv4 / IPv6 */
	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x00fc),
	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x0ff0),
	/* NOTE(review): the four TTL/PROT entries below use
	 * ICE_FLOW_SEG_HDR_NONE; the owning L3 header appears to be resolved
	 * elsewhere when the field is extracted -- confirm at the use sites.
	 */
	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* The PRE32/PRE48/PRE64 entries match only an address prefix; they
	 * share the SA (offset 8) / DA (offset 24) offsets of the full
	 * 16-byte addresses above but use the shorter prefix sizes.
	 */
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
	/* Transport */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
	/* ARP */
	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_OP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
	/* ICMP */
	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
	/* GTP */
	/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
			      ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
	/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* PPPOE */
	/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
			  ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
	/* PFCP */
	/* ICE_FLOW_FIELD_IDX_PFCP_SEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
			  ICE_FLOW_FLD_SZ_PFCP_SEID),
	/* L2TPV3 */
	/* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
			  ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
	/* ESP */
	/* ICE_FLOW_FIELD_IDX_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
			  ICE_FLOW_FLD_SZ_ESP_SPI),
	/* AH */
	/* ICE_FLOW_FIELD_IDX_AH_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
			  ICE_FLOW_FLD_SZ_AH_SPI),
	/* NAT_T_ESP */
	/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
			  ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
};
193
/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Each table below holds 32 u32 words; bit positions index hardware packet
 * types (presumably bit (ptype % 32) of word (ptype / 32) -- confirm against
 * the ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX) user of these tables).
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000155, 0x00000000, 0x00000000,
	0x0003000F, 0x000FC000, 0x83E0F800, 0x00000101,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x001FF800, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
	0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
	0x00000800, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
280
/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00410000, 0x90842000, 0x00000007,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00820000, 0x21084000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x01040000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
	0x10000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
	0x00000000, 0x02040408, 0x40000102, 0x08101020,
	0x00000408, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x42108000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
354
/* Packet types for packets with an Innermost/Last MAC header.
 * Currently empty: no ptype carries an inner MAC header in this table.
 */
static const u32 ice_ptypes_mac_il[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC */
static const u32 ice_ptypes_gtpc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000180, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC with TEID */
static const u32 ice_ptypes_gtpc_tid[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000060, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
390
/* GTP-U ptype attribute tables.  Each entry pairs a GTP-U packet type with
 * the attribute it should carry: PDU session extension header (EH),
 * downlink, or uplink.  All three tables cover the same set of ptypes.
 */
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
};

/* Same ptype set as ice_attr_gtpu_eh, tagged with the downlink attribute */
static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
};

/* Same ptype set as ice_attr_gtpu_eh, tagged with the uplink attribute */
static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
};
460
/* Packet types for GTP-U */
static const u32 ice_ptypes_gtpu[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for PPPoE */
static const u32 ice_ptypes_pppoe[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with PFCP NODE header */
static const u32 ice_ptypes_pfcp_node[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x80000000, 0x00000002,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with PFCP SESSION header */
static const u32 ice_ptypes_pfcp_session[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000005,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for L2TPv3 */
static const u32 ice_ptypes_l2tpv3[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000300,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for ESP */
static const u32 ice_ptypes_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000003, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for AH */
static const u32 ice_ptypes_ah[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x0000000C, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with NAT_T ESP header */
static const u32 ice_ptypes_nat_t_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000030, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for MAC frames that carry no IP payload */
static const u32 ice_ptypes_mac_non_ip_ofos[] = {
	0x00000846, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
566
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk;	/* hardware block the profile belongs to */
	u16 entry_length; /* # of bytes formatted entry will require */
	u8 es_cnt;	/* number of extraction-sequence words used in es[] */
	struct ice_flow_prof *prof;

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];
	/* attributes can be used to add attributes to a particular PTYPE */
	const struct ice_ptype_attributes *attr;
	u16 attr_cnt;	/* number of entries in attr[] */

	/* per-word match masks; same dimension as es[], presumably parallel
	 * to it -- confirm at the use sites
	 */
	u16 mask[ICE_MAX_FV_WORDS];
	ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);	/* relevant ptypes */
};
585
/* NOTE(review): per its name, the set of header bits treated as "inner"
 * headers when building RSS configurations -- confirm at the use sites.
 */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	ICE_FLOW_SEG_HDR_NAT_T_ESP)

/* All L2 header bits */
#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* All L3 header bits; ice_flow_val_hdrs() allows at most one per segment */
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
/* All L4 header bits; ice_flow_val_hdrs() allows at most one per segment */
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
601
602 /**
603  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
604  * @segs: array of one or more packet segments that describe the flow
605  * @segs_cnt: number of packet segments provided
606  */
607 static enum ice_status
608 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
609 {
610         u8 i;
611
612         for (i = 0; i < segs_cnt; i++) {
613                 /* Multiple L3 headers */
614                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
615                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
616                         return ICE_ERR_PARAM;
617
618                 /* Multiple L4 headers */
619                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
620                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
621                         return ICE_ERR_PARAM;
622         }
623
624         return ICE_SUCCESS;
625 }
626
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
637
638 /**
639  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
640  * @params: information about the flow to be processed
641  * @seg: index of packet segment whose header size is to be determined
642  */
643 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
644 {
645         u16 sz;
646
647         /* L2 headers */
648         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
649                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
650
651         /* L3 headers */
652         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
653                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
654         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
655                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
656         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
657                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
658         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
659                 /* A L3 header is required if L4 is specified */
660                 return 0;
661
662         /* L4 headers */
663         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
664                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
665         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
666                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
667         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
668                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
669         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
670                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
671
672         return sz;
673 }
674
675 /**
676  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
677  * @params: information about the flow to be processed
678  *
679  * This function identifies the packet types associated with the protocol
680  * headers being present in packet segments of the specified flow profile.
681  */
682 static enum ice_status
683 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
684 {
685         struct ice_flow_prof *prof;
686         u8 i;
687
688         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
689                    ICE_NONDMA_MEM);
690
691         prof = params->prof;
692
693         for (i = 0; i < params->prof->segs_cnt; i++) {
694                 const ice_bitmap_t *src;
695                 u32 hdrs;
696
697                 hdrs = prof->segs[i].hdrs;
698
699                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
700                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
701                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
702                         ice_and_bitmap(params->ptypes, params->ptypes, src,
703                                        ICE_FLOW_PTYPE_MAX);
704                 }
705
706                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
707                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
708                         ice_and_bitmap(params->ptypes, params->ptypes, src,
709                                        ICE_FLOW_PTYPE_MAX);
710                 }
711
712                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
713                         ice_and_bitmap(params->ptypes, params->ptypes,
714                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
715                                        ICE_FLOW_PTYPE_MAX);
716                 }
717
718                 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
719                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
720                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
721                         ice_and_bitmap(params->ptypes, params->ptypes, src,
722                                        ICE_FLOW_PTYPE_MAX);
723                         if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
724                                 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
725                                 ice_and_bitmap(params->ptypes,
726                                                 params->ptypes, src,
727                                                ICE_FLOW_PTYPE_MAX);
728                         } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
729                                 ice_and_bitmap(params->ptypes, params->ptypes,
730                                                (const ice_bitmap_t *)
731                                                ice_ptypes_tcp_il,
732                                                ICE_FLOW_PTYPE_MAX);
733                         } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
734                                 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
735                                 ice_and_bitmap(params->ptypes, params->ptypes,
736                                                src, ICE_FLOW_PTYPE_MAX);
737                         }
738                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
739                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
740                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
741                         ice_and_bitmap(params->ptypes, params->ptypes, src,
742                                        ICE_FLOW_PTYPE_MAX);
743                         if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
744                                 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
745                                 ice_and_bitmap(params->ptypes,
746                                                 params->ptypes, src,
747                                                ICE_FLOW_PTYPE_MAX);
748                         } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
749                                 ice_and_bitmap(params->ptypes, params->ptypes,
750                                                (const ice_bitmap_t *)
751                                                ice_ptypes_tcp_il,
752                                                ICE_FLOW_PTYPE_MAX);
753                         } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
754                                 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
755                                 ice_and_bitmap(params->ptypes, params->ptypes,
756                                                src, ICE_FLOW_PTYPE_MAX);
757                         }
758                 }
759
760                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
761                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
762                         ice_and_bitmap(params->ptypes, params->ptypes,
763                                        src, ICE_FLOW_PTYPE_MAX);
764                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
765                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
766                         ice_and_bitmap(params->ptypes, params->ptypes, src,
767                                        ICE_FLOW_PTYPE_MAX);
768                 }
769
770                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
771                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
772                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
773                         ice_and_bitmap(params->ptypes, params->ptypes, src,
774                                        ICE_FLOW_PTYPE_MAX);
775                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
776                         if (!i) {
777                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
778                                 ice_and_bitmap(params->ptypes, params->ptypes,
779                                                src, ICE_FLOW_PTYPE_MAX);
780                         }
781                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
782                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
783                         ice_and_bitmap(params->ptypes, params->ptypes,
784                                        src, ICE_FLOW_PTYPE_MAX);
785                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
786                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
787                         ice_and_bitmap(params->ptypes, params->ptypes,
788                                        src, ICE_FLOW_PTYPE_MAX);
789                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
790                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
791                         ice_and_bitmap(params->ptypes, params->ptypes,
792                                        src, ICE_FLOW_PTYPE_MAX);
793
794                         /* Attributes for GTP packet with downlink */
795                         params->attr = ice_attr_gtpu_down;
796                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
797                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
798                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
799                         ice_and_bitmap(params->ptypes, params->ptypes,
800                                        src, ICE_FLOW_PTYPE_MAX);
801
802                         /* Attributes for GTP packet with uplink */
803                         params->attr = ice_attr_gtpu_up;
804                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
805                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
806                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
807                         ice_and_bitmap(params->ptypes, params->ptypes,
808                                        src, ICE_FLOW_PTYPE_MAX);
809
810                         /* Attributes for GTP packet with Extension Header */
811                         params->attr = ice_attr_gtpu_eh;
812                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
813                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
814                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
815                         ice_and_bitmap(params->ptypes, params->ptypes,
816                                        src, ICE_FLOW_PTYPE_MAX);
817                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
818                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
819                         ice_and_bitmap(params->ptypes, params->ptypes,
820                                        src, ICE_FLOW_PTYPE_MAX);
821                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
822                         src = (const ice_bitmap_t *)ice_ptypes_esp;
823                         ice_and_bitmap(params->ptypes, params->ptypes,
824                                        src, ICE_FLOW_PTYPE_MAX);
825                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
826                         src = (const ice_bitmap_t *)ice_ptypes_ah;
827                         ice_and_bitmap(params->ptypes, params->ptypes,
828                                        src, ICE_FLOW_PTYPE_MAX);
829                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
830                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
831                         ice_and_bitmap(params->ptypes, params->ptypes,
832                                        src, ICE_FLOW_PTYPE_MAX);
833                 }
834
835                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
836                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
837                                 src =
838                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
839                         else
840                                 src =
841                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
842
843                         ice_and_bitmap(params->ptypes, params->ptypes,
844                                        src, ICE_FLOW_PTYPE_MAX);
845                 } else {
846                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
847                         ice_andnot_bitmap(params->ptypes, params->ptypes,
848                                           src, ICE_FLOW_PTYPE_MAX);
849
850                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
851                         ice_andnot_bitmap(params->ptypes, params->ptypes,
852                                           src, ICE_FLOW_PTYPE_MAX);
853                 }
854         }
855
856         return ICE_SUCCESS;
857 }
858
859 /**
860  * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
861  * @hw: pointer to the HW struct
862  * @params: information about the flow to be processed
863  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
864  *
865  * This function will allocate an extraction sequence entries for a DWORD size
866  * chunk of the packet flags.
867  */
868 static enum ice_status
869 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
870                           struct ice_flow_prof_params *params,
871                           enum ice_flex_mdid_pkt_flags flags)
872 {
873         u8 fv_words = hw->blk[params->blk].es.fvw;
874         u8 idx;
875
876         /* Make sure the number of extraction sequence entries required does not
877          * exceed the block's capacity.
878          */
879         if (params->es_cnt >= fv_words)
880                 return ICE_ERR_MAX_LIMIT;
881
882         /* some blocks require a reversed field vector layout */
883         if (hw->blk[params->blk].es.reverse)
884                 idx = fv_words - params->es_cnt - 1;
885         else
886                 idx = params->es_cnt;
887
888         params->es[idx].prot_id = ICE_PROT_META_ID;
889         params->es[idx].off = flags;
890         params->es_cnt++;
891
892         return ICE_SUCCESS;
893 }
894
895 /**
896  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
897  * @hw: pointer to the HW struct
898  * @params: information about the flow to be processed
899  * @seg: packet segment index of the field to be extracted
900  * @fld: ID of field to be extracted
901  * @match: bitfield of all fields
902  *
903  * This function determines the protocol ID, offset, and size of the given
904  * field. It then allocates one or more extraction sequence entries for the
905  * given field, and fill the entries with protocol ID and offset information.
906  */
907 static enum ice_status
908 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
909                     u8 seg, enum ice_flow_field fld, u64 match)
910 {
911         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
912         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
913         u8 fv_words = hw->blk[params->blk].es.fvw;
914         struct ice_flow_fld_info *flds;
915         u16 cnt, ese_bits, i;
916         u16 sib_mask = 0;
917         s16 adj = 0;
918         u16 mask;
919         u16 off;
920
921         flds = params->prof->segs[seg].fields;
922
923         switch (fld) {
924         case ICE_FLOW_FIELD_IDX_ETH_DA:
925         case ICE_FLOW_FIELD_IDX_ETH_SA:
926         case ICE_FLOW_FIELD_IDX_S_VLAN:
927         case ICE_FLOW_FIELD_IDX_C_VLAN:
928                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
929                 break;
930         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
931                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
932                 break;
933         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
934                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
935                 break;
936         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
937                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
938                 break;
939         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
940         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
941                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
942
943                 /* TTL and PROT share the same extraction seq. entry.
944                  * Each is considered a sibling to the other in terms of sharing
945                  * the same extraction sequence entry.
946                  */
947                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
948                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
949                 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
950                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
951
952                 /* If the sibling field is also included, that field's
953                  * mask needs to be included.
954                  */
955                 if (match & BIT(sib))
956                         sib_mask = ice_flds_info[sib].mask;
957                 break;
958         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
959         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
960                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
961
962                 /* TTL and PROT share the same extraction seq. entry.
963                  * Each is considered a sibling to the other in terms of sharing
964                  * the same extraction sequence entry.
965                  */
966                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
967                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
968                 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
969                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
970
971                 /* If the sibling field is also included, that field's
972                  * mask needs to be included.
973                  */
974                 if (match & BIT(sib))
975                         sib_mask = ice_flds_info[sib].mask;
976                 break;
977         case ICE_FLOW_FIELD_IDX_IPV4_SA:
978         case ICE_FLOW_FIELD_IDX_IPV4_DA:
979                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
980                 break;
981         case ICE_FLOW_FIELD_IDX_IPV6_SA:
982         case ICE_FLOW_FIELD_IDX_IPV6_DA:
983         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
984         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
985         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
986         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
987         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
988         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
989                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
990                 break;
991         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
992         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
993         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
994                 prot_id = ICE_PROT_TCP_IL;
995                 break;
996         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
997         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
998                 prot_id = ICE_PROT_UDP_IL_OR_S;
999                 break;
1000         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1001         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1002                 prot_id = ICE_PROT_SCTP_IL;
1003                 break;
1004         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1005         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1006         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1007         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1008         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1009         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1010                 /* GTP is accessed through UDP OF protocol */
1011                 prot_id = ICE_PROT_UDP_OF;
1012                 break;
1013         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1014                 prot_id = ICE_PROT_PPPOE;
1015                 break;
1016         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1017                 prot_id = ICE_PROT_UDP_IL_OR_S;
1018                 break;
1019         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1020                 prot_id = ICE_PROT_L2TPV3;
1021                 break;
1022         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1023                 prot_id = ICE_PROT_ESP_F;
1024                 break;
1025         case ICE_FLOW_FIELD_IDX_AH_SPI:
1026                 prot_id = ICE_PROT_ESP_2;
1027                 break;
1028         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1029                 prot_id = ICE_PROT_UDP_IL_OR_S;
1030                 break;
1031         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1032         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1033         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1034         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1035         case ICE_FLOW_FIELD_IDX_ARP_OP:
1036                 prot_id = ICE_PROT_ARP_OF;
1037                 break;
1038         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1039         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1040                 /* ICMP type and code share the same extraction seq. entry */
1041                 prot_id = (params->prof->segs[seg].hdrs &
1042                            ICE_FLOW_SEG_HDR_IPV4) ?
1043                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1044                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1045                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1046                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1047                 break;
1048         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1049                 prot_id = ICE_PROT_GRE_OF;
1050                 break;
1051         default:
1052                 return ICE_ERR_NOT_IMPL;
1053         }
1054
1055         /* Each extraction sequence entry is a word in size, and extracts a
1056          * word-aligned offset from a protocol header.
1057          */
1058         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1059
1060         flds[fld].xtrct.prot_id = prot_id;
1061         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1062                 ICE_FLOW_FV_EXTRACT_SZ;
1063         flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
1064         flds[fld].xtrct.idx = params->es_cnt;
1065         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1066
1067         /* Adjust the next field-entry index after accommodating the number of
1068          * entries this field consumes
1069          */
1070         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1071                                   ice_flds_info[fld].size, ese_bits);
1072
1073         /* Fill in the extraction sequence entries needed for this field */
1074         off = flds[fld].xtrct.off;
1075         mask = flds[fld].xtrct.mask;
1076         for (i = 0; i < cnt; i++) {
1077                 /* Only consume an extraction sequence entry if there is no
1078                  * sibling field associated with this field or the sibling entry
1079                  * already extracts the word shared with this field.
1080                  */
1081                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1082                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1083                     flds[sib].xtrct.off != off) {
1084                         u8 idx;
1085
1086                         /* Make sure the number of extraction sequence required
1087                          * does not exceed the block's capability
1088                          */
1089                         if (params->es_cnt >= fv_words)
1090                                 return ICE_ERR_MAX_LIMIT;
1091
1092                         /* some blocks require a reversed field vector layout */
1093                         if (hw->blk[params->blk].es.reverse)
1094                                 idx = fv_words - params->es_cnt - 1;
1095                         else
1096                                 idx = params->es_cnt;
1097
1098                         params->es[idx].prot_id = prot_id;
1099                         params->es[idx].off = off;
1100                         params->mask[idx] = mask | sib_mask;
1101                         params->es_cnt++;
1102                 }
1103
1104                 off += ICE_FLOW_FV_EXTRACT_SZ;
1105         }
1106
1107         return ICE_SUCCESS;
1108 }
1109
1110 /**
1111  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1112  * @hw: pointer to the HW struct
1113  * @params: information about the flow to be processed
1114  * @seg: index of packet segment whose raw fields are to be be extracted
1115  */
1116 static enum ice_status
1117 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1118                      u8 seg)
1119 {
1120         u16 fv_words;
1121         u16 hdrs_sz;
1122         u8 i;
1123
1124         if (!params->prof->segs[seg].raws_cnt)
1125                 return ICE_SUCCESS;
1126
1127         if (params->prof->segs[seg].raws_cnt >
1128             ARRAY_SIZE(params->prof->segs[seg].raws))
1129                 return ICE_ERR_MAX_LIMIT;
1130
1131         /* Offsets within the segment headers are not supported */
1132         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1133         if (!hdrs_sz)
1134                 return ICE_ERR_PARAM;
1135
1136         fv_words = hw->blk[params->blk].es.fvw;
1137
1138         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1139                 struct ice_flow_seg_fld_raw *raw;
1140                 u16 off, cnt, j;
1141
1142                 raw = &params->prof->segs[seg].raws[i];
1143
1144                 /* Storing extraction information */
1145                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1146                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1147                         ICE_FLOW_FV_EXTRACT_SZ;
1148                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1149                         BITS_PER_BYTE;
1150                 raw->info.xtrct.idx = params->es_cnt;
1151
1152                 /* Determine the number of field vector entries this raw field
1153                  * consumes.
1154                  */
1155                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1156                                           (raw->info.src.last * BITS_PER_BYTE),
1157                                           (ICE_FLOW_FV_EXTRACT_SZ *
1158                                            BITS_PER_BYTE));
1159                 off = raw->info.xtrct.off;
1160                 for (j = 0; j < cnt; j++) {
1161                         u16 idx;
1162
1163                         /* Make sure the number of extraction sequence required
1164                          * does not exceed the block's capability
1165                          */
1166                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1167                             params->es_cnt >= ICE_MAX_FV_WORDS)
1168                                 return ICE_ERR_MAX_LIMIT;
1169
1170                         /* some blocks require a reversed field vector layout */
1171                         if (hw->blk[params->blk].es.reverse)
1172                                 idx = fv_words - params->es_cnt - 1;
1173                         else
1174                                 idx = params->es_cnt;
1175
1176                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1177                         params->es[idx].off = off;
1178                         params->es_cnt++;
1179                         off += ICE_FLOW_FV_EXTRACT_SZ;
1180                 }
1181         }
1182
1183         return ICE_SUCCESS;
1184 }
1185
1186 /**
1187  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1188  * @hw: pointer to the HW struct
1189  * @params: information about the flow to be processed
1190  *
1191  * This function iterates through all matched fields in the given segments, and
1192  * creates an extraction sequence for the fields.
1193  */
1194 static enum ice_status
1195 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1196                           struct ice_flow_prof_params *params)
1197 {
1198         enum ice_status status = ICE_SUCCESS;
1199         u8 i;
1200
1201         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1202          * packet flags
1203          */
1204         if (params->blk == ICE_BLK_ACL) {
1205                 status = ice_flow_xtract_pkt_flags(hw, params,
1206                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1207                 if (status)
1208                         return status;
1209         }
1210
1211         for (i = 0; i < params->prof->segs_cnt; i++) {
1212                 u64 match = params->prof->segs[i].match;
1213                 enum ice_flow_field j;
1214
1215                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1216                         const u64 bit = BIT_ULL(j);
1217
1218                         if (match & bit) {
1219                                 status = ice_flow_xtract_fld(hw, params, i, j,
1220                                                              match);
1221                                 if (status)
1222                                         return status;
1223                                 match &= ~bit;
1224                         }
1225                 }
1226
1227                 /* Process raw matching bytes */
1228                 status = ice_flow_xtract_raws(hw, params, i);
1229                 if (status)
1230                         return status;
1231         }
1232
1233         return status;
1234 }
1235
1236 /**
1237  * ice_flow_sel_acl_scen - returns the specific scenario
1238  * @hw: pointer to the hardware structure
1239  * @params: information about the flow to be processed
1240  *
1241  * This function will return the specific scenario based on the
1242  * params passed to it
1243  */
1244 static enum ice_status
1245 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1246 {
1247         /* Find the best-fit scenario for the provided match width */
1248         struct ice_acl_scen *cand_scen = NULL, *scen;
1249
1250         if (!hw->acl_tbl)
1251                 return ICE_ERR_DOES_NOT_EXIST;
1252
1253         /* Loop through each scenario and match against the scenario width
1254          * to select the specific scenario
1255          */
1256         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1257                 if (scen->eff_width >= params->entry_length &&
1258                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1259                         cand_scen = scen;
1260         if (!cand_scen)
1261                 return ICE_ERR_DOES_NOT_EXIST;
1262
1263         params->prof->cfg.scen = cand_scen;
1264
1265         return ICE_SUCCESS;
1266 }
1267
/**
 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
 * @params: information about the flow to be processed
 *
 * Walks every matched field and raw field of each packet segment and
 * assigns each one a position in the ACL entry: range-checked fields
 * consume one of the range-checker slots, while all other fields consume
 * bytes from the byte-selection region, starting at
 * ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX. The resulting entry length (in
 * bytes) is stored in params->entry_length for later scenario selection.
 */
static enum ice_status
ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
{
	u16 index, i, range_idx = 0;

	index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		struct ice_flow_seg_info *seg = &params->prof->segs[i];
		u64 match = seg->match;
		u8 j;

		/* Clear each handled bit from the local 'match' copy so the
		 * loop can terminate as soon as all set bits are processed.
		 */
		for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
			struct ice_flow_fld_info *fld;
			const u64 bit = BIT_ULL(j);

			if (!(match & bit))
				continue;

			fld = &seg->fields[j];
			fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;

			if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
				fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;

				/* Range checking only supported for single
				 * words
				 */
				if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
							fld->xtrct.disp,
							BITS_PER_BYTE * 2) > 1)
					return ICE_ERR_PARAM;

				/* Ranges must define low and high values */
				if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
				    fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
					return ICE_ERR_PARAM;

				/* Range fields are identified by the index of
				 * the range checker they occupy, not by a
				 * byte-selection offset.
				 */
				fld->entry.val = range_idx++;
			} else {
				/* Store adjusted byte-length of field for later
				 * use, taking into account potential
				 * non-byte-aligned displacement
				 */
				fld->entry.last = DIVIDE_AND_ROUND_UP
					(ice_flds_info[j].size +
					 (fld->xtrct.disp % BITS_PER_BYTE),
					 BITS_PER_BYTE);
				fld->entry.val = index;
				index += fld->entry.last;
			}

			match &= ~bit;
		}

		/* Raw (caller-specified offset/length) fields always consume
		 * byte-selection space, never range checkers.
		 */
		for (j = 0; j < seg->raws_cnt; j++) {
			struct ice_flow_seg_fld_raw *raw = &seg->raws[j];

			raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
			raw->info.entry.val = index;
			raw->info.entry.last = raw->info.src.last;
			index += raw->info.entry.last;
		}
	}

	/* Currently only support using the byte selection base, which only
	 * allows for an effective entry size of 30 bytes. Reject anything
	 * larger.
	 */
	if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
		return ICE_ERR_PARAM;

	/* Only 8 range checkers per profile, reject anything trying to use
	 * more
	 */
	if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
		return ICE_ERR_PARAM;

	/* Store # bytes required for entry for later use */
	params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	return ICE_SUCCESS;
}
1355
1356 /**
1357  * ice_flow_proc_segs - process all packet segments associated with a profile
1358  * @hw: pointer to the HW struct
1359  * @params: information about the flow to be processed
1360  */
1361 static enum ice_status
1362 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1363 {
1364         enum ice_status status;
1365
1366         status = ice_flow_proc_seg_hdrs(params);
1367         if (status)
1368                 return status;
1369
1370         status = ice_flow_create_xtrct_seq(hw, params);
1371         if (status)
1372                 return status;
1373
1374         switch (params->blk) {
1375         case ICE_BLK_FD:
1376         case ICE_BLK_RSS:
1377                 status = ICE_SUCCESS;
1378                 break;
1379         case ICE_BLK_ACL:
1380                 status = ice_flow_acl_def_entry_frmt(params);
1381                 if (status)
1382                         return status;
1383                 status = ice_flow_sel_acl_scen(hw, params);
1384                 if (status)
1385                         return status;
1386                 break;
1387         default:
1388                 return ICE_ERR_NOT_IMPL;
1389         }
1390
1391         return status;
1392 }
1393
1394 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1395 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1396 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1397
1398 /**
1399  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1400  * @hw: pointer to the HW struct
1401  * @blk: classification stage
1402  * @dir: flow direction
1403  * @segs: array of one or more packet segments that describe the flow
1404  * @segs_cnt: number of packet segments provided
1405  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1406  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1407  */
1408 static struct ice_flow_prof *
1409 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1410                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1411                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1412 {
1413         struct ice_flow_prof *p, *prof = NULL;
1414
1415         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1416         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1417                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1418                     segs_cnt && segs_cnt == p->segs_cnt) {
1419                         u8 i;
1420
1421                         /* Check for profile-VSI association if specified */
1422                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1423                             ice_is_vsi_valid(hw, vsi_handle) &&
1424                             !ice_is_bit_set(p->vsis, vsi_handle))
1425                                 continue;
1426
1427                         /* Protocol headers must be checked. Matched fields are
1428                          * checked if specified.
1429                          */
1430                         for (i = 0; i < segs_cnt; i++)
1431                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1432                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1433                                      segs[i].match != p->segs[i].match))
1434                                         break;
1435
1436                         /* A match is found if all segments are matched */
1437                         if (i == segs_cnt) {
1438                                 prof = p;
1439                                 break;
1440                         }
1441                 }
1442         ice_release_lock(&hw->fl_profs_locks[blk]);
1443
1444         return prof;
1445 }
1446
1447 /**
1448  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1449  * @hw: pointer to the HW struct
1450  * @blk: classification stage
1451  * @dir: flow direction
1452  * @segs: array of one or more packet segments that describe the flow
1453  * @segs_cnt: number of packet segments provided
1454  */
1455 u64
1456 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1457                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1458 {
1459         struct ice_flow_prof *p;
1460
1461         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1462                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1463
1464         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1465 }
1466
1467 /**
1468  * ice_flow_find_prof_id - Look up a profile with given profile ID
1469  * @hw: pointer to the HW struct
1470  * @blk: classification stage
1471  * @prof_id: unique ID to identify this flow profile
1472  */
1473 static struct ice_flow_prof *
1474 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1475 {
1476         struct ice_flow_prof *p;
1477
1478         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1479                 if (p->id == prof_id)
1480                         return p;
1481
1482         return NULL;
1483 }
1484
1485 /**
1486  * ice_dealloc_flow_entry - Deallocate flow entry memory
1487  * @hw: pointer to the HW struct
1488  * @entry: flow entry to be removed
1489  */
1490 static void
1491 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1492 {
1493         if (!entry)
1494                 return;
1495
1496         if (entry->entry)
1497                 ice_free(hw, entry->entry);
1498
1499         if (entry->range_buf) {
1500                 ice_free(hw, entry->range_buf);
1501                 entry->range_buf = NULL;
1502         }
1503
1504         if (entry->acts) {
1505                 ice_free(hw, entry->acts);
1506                 entry->acts = NULL;
1507                 entry->acts_cnt = 0;
1508         }
1509
1510         ice_free(hw, entry);
1511 }
1512
1513 #define ICE_ACL_INVALID_SCEN    0x3f
1514
1515 /**
1516  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1517  * @hw: pointer to the hardware structure
1518  * @prof: pointer to flow profile
1519  * @buf: destination buffer function writes partial extraction sequence to
1520  *
1521  * returns ICE_SUCCESS if no PF is associated to the given profile
1522  * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1523  * returns other error code for real error
1524  */
1525 static enum ice_status
1526 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1527                             struct ice_aqc_acl_prof_generic_frmt *buf)
1528 {
1529         enum ice_status status;
1530         u8 prof_id = 0;
1531
1532         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1533         if (status)
1534                 return status;
1535
1536         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1537         if (status)
1538                 return status;
1539
1540         /* If all PF's associated scenarios are all 0 or all
1541          * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1542          * not been configured yet.
1543          */
1544         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1545             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1546             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1547             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1548                 return ICE_SUCCESS;
1549
1550         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1551             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1552             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1553             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1554             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1555             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1556             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1557             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1558                 return ICE_SUCCESS;
1559         else
1560                 return ICE_ERR_IN_USE;
1561 }
1562
1563 /**
1564  * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1565  * @hw: pointer to the hardware structure
1566  * @acts: array of actions to be performed on a match
1567  * @acts_cnt: number of actions
1568  */
1569 static enum ice_status
1570 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1571                            u8 acts_cnt)
1572 {
1573         int i;
1574
1575         for (i = 0; i < acts_cnt; i++) {
1576                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1577                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1578                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1579                         struct ice_acl_cntrs cntrs;
1580                         enum ice_status status;
1581
1582                         cntrs.bank = 0; /* Only bank0 for the moment */
1583                         cntrs.first_cntr =
1584                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1585                         cntrs.last_cntr =
1586                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1587
1588                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1589                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1590                         else
1591                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1592
1593                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1594                         if (status)
1595                                 return status;
1596                 }
1597         }
1598         return ICE_SUCCESS;
1599 }
1600
1601 /**
1602  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1603  * @hw: pointer to the hardware structure
1604  * @prof: pointer to flow profile
1605  *
1606  * Disassociate the scenario from the profile for the PF of the VSI.
1607  */
1608 static enum ice_status
1609 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1610 {
1611         struct ice_aqc_acl_prof_generic_frmt buf;
1612         enum ice_status status = ICE_SUCCESS;
1613         u8 prof_id = 0;
1614
1615         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1616
1617         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1618         if (status)
1619                 return status;
1620
1621         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1622         if (status)
1623                 return status;
1624
1625         /* Clear scenario for this PF */
1626         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1627         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1628
1629         return status;
1630 }
1631
/**
 * ice_flow_rem_entry_sync - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry: flow entry to be removed
 *
 * Unlinks the entry from its profile's entry list and frees its memory.
 * For ACL entries, the HW scenario entry is removed first and any ACL
 * counters referenced by the entry's actions are deallocated.
 *
 * Assumption: the caller holds the owning profile's entries lock (see
 * ice_flow_rem_prof_sync()).
 */
static enum ice_status
ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
			struct ice_flow_entry *entry)
{
	if (!entry)
		return ICE_ERR_BAD_PTR;

	if (blk == ICE_BLK_ACL) {
		enum ice_status status;

		/* ACL teardown needs the profile's scenario handle */
		if (!entry->prof)
			return ICE_ERR_BAD_PTR;

		status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
					   entry->scen_entry_idx);
		if (status)
			return status;

		/* Checks if we need to release an ACL counter. */
		if (entry->acts_cnt && entry->acts)
			ice_flow_acl_free_act_cntr(hw, entry->acts,
						   entry->acts_cnt);
	}

	LIST_DEL(&entry->l_entry);

	ice_dealloc_flow_entry(hw, entry);

	return ICE_SUCCESS;
}
1668
/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @acts: array of default actions
 * @acts_cnt: number of default actions
 * @prof: stores the returned flow profile added
 *
 * Allocates a flow profile, copies the caller's segments and actions into
 * it, processes the segments into an extraction sequence, and programs a
 * matching HW profile. On failure every allocation made here is released;
 * on success ownership of the new profile is transferred to the caller
 * through @prof.
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_action *acts, u8 acts_cnt,
		       struct ice_flow_prof **prof)
{
	struct ice_flow_prof_params params;
	enum ice_status status;
	u8 i;

	/* Actions are optional, but a non-zero count requires a valid array */
	if (!prof || (acts_cnt && !acts))
		return ICE_ERR_BAD_PTR;

	ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
	params.prof = (struct ice_flow_prof *)
		ice_malloc(hw, sizeof(*params.prof));
	if (!params.prof)
		return ICE_ERR_NO_MEMORY;

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params.es[i].prot_id = ICE_PROT_INVALID;
		params.es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params.blk = blk;
	params.prof->id = prof_id;
	params.prof->dir = dir;
	params.prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
			   ICE_NONDMA_TO_NONDMA);

	/* Make a copy of the actions that need to be persistent in the flow
	 * profile instance.
	 */
	if (acts_cnt) {
		params.prof->acts = (struct ice_flow_action *)
			ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
				   ICE_NONDMA_TO_NONDMA);

		if (!params.prof->acts) {
			status = ICE_ERR_NO_MEMORY;
			goto out;
		}
	}

	status = ice_flow_proc_segs(hw, &params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW,
			  "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
			      params.attr, params.attr_cnt, params.es,
			      params.mask);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params.prof->entries);
	ice_init_lock(&params.prof->entries_lock);
	*prof = params.prof;

out:
	/* Single cleanup point: on any failure free the actions copy and
	 * the profile itself; on success status is ICE_SUCCESS and nothing
	 * is freed.
	 */
	if (status) {
		if (params.prof->acts)
			ice_free(hw, params.prof->acts);
		ice_free(hw, params.prof);
	}

	return status;
}
1764
/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Removes any remaining entries of the profile, performs ACL-specific
 * teardown (scenario disassociation and, when no PF still uses the
 * profile, clearing of its range checkers), then removes the HW profile
 * and frees the flow profile itself.
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the flow profile */
	if (!LIST_EMPTY(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		ice_acquire_lock(&prof->entries_lock);

		LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
					 l_entry) {
			/* NOTE(review): a failure here only stops this loop;
			 * 'status' is overwritten by ice_rem_prof() below, so
			 * the entry-removal error is not propagated — confirm
			 * this is intentional.
			 */
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		ice_release_lock(&prof->entries_lock);
	}

	if (blk == ICE_BLK_ACL) {
		struct ice_aqc_acl_profile_ranges query_rng_buf;
		struct ice_aqc_acl_prof_generic_frmt buf;
		u8 prof_id = 0;

		/* Disassociate the scenario from the profile for the PF */
		status = ice_flow_acl_disassoc_scen(hw, prof);
		if (status)
			return status;

		/* Clear the range-checker if the profile ID is no longer
		 * used by any PF
		 */
		status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
		if (status && status != ICE_ERR_IN_USE) {
			return status;
		} else if (!status) {
			/* Clear the range-checker value for profile ID */
			ice_memset(&query_rng_buf, 0,
				   sizeof(struct ice_aqc_acl_profile_ranges),
				   ICE_NONDMA_MEM);

			status = ice_flow_get_hw_prof(hw, blk, prof->id,
						      &prof_id);
			if (status)
				return status;

			/* Programming an all-zero range buffer disables the
			 * profile's range checkers
			 */
			status = ice_prog_acl_prof_ranges(hw, prof_id,
							  &query_rng_buf, NULL);
			if (status)
				return status;
		}
	}

	/* Remove all hardware profiles associated with this flow profile */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		LIST_DEL(&prof->l_entry);
		ice_destroy_lock(&prof->entries_lock);
		if (prof->acts)
			ice_free(hw, prof->acts);
		ice_free(hw, prof);
	}

	return status;
}
1841
1842 /**
1843  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1844  * @buf: Destination buffer function writes partial xtrct sequence to
1845  * @info: Info about field
1846  */
1847 static void
1848 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1849                                struct ice_flow_fld_info *info)
1850 {
1851         u16 dst, i;
1852         u8 src;
1853
1854         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1855                 info->xtrct.disp / BITS_PER_BYTE;
1856         dst = info->entry.val;
1857         for (i = 0; i < info->entry.last; i++)
1858                 /* HW stores field vector words in LE, convert words back to BE
1859                  * so constructed entries will end up in network order
1860                  */
1861                 buf->byte_selection[dst++] = src++ ^ 1;
1862 }
1863
/**
 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
 * @hw: pointer to the hardware structure
 * @prof: pointer to flow profile
 *
 * Programs the profile's byte/word selection (done at most once per
 * profile, regardless of how many PFs share it) and records the current
 * PF's scenario number in the profile's generic format.
 */
static enum ice_status
ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
{
	struct ice_aqc_acl_prof_generic_frmt buf;
	struct ice_flow_fld_info *info;
	enum ice_status status;
	u8 prof_id = 0;
	u16 i;

	ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

	status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
	if (status)
		return status;

	/* ICE_SUCCESS here means no PF uses the profile yet, so the full
	 * extraction sequence must be built below; ICE_ERR_IN_USE means
	 * another PF already programmed it and only this PF's scenario
	 * number needs updating.
	 */
	status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
	if (status && status != ICE_ERR_IN_USE)
		return status;

	if (!status) {
		/* Program the profile dependent configuration. This is done
		 * only once regardless of the number of PFs using that profile
		 */
		ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

		for (i = 0; i < prof->segs_cnt; i++) {
			struct ice_flow_seg_info *seg = &prof->segs[i];
			u64 match = seg->match;
			u16 j;

			/* Clear each handled bit so the loop can stop early */
			for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
				const u64 bit = BIT_ULL(j);

				if (!(match & bit))
					continue;

				info = &seg->fields[j];

				/* Range-checked fields use word selection;
				 * all other fields use byte selection
				 */
				if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
					buf.word_selection[info->entry.val] =
								info->xtrct.idx;
				else
					ice_flow_acl_set_xtrct_seq_fld(&buf,
								       info);

				match &= ~bit;
			}

			for (j = 0; j < seg->raws_cnt; j++) {
				info = &seg->raws[j].info;
				ice_flow_acl_set_xtrct_seq_fld(&buf, info);
			}
		}

		/* Mark every PF's scenario slot unused until a PF claims it */
		ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
			   ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
			   ICE_NONDMA_MEM);
	}

	/* Update the current PF */
	buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
	status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);

	return status;
}
1934
1935 /**
1936  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1937  * @hw: pointer to the hardware structure
1938  * @blk: classification stage
1939  * @vsi_handle: software VSI handle
1940  * @vsig: target VSI group
1941  *
1942  * Assumption: the caller has already verified that the VSI to
1943  * be added has the same characteristics as the VSIG and will
1944  * thereby have access to all resources added to that VSIG.
1945  */
1946 enum ice_status
1947 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1948                         u16 vsig)
1949 {
1950         enum ice_status status;
1951
1952         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1953                 return ICE_ERR_PARAM;
1954
1955         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1956         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1957                                   vsig);
1958         ice_release_lock(&hw->fl_profs_locks[blk]);
1959
1960         return status;
1961 }
1962
1963 /**
1964  * ice_flow_assoc_prof - associate a VSI with a flow profile
1965  * @hw: pointer to the hardware structure
1966  * @blk: classification stage
1967  * @prof: pointer to flow profile
1968  * @vsi_handle: software VSI handle
1969  *
1970  * Assumption: the caller has acquired the lock to the profile list
1971  * and the software VSI handle has been validated
1972  */
1973 static enum ice_status
1974 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1975                     struct ice_flow_prof *prof, u16 vsi_handle)
1976 {
1977         enum ice_status status = ICE_SUCCESS;
1978
1979         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1980                 if (blk == ICE_BLK_ACL) {
1981                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
1982                         if (status)
1983                                 return status;
1984                 }
1985                 status = ice_add_prof_id_flow(hw, blk,
1986                                               ice_get_hw_vsi_num(hw,
1987                                                                  vsi_handle),
1988                                               prof->id);
1989                 if (!status)
1990                         ice_set_bit(vsi_handle, prof->vsis);
1991                 else
1992                         ice_debug(hw, ICE_DBG_FLOW,
1993                                   "HW profile add failed, %d\n",
1994                                   status);
1995         }
1996
1997         return status;
1998 }
1999
2000 /**
2001  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2002  * @hw: pointer to the hardware structure
2003  * @blk: classification stage
2004  * @prof: pointer to flow profile
2005  * @vsi_handle: software VSI handle
2006  *
2007  * Assumption: the caller has acquired the lock to the profile list
2008  * and the software VSI handle has been validated
2009  */
2010 static enum ice_status
2011 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2012                        struct ice_flow_prof *prof, u16 vsi_handle)
2013 {
2014         enum ice_status status = ICE_SUCCESS;
2015
2016         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2017                 status = ice_rem_prof_id_flow(hw, blk,
2018                                               ice_get_hw_vsi_num(hw,
2019                                                                  vsi_handle),
2020                                               prof->id);
2021                 if (!status)
2022                         ice_clear_bit(vsi_handle, prof->vsis);
2023                 else
2024                         ice_debug(hw, ICE_DBG_FLOW,
2025                                   "HW profile remove failed, %d\n",
2026                                   status);
2027         }
2028
2029         return status;
2030 }
2031
2032 /**
2033  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2034  * @hw: pointer to the HW struct
2035  * @blk: classification stage
2036  * @dir: flow direction
2037  * @prof_id: unique ID to identify this flow profile
2038  * @segs: array of one or more packet segments that describe the flow
2039  * @segs_cnt: number of packet segments provided
2040  * @acts: array of default actions
2041  * @acts_cnt: number of default actions
2042  * @prof: stores the returned flow profile added
2043  */
2044 enum ice_status
2045 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2046                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2047                   struct ice_flow_action *acts, u8 acts_cnt,
2048                   struct ice_flow_prof **prof)
2049 {
2050         enum ice_status status;
2051
2052         if (segs_cnt > ICE_FLOW_SEG_MAX)
2053                 return ICE_ERR_MAX_LIMIT;
2054
2055         if (!segs_cnt)
2056                 return ICE_ERR_PARAM;
2057
2058         if (!segs)
2059                 return ICE_ERR_BAD_PTR;
2060
2061         status = ice_flow_val_hdrs(segs, segs_cnt);
2062         if (status)
2063                 return status;
2064
2065         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2066
2067         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2068                                         acts, acts_cnt, prof);
2069         if (!status)
2070                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2071
2072         ice_release_lock(&hw->fl_profs_locks[blk]);
2073
2074         return status;
2075 }
2076
2077 /**
2078  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2079  * @hw: pointer to the HW struct
2080  * @blk: the block for which the flow profile is to be removed
2081  * @prof_id: unique ID of the flow profile to be removed
2082  */
2083 enum ice_status
2084 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2085 {
2086         struct ice_flow_prof *prof;
2087         enum ice_status status;
2088
2089         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2090
2091         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2092         if (!prof) {
2093                 status = ICE_ERR_DOES_NOT_EXIST;
2094                 goto out;
2095         }
2096
2097         /* prof becomes invalid after the call */
2098         status = ice_flow_rem_prof_sync(hw, blk, prof);
2099
2100 out:
2101         ice_release_lock(&hw->fl_profs_locks[blk]);
2102
2103         return status;
2104 }
2105
2106 /**
2107  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2108  * @hw: pointer to the HW struct
2109  * @blk: classification stage
2110  * @prof_id: the profile ID handle
2111  * @hw_prof_id: pointer to variable to receive the HW profile ID
2112  */
2113 enum ice_status
2114 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2115                      u8 *hw_prof_id)
2116 {
2117         struct ice_prof_map *map;
2118
2119         map = ice_search_prof_id(hw, blk, prof_id);
2120         if (map) {
2121                 *hw_prof_id = map->prof_id;
2122                 return ICE_SUCCESS;
2123         }
2124
2125         return ICE_ERR_DOES_NOT_EXIST;
2126 }
2127
/**
 * ice_flow_find_entry - look for a flow entry using its unique ID
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry_id: unique ID to identify this flow entry
 *
 * This function looks for the flow entry with the specified unique ID in all
 * flow profiles of the specified classification stage. If the entry is found,
 * it returns the handle to the flow entry. Otherwise, it returns
 * ICE_FLOW_ENTRY_ID_INVAL.
 */
u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
{
	struct ice_flow_entry *found = NULL;
	struct ice_flow_prof *p;

	/* Hold the block-level lock so the profile list is stable while
	 * scanning; each profile's entry list has its own lock.
	 */
	ice_acquire_lock(&hw->fl_profs_locks[blk]);

	LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
		struct ice_flow_entry *e;

		ice_acquire_lock(&p->entries_lock);
		LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
			if (e->id == entry_id) {
				found = e;
				break;
			}
		/* Release the per-profile lock before leaving the outer loop */
		ice_release_lock(&p->entries_lock);

		if (found)
			break;
	}

	ice_release_lock(&hw->fl_profs_locks[blk]);

	return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
}
2165
/**
 * ice_flow_acl_check_actions - Checks the ACL rule's actions
 * @hw: pointer to the hardware structure
 * @acts: array of actions to be performed on a match
 * @acts_cnt: number of actions
 * @cnt_alloc: indicates if an ACL counter has been allocated.
 *
 * Validates that every action has an ACL-supported type and that no action
 * type is duplicated, then allocates a FW ACL counter for counter-type
 * actions and stores the counter index in the action's ACL data.
 */
static enum ice_status
ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
			   u8 acts_cnt, bool *cnt_alloc)
{
	ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	int i;

	ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	*cnt_alloc = false;

	if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
		return ICE_ERR_OUT_OF_RANGE;

	for (i = 0; i < acts_cnt; i++) {
		/* Only NOP, DROP, CNTR_PKT and FWD_QUEUE are valid for ACL */
		if (acts[i].type != ICE_FLOW_ACT_NOP &&
		    acts[i].type != ICE_FLOW_ACT_DROP &&
		    acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
		    acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
			return ICE_ERR_CFG;

		/* If the caller want to add two actions of the same type, then
		 * it is considered invalid configuration.
		 */
		if (ice_test_and_set_bit(acts[i].type, dup_check))
			return ICE_ERR_PARAM;
	}

	/* Checks if ACL counters are needed. */
	for (i = 0; i < acts_cnt; i++) {
		/* NOTE(review): CNTR_BYTES and CNTR_PKT_BYTES are rejected by
		 * the validation loop above, so only the CNTR_PKT comparison
		 * can be true here — confirm whether the extra comparisons are
		 * intentional future-proofing before removing them.
		 */
		if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
		    acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
		    acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
			struct ice_acl_cntrs cntrs;
			enum ice_status status;

			cntrs.amount = 1;
			cntrs.bank = 0; /* Only bank0 for the moment */

			if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
				cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
			else
				cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;

			status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
			if (status)
				return status;
			/* Counter index within the bank */
			acts[i].data.acl_act.value =
						CPU_TO_LE16(cntrs.first_cntr);
			*cnt_alloc = true;
		}
	}

	return ICE_SUCCESS;
}
2228
/**
 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
 * @fld: number of the given field
 * @info: info about field
 * @range_buf: range checker configuration buffer
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @range: Input/output param indicating which range checkers are being used
 */
static void
ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
			      struct ice_aqc_acl_profile_ranges *range_buf,
			      u8 *data, u8 *range)
{
	u16 new_mask;

	/* If not specified, default mask is all bits in field */
	new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
		    BIT(ice_flds_info[fld].size) - 1 :
		    (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;

	/* If the mask is 0, then we don't need to worry about this input
	 * range checker value.
	 */
	if (new_mask) {
		/* Range bounds come from the caller's data buffer: 'last' is
		 * the high boundary, 'val' the low boundary, both shifted by
		 * the field's extraction displacement.
		 * NOTE(review): these are potentially unaligned u16 loads
		 * from 'data' — assumes buffer layout makes that safe here.
		 */
		u16 new_high =
			(*(u16 *)(data + info->src.last)) << info->xtrct.disp;
		u16 new_low =
			(*(u16 *)(data + info->src.val)) << info->xtrct.disp;
		/* Which of the HW range checkers this field was assigned */
		u8 range_idx = info->entry.val;

		range_buf->checker_cfg[range_idx].low_boundary =
			CPU_TO_BE16(new_low);
		range_buf->checker_cfg[range_idx].high_boundary =
			CPU_TO_BE16(new_high);
		range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);

		/* Indicate which range checker is being used */
		*range |= BIT(range_idx);
	}
}
2269
/**
 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
 * @fld: number of the given field
 * @info: info about the field
 * @buf: buffer containing the entry
 * @dontcare: buffer containing don't care mask for entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 *
 * Copies one matched field from the caller's data buffer into the ACL entry
 * key buffer, bit-shifting by the field's sub-byte extraction displacement,
 * and builds the corresponding don't-care (inverted mask) bytes.
 */
static void
ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
			    u8 *dontcare, u8 *data)
{
	u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
	bool use_mask = false;
	u8 disp;

	src = info->src.val;
	mask = info->src.mask;
	/* Entry buffer offsets are relative to the byte-selection start */
	dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
	/* Sub-byte displacement of the extracted field */
	disp = info->xtrct.disp % BITS_PER_BYTE;

	if (mask != ICE_FLOW_FLD_OFF_INVAL)
		use_mask = true;

	for (k = 0; k < info->entry.last; k++, dst++) {
		/* Add overflow bits from previous byte */
		buf[dst] = (tmp_s & 0xff00) >> 8;

		/* If mask is not valid, tmp_m is always zero, so just setting
		 * dontcare to 0 (no masked bits). If mask is valid, pulls in
		 * overflow bits of mask from prev byte
		 */
		dontcare[dst] = (tmp_m & 0xff00) >> 8;

		/* If there is displacement, last byte will only contain
		 * displaced data, but there is no more data to read from user
		 * buffer, so skip so as not to potentially read beyond end of
		 * user buffer
		 */
		if (!disp || k < info->entry.last - 1) {
			/* Store shifted data to use in next byte */
			tmp_s = data[src++] << disp;

			/* Add current (shifted) byte */
			buf[dst] |= tmp_s & 0xff;

			/* Handle mask if valid */
			if (use_mask) {
				/* Don't-care bits are the inverse of the
				 * user-supplied mask, shifted like the data
				 */
				tmp_m = (~data[mask++] & 0xff) << disp;
				dontcare[dst] |= tmp_m & 0xff;
			}
		}
	}

	/* Fill in don't care bits at beginning of field */
	if (disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
		for (k = 0; k < disp; k++)
			dontcare[dst] |= BIT(k);
	}

	/* Sub-byte position where the field ends within its last byte */
	end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;

	/* Fill in don't care bits at end of field */
	if (end_disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
		      info->entry.last - 1;
		for (k = end_disp; k < BITS_PER_BYTE; k++)
			dontcare[dst] |= BIT(k);
	}
}
2341
2342 /**
2343  * ice_flow_acl_frmt_entry - Format ACL entry
2344  * @hw: pointer to the hardware structure
2345  * @prof: pointer to flow profile
2346  * @e: pointer to the flow entry
2347  * @data: pointer to a data buffer containing flow entry's match values/masks
2348  * @acts: array of actions to be performed on a match
2349  * @acts_cnt: number of actions
2350  *
2351  * Formats the key (and key_inverse) to be matched from the data passed in,
2352  * along with data from the flow profile. This key/key_inverse pair makes up
2353  * the 'entry' for an ACL flow entry.
2354  */
2355 static enum ice_status
2356 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2357                         struct ice_flow_entry *e, u8 *data,
2358                         struct ice_flow_action *acts, u8 acts_cnt)
2359 {
2360         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2361         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2362         enum ice_status status;
2363         bool cnt_alloc;
2364         u8 prof_id = 0;
2365         u16 i, buf_sz;
2366
2367         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2368         if (status)
2369                 return status;
2370
2371         /* Format the result action */
2372
2373         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2374         if (status)
2375                 return status;
2376
2377         status = ICE_ERR_NO_MEMORY;
2378
2379         e->acts = (struct ice_flow_action *)
2380                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2381                            ICE_NONDMA_TO_NONDMA);
2382
2383         if (!e->acts)
2384                 goto out;
2385
2386         e->acts_cnt = acts_cnt;
2387
2388         /* Format the matching data */
2389         buf_sz = prof->cfg.scen->width;
2390         buf = (u8 *)ice_malloc(hw, buf_sz);
2391         if (!buf)
2392                 goto out;
2393
2394         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2395         if (!dontcare)
2396                 goto out;
2397
2398         /* 'key' buffer will store both key and key_inverse, so must be twice
2399          * size of buf
2400          */
2401         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2402         if (!key)
2403                 goto out;
2404
2405         range_buf = (struct ice_aqc_acl_profile_ranges *)
2406                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2407         if (!range_buf)
2408                 goto out;
2409
2410         /* Set don't care mask to all 1's to start, will zero out used bytes */
2411         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2412
2413         for (i = 0; i < prof->segs_cnt; i++) {
2414                 struct ice_flow_seg_info *seg = &prof->segs[i];
2415                 u64 match = seg->match;
2416                 u16 j;
2417
2418                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2419                         struct ice_flow_fld_info *info;
2420                         const u64 bit = BIT_ULL(j);
2421
2422                         if (!(match & bit))
2423                                 continue;
2424
2425                         info = &seg->fields[j];
2426
2427                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2428                                 ice_flow_acl_frmt_entry_range(j, info,
2429                                                               range_buf, data,
2430                                                               &range);
2431                         else
2432                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2433                                                             dontcare, data);
2434
2435                         match &= ~bit;
2436                 }
2437
2438                 for (j = 0; j < seg->raws_cnt; j++) {
2439                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2440                         u16 dst, src, mask, k;
2441                         bool use_mask = false;
2442
2443                         src = info->src.val;
2444                         dst = info->entry.val -
2445                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2446                         mask = info->src.mask;
2447
2448                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2449                                 use_mask = true;
2450
2451                         for (k = 0; k < info->entry.last; k++, dst++) {
2452                                 buf[dst] = data[src++];
2453                                 if (use_mask)
2454                                         dontcare[dst] = ~data[mask++];
2455                                 else
2456                                         dontcare[dst] = 0;
2457                         }
2458                 }
2459         }
2460
2461         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2462         dontcare[prof->cfg.scen->pid_idx] = 0;
2463
2464         /* Format the buffer for direction flags */
2465         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2466
2467         if (prof->dir == ICE_FLOW_RX)
2468                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2469
2470         if (range) {
2471                 buf[prof->cfg.scen->rng_chk_idx] = range;
2472                 /* Mark any unused range checkers as don't care */
2473                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2474                 e->range_buf = range_buf;
2475         } else {
2476                 ice_free(hw, range_buf);
2477         }
2478
2479         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2480                              buf_sz);
2481         if (status)
2482                 goto out;
2483
2484         e->entry = key;
2485         e->entry_sz = buf_sz * 2;
2486
2487 out:
2488         if (buf)
2489                 ice_free(hw, buf);
2490
2491         if (dontcare)
2492                 ice_free(hw, dontcare);
2493
2494         if (status && key)
2495                 ice_free(hw, key);
2496
2497         if (status && range_buf) {
2498                 ice_free(hw, range_buf);
2499                 e->range_buf = NULL;
2500         }
2501
2502         if (status && e->acts) {
2503                 ice_free(hw, e->acts);
2504                 e->acts = NULL;
2505                 e->acts_cnt = 0;
2506         }
2507
2508         if (status && cnt_alloc)
2509                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2510
2511         return status;
2512 }
2513
/**
 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
 *                                     the compared data.
 * @prof: pointer to flow profile
 * @e: pointer to the comparing flow entry
 * @do_chg_action: decide if we want to change the ACL action
 * @do_add_entry: decide if we want to add the new ACL entry
 * @do_rem_entry: decide if we want to remove the current ACL entry
 *
 * Find an ACL scenario entry that matches the compared data. In the same time,
 * this function also figure out:
 * a/ If we want to change the ACL action
 * b/ If we want to add the new ACL entry
 * c/ If we want to remove the current ACL entry
 */
static struct ice_flow_entry *
ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
				  struct ice_flow_entry *e, bool *do_chg_action,
				  bool *do_add_entry, bool *do_rem_entry)
{
	struct ice_flow_entry *p, *return_entry = NULL;
	u8 i, j;

	/* Check if:
	 * a/ There exists an entry with same matching data, but different
	 *    priority, then we remove this existing ACL entry. Then, we
	 *    will add the new entry to the ACL scenario.
	 * b/ There exists an entry with same matching data, priority, and
	 *    result action, then we do nothing
	 * c/ There exists an entry with same matching data, priority, but
	 *    different, action, then do only change the action's entry.
	 * d/ Else, we add this new entry to the ACL scenario.
	 */
	*do_chg_action = false;
	*do_add_entry = true;
	*do_rem_entry = false;
	LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
		/* Skip entries whose key bytes differ from e's */
		if (memcmp(p->entry, e->entry, p->entry_sz))
			continue;

		/* From this point, we have the same matching_data. */
		*do_add_entry = false;
		return_entry = p;

		if (p->priority != e->priority) {
			/* matching data && !priority */
			*do_add_entry = true;
			*do_rem_entry = true;
			break;
		}

		/* From this point, we will have matching_data && priority */
		if (p->acts_cnt != e->acts_cnt)
			*do_chg_action = true;
		for (i = 0; i < p->acts_cnt; i++) {
			bool found_not_match = false;

			/* NOTE(review): the inner loop breaks on the first
			 * pairwise mismatch, so it effectively compares
			 * p->acts[i] only against e->acts[0]; identical
			 * action lists in a different order are flagged as
			 * different. Confirm whether this order-sensitive
			 * comparison is intended (worst case it just
			 * reprograms an identical action set).
			 */
			for (j = 0; j < e->acts_cnt; j++)
				if (memcmp(&p->acts[i], &e->acts[j],
					   sizeof(struct ice_flow_action))) {
					found_not_match = true;
					break;
				}

			if (found_not_match) {
				*do_chg_action = true;
				break;
			}
		}

		/* (do_chg_action = true) means :
		 *    matching_data && priority && !result_action
		 * (do_chg_action = false) means :
		 *    matching_data && priority && result_action
		 */
		break;
	}

	return return_entry;
}
2594
2595 /**
2596  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2597  * @p: flow priority
2598  */
2599 static enum ice_acl_entry_prior
2600 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2601 {
2602         enum ice_acl_entry_prior acl_prior;
2603
2604         switch (p) {
2605         case ICE_FLOW_PRIO_LOW:
2606                 acl_prior = ICE_LOW;
2607                 break;
2608         case ICE_FLOW_PRIO_NORMAL:
2609                 acl_prior = ICE_NORMAL;
2610                 break;
2611         case ICE_FLOW_PRIO_HIGH:
2612                 acl_prior = ICE_HIGH;
2613                 break;
2614         default:
2615                 acl_prior = ICE_NORMAL;
2616                 break;
2617         }
2618
2619         return acl_prior;
2620 }
2621
/**
 * ice_flow_acl_union_rng_chk - Perform union operation between two
 *                              range-range checker buffers
 * @dst_buf: pointer to destination range checker buffer
 * @src_buf: pointer to source range checker buffer
 *
 * For this function, we do the union between dst_buf and src_buf
 * range checker buffer, and we will save the result back to dst_buf
 */
static enum ice_status
ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
			   struct ice_aqc_acl_profile_ranges *src_buf)
{
	u8 i, j;

	if (!dst_buf || !src_buf)
		return ICE_ERR_BAD_PTR;

	for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
		struct ice_acl_rng_data *cfg_data = NULL, *in_data;
		bool will_populate = false;

		in_data = &src_buf->checker_cfg[i];

		/* A zero mask marks an unused checker. Stopping at the first
		 * unused slot assumes used checkers are packed at the front
		 * of src_buf — NOTE(review): confirm callers never leave
		 * gaps, otherwise later checkers would be silently skipped.
		 */
		if (!in_data->mask)
			break;

		/* Find a destination slot that is either free (zero mask) or
		 * already holds an identical checker config (idempotent copy).
		 */
		for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
			cfg_data = &dst_buf->checker_cfg[j];

			if (!cfg_data->mask ||
			    !memcmp(cfg_data, in_data,
				    sizeof(struct ice_acl_rng_data))) {
				will_populate = true;
				break;
			}
		}

		if (will_populate) {
			ice_memcpy(cfg_data, in_data,
				   sizeof(struct ice_acl_rng_data),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			/* No available slot left to program range checker */
			return ICE_ERR_MAX_LIMIT;
		}
	}

	return ICE_SUCCESS;
}
2672
2673 /**
2674  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2675  * @hw: pointer to the hardware structure
2676  * @prof: pointer to flow profile
2677  * @entry: double pointer to the flow entry
2678  *
2679  * For this function, we will look at the current added entries in the
2680  * corresponding ACL scenario. Then, we will perform matching logic to
2681  * see if we want to add/modify/do nothing with this new entry.
2682  */
2683 static enum ice_status
2684 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2685                                  struct ice_flow_entry **entry)
2686 {
2687         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2688         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2689         struct ice_acl_act_entry *acts = NULL;
2690         struct ice_flow_entry *exist;
2691         enum ice_status status = ICE_SUCCESS;
2692         struct ice_flow_entry *e;
2693         u8 i;
2694
2695         if (!entry || !(*entry) || !prof)
2696                 return ICE_ERR_BAD_PTR;
2697
2698         e = *(entry);
2699
2700         do_chg_rng_chk = false;
2701         if (e->range_buf) {
2702                 u8 prof_id = 0;
2703
2704                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2705                                               &prof_id);
2706                 if (status)
2707                         return status;
2708
2709                 /* Query the current range-checker value in FW */
2710                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2711                                                    NULL);
2712                 if (status)
2713                         return status;
2714                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2715                            sizeof(struct ice_aqc_acl_profile_ranges),
2716                            ICE_NONDMA_TO_NONDMA);
2717
2718                 /* Generate the new range-checker value */
2719                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2720                 if (status)
2721                         return status;
2722
2723                 /* Reconfigure the range check if the buffer is changed. */
2724                 do_chg_rng_chk = false;
2725                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2726                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2727                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2728                                                           &cfg_rng_buf, NULL);
2729                         if (status)
2730                                 return status;
2731
2732                         do_chg_rng_chk = true;
2733                 }
2734         }
2735
2736         /* Figure out if we want to (change the ACL action) and/or
2737          * (Add the new ACL entry) and/or (Remove the current ACL entry)
2738          */
2739         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2740                                                   &do_add_entry, &do_rem_entry);
2741
2742         if (do_rem_entry) {
2743                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2744                 if (status)
2745                         return status;
2746         }
2747
2748         /* Prepare the result action buffer */
2749         acts = (struct ice_acl_act_entry *)ice_calloc
2750                 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2751         for (i = 0; i < e->acts_cnt; i++)
2752                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2753                            sizeof(struct ice_acl_act_entry),
2754                            ICE_NONDMA_TO_NONDMA);
2755
2756         if (do_add_entry) {
2757                 enum ice_acl_entry_prior prior;
2758                 u8 *keys, *inverts;
2759                 u16 entry_idx;
2760
2761                 keys = (u8 *)e->entry;
2762                 inverts = keys + (e->entry_sz / 2);
2763                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2764
2765                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2766                                            inverts, acts, e->acts_cnt,
2767                                            &entry_idx);
2768                 if (status)
2769                         goto out;
2770
2771                 e->scen_entry_idx = entry_idx;
2772                 LIST_ADD(&e->l_entry, &prof->entries);
2773         } else {
2774                 if (do_chg_action) {
2775                         /* For the action memory info, update the SW's copy of
2776                          * exist entry with e's action memory info
2777                          */
2778                         ice_free(hw, exist->acts);
2779                         exist->acts_cnt = e->acts_cnt;
2780                         exist->acts = (struct ice_flow_action *)
2781                                 ice_calloc(hw, exist->acts_cnt,
2782                                            sizeof(struct ice_flow_action));
2783
2784                         if (!exist->acts) {
2785                                 status = ICE_ERR_NO_MEMORY;
2786                                 goto out;
2787                         }
2788
2789                         ice_memcpy(exist->acts, e->acts,
2790                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2791                                    ICE_NONDMA_TO_NONDMA);
2792
2793                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2794                                                   e->acts_cnt,
2795                                                   exist->scen_entry_idx);
2796                         if (status)
2797                                 goto out;
2798                 }
2799
2800                 if (do_chg_rng_chk) {
2801                         /* In this case, we want to update the range checker
2802                          * information of the exist entry
2803                          */
2804                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2805                                                             e->range_buf);
2806                         if (status)
2807                                 goto out;
2808                 }
2809
2810                 /* As we don't add the new entry to our SW DB, deallocate its
2811                  * memories, and return the exist entry to the caller
2812                  */
2813                 ice_dealloc_flow_entry(hw, e);
2814                 *(entry) = exist;
2815         }
2816 out:
2817         if (acts)
2818                 ice_free(hw, acts);
2819
2820         return status;
2821 }
2822
2823 /**
2824  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2825  * @hw: pointer to the hardware structure
2826  * @prof: pointer to flow profile
2827  * @e: double pointer to the flow entry
2828  */
2829 static enum ice_status
2830 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2831                             struct ice_flow_entry **e)
2832 {
2833         enum ice_status status;
2834
2835         ice_acquire_lock(&prof->entries_lock);
2836         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2837         ice_release_lock(&prof->entries_lock);
2838
2839         return status;
2840 }
2841
2842 /**
2843  * ice_flow_add_entry - Add a flow entry
2844  * @hw: pointer to the HW struct
2845  * @blk: classification stage
2846  * @prof_id: ID of the profile to add a new flow entry to
2847  * @entry_id: unique ID to identify this flow entry
2848  * @vsi_handle: software VSI handle for the flow entry
2849  * @prio: priority of the flow entry
2850  * @data: pointer to a data buffer containing flow entry's match values/masks
2851  * @acts: arrays of actions to be performed on a match
2852  * @acts_cnt: number of actions
2853  * @entry_h: pointer to buffer that receives the new flow entry's handle
2854  */
2855 enum ice_status
2856 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2857                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2858                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
2859                    u64 *entry_h)
2860 {
2861         struct ice_flow_entry *e = NULL;
2862         struct ice_flow_prof *prof;
2863         enum ice_status status = ICE_SUCCESS;
2864
2865         /* ACL entries must indicate an action */
2866         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2867                 return ICE_ERR_PARAM;
2868
2869         /* No flow entry data is expected for RSS */
2870         if (!entry_h || (!data && blk != ICE_BLK_RSS))
2871                 return ICE_ERR_BAD_PTR;
2872
2873         if (!ice_is_vsi_valid(hw, vsi_handle))
2874                 return ICE_ERR_PARAM;
2875
2876         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2877
2878         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2879         if (!prof) {
2880                 status = ICE_ERR_DOES_NOT_EXIST;
2881         } else {
2882                 /* Allocate memory for the entry being added and associate
2883                  * the VSI to the found flow profile
2884                  */
2885                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2886                 if (!e)
2887                         status = ICE_ERR_NO_MEMORY;
2888                 else
2889                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2890         }
2891
2892         ice_release_lock(&hw->fl_profs_locks[blk]);
2893         if (status)
2894                 goto out;
2895
2896         e->id = entry_id;
2897         e->vsi_handle = vsi_handle;
2898         e->prof = prof;
2899         e->priority = prio;
2900
2901         switch (blk) {
2902         case ICE_BLK_FD:
2903         case ICE_BLK_RSS:
2904                 break;
2905         case ICE_BLK_ACL:
2906                 /* ACL will handle the entry management */
2907                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
2908                                                  acts_cnt);
2909                 if (status)
2910                         goto out;
2911
2912                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
2913                 if (status)
2914                         goto out;
2915
2916                 break;
2917         default:
2918                 status = ICE_ERR_NOT_IMPL;
2919                 goto out;
2920         }
2921
2922         if (blk != ICE_BLK_ACL) {
2923                 /* ACL will handle the entry management */
2924                 ice_acquire_lock(&prof->entries_lock);
2925                 LIST_ADD(&e->l_entry, &prof->entries);
2926                 ice_release_lock(&prof->entries_lock);
2927         }
2928
2929         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
2930
2931 out:
2932         if (status && e) {
2933                 if (e->entry)
2934                         ice_free(hw, e->entry);
2935                 ice_free(hw, e);
2936         }
2937
2938         return status;
2939 }
2940
2941 /**
2942  * ice_flow_rem_entry - Remove a flow entry
2943  * @hw: pointer to the HW struct
2944  * @blk: classification stage
2945  * @entry_h: handle to the flow entry to be removed
2946  */
2947 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2948                                    u64 entry_h)
2949 {
2950         struct ice_flow_entry *entry;
2951         struct ice_flow_prof *prof;
2952         enum ice_status status = ICE_SUCCESS;
2953
2954         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2955                 return ICE_ERR_PARAM;
2956
2957         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2958
2959         /* Retain the pointer to the flow profile as the entry will be freed */
2960         prof = entry->prof;
2961
2962         if (prof) {
2963                 ice_acquire_lock(&prof->entries_lock);
2964                 status = ice_flow_rem_entry_sync(hw, blk, entry);
2965                 ice_release_lock(&prof->entries_lock);
2966         }
2967
2968         return status;
2969 }
2970
2971 /**
2972  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2973  * @seg: packet segment the field being set belongs to
2974  * @fld: field to be set
2975  * @field_type: type of the field
2976  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2977  *           entry's input buffer
2978  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2979  *            input buffer
2980  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2981  *            entry's input buffer
2982  *
2983  * This helper function stores information of a field being matched, including
2984  * the type of the field and the locations of the value to match, the mask, and
2985  * and the upper-bound value in the start of the input buffer for a flow entry.
2986  * This function should only be used for fixed-size data structures.
2987  *
2988  * This function also opportunistically determines the protocol headers to be
2989  * present based on the fields being set. Some fields cannot be used alone to
2990  * determine the protocol headers present. Sometimes, fields for particular
2991  * protocol headers are not matched. In those cases, the protocol headers
2992  * must be explicitly set.
2993  */
2994 static void
2995 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2996                      enum ice_flow_fld_match_type field_type, u16 val_loc,
2997                      u16 mask_loc, u16 last_loc)
2998 {
2999         u64 bit = BIT_ULL(fld);
3000
3001         seg->match |= bit;
3002         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3003                 seg->range |= bit;
3004
3005         seg->fields[fld].type = field_type;
3006         seg->fields[fld].src.val = val_loc;
3007         seg->fields[fld].src.mask = mask_loc;
3008         seg->fields[fld].src.last = last_loc;
3009
3010         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3011 }
3012
3013 /**
3014  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3015  * @seg: packet segment the field being set belongs to
3016  * @fld: field to be set
3017  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3018  *           entry's input buffer
3019  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3020  *            input buffer
3021  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3022  *            entry's input buffer
3023  * @range: indicate if field being matched is to be in a range
3024  *
3025  * This function specifies the locations, in the form of byte offsets from the
3026  * start of the input buffer for a flow entry, from where the value to match,
3027  * the mask value, and upper value can be extracted. These locations are then
3028  * stored in the flow profile. When adding a flow entry associated with the
3029  * flow profile, these locations will be used to quickly extract the values and
3030  * create the content of a match entry. This function should only be used for
3031  * fixed-size data structures.
3032  */
3033 void
3034 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3035                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3036 {
3037         enum ice_flow_fld_match_type t = range ?
3038                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3039
3040         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3041 }
3042
3043 /**
3044  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3045  * @seg: packet segment the field being set belongs to
3046  * @fld: field to be set
3047  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3048  *           entry's input buffer
3049  * @pref_loc: location of prefix value from entry's input buffer
3050  * @pref_sz: size of the location holding the prefix value
3051  *
3052  * This function specifies the locations, in the form of byte offsets from the
3053  * start of the input buffer for a flow entry, from where the value to match
3054  * and the IPv4 prefix value can be extracted. These locations are then stored
3055  * in the flow profile. When adding flow entries to the associated flow profile,
3056  * these locations can be used to quickly extract the values to create the
3057  * content of a match entry. This function should only be used for fixed-size
3058  * data structures.
3059  */
3060 void
3061 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3062                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3063 {
3064         /* For this type of field, the "mask" location is for the prefix value's
3065          * location and the "last" location is for the size of the location of
3066          * the prefix value.
3067          */
3068         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3069                              pref_loc, (u16)pref_sz);
3070 }
3071
3072 /**
3073  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3074  * @seg: packet segment the field being set belongs to
3075  * @off: offset of the raw field from the beginning of the segment in bytes
3076  * @len: length of the raw pattern to be matched
3077  * @val_loc: location of the value to match from entry's input buffer
3078  * @mask_loc: location of mask value from entry's input buffer
3079  *
3080  * This function specifies the offset of the raw field to be match from the
3081  * beginning of the specified packet segment, and the locations, in the form of
3082  * byte offsets from the start of the input buffer for a flow entry, from where
3083  * the value to match and the mask value to be extracted. These locations are
3084  * then stored in the flow profile. When adding flow entries to the associated
3085  * flow profile, these locations can be used to quickly extract the values to
3086  * create the content of a match entry. This function should only be used for
3087  * fixed-size data structures.
3088  */
3089 void
3090 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3091                      u16 val_loc, u16 mask_loc)
3092 {
3093         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3094                 seg->raws[seg->raws_cnt].off = off;
3095                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3096                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3097                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3098                 /* The "last" field is used to store the length of the field */
3099                 seg->raws[seg->raws_cnt].info.src.last = len;
3100         }
3101
3102         /* Overflows of "raws" will be handled as an error condition later in
3103          * the flow when this information is processed.
3104          */
3105         seg->raws_cnt++;
3106 }
3107
3108 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3109 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3110
3111 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3112         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3113
3114 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3115         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3116          ICE_FLOW_SEG_HDR_SCTP)
3117
3118 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3119         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3120          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3121          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3122
3123 /**
3124  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3125  * @segs: pointer to the flow field segment(s)
3126  * @hash_fields: fields to be hashed on for the segment(s)
3127  * @flow_hdr: protocol header fields within a packet segment
3128  *
3129  * Helper function to extract fields from hash bitmap and use flow
3130  * header value to set flow field segment for further use in flow
3131  * profile entry or removal.
3132  */
3133 static enum ice_status
3134 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3135                           u32 flow_hdr)
3136 {
3137         u64 val = hash_fields;
3138         u8 i;
3139
3140         for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3141                 u64 bit = BIT_ULL(i);
3142
3143                 if (val & bit) {
3144                         ice_flow_set_fld(segs, (enum ice_flow_field)i,
3145                                          ICE_FLOW_FLD_OFF_INVAL,
3146                                          ICE_FLOW_FLD_OFF_INVAL,
3147                                          ICE_FLOW_FLD_OFF_INVAL, false);
3148                         val &= ~bit;
3149                 }
3150         }
3151         ICE_FLOW_SET_HDRS(segs, flow_hdr);
3152
3153         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3154             ~ICE_FLOW_RSS_HDRS_INNER_MASK)
3155                 return ICE_ERR_PARAM;
3156
3157         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3158         if (val && !ice_is_pow2(val))
3159                 return ICE_ERR_CFG;
3160
3161         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3162         if (val && !ice_is_pow2(val))
3163                 return ICE_ERR_CFG;
3164
3165         return ICE_SUCCESS;
3166 }
3167
3168 /**
3169  * ice_rem_vsi_rss_list - remove VSI from RSS list
3170  * @hw: pointer to the hardware structure
3171  * @vsi_handle: software VSI handle
3172  *
3173  * Remove the VSI from all RSS configurations in the list.
3174  */
3175 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3176 {
3177         struct ice_rss_cfg *r, *tmp;
3178
3179         if (LIST_EMPTY(&hw->rss_list_head))
3180                 return;
3181
3182         ice_acquire_lock(&hw->rss_locks);
3183         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3184                                  ice_rss_cfg, l_entry)
3185                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3186                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3187                                 LIST_DEL(&r->l_entry);
3188                                 ice_free(hw, r);
3189                         }
3190         ice_release_lock(&hw->rss_locks);
3191 }
3192
3193 /**
3194  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3195  * @hw: pointer to the hardware structure
3196  * @vsi_handle: software VSI handle
3197  *
3198  * This function will iterate through all flow profiles and disassociate
3199  * the VSI from that profile. If the flow profile has no VSIs it will
3200  * be removed.
3201  */
3202 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3203 {
3204         const enum ice_block blk = ICE_BLK_RSS;
3205         struct ice_flow_prof *p, *t;
3206         enum ice_status status = ICE_SUCCESS;
3207
3208         if (!ice_is_vsi_valid(hw, vsi_handle))
3209                 return ICE_ERR_PARAM;
3210
3211         if (LIST_EMPTY(&hw->fl_profs[blk]))
3212                 return ICE_SUCCESS;
3213
3214         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3215         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3216                                  l_entry)
3217                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3218                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3219                         if (status)
3220                                 break;
3221
3222                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3223                                 status = ice_flow_rem_prof_sync(hw, blk, p);
3224                                 if (status)
3225                                         break;
3226                         }
3227                 }
3228         ice_release_lock(&hw->fl_profs_locks[blk]);
3229
3230         return status;
3231 }
3232
3233 /**
3234  * ice_rem_rss_list - remove RSS configuration from list
3235  * @hw: pointer to the hardware structure
3236  * @vsi_handle: software VSI handle
3237  * @prof: pointer to flow profile
3238  *
3239  * Assumption: lock has already been acquired for RSS list
3240  */
3241 static void
3242 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3243 {
3244         struct ice_rss_cfg *r, *tmp;
3245
3246         /* Search for RSS hash fields associated to the VSI that match the
3247          * hash configurations associated to the flow profile. If found
3248          * remove from the RSS entry list of the VSI context and delete entry.
3249          */
3250         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3251                                  ice_rss_cfg, l_entry)
3252                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3253                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3254                         ice_clear_bit(vsi_handle, r->vsis);
3255                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3256                                 LIST_DEL(&r->l_entry);
3257                                 ice_free(hw, r);
3258                         }
3259                         return;
3260                 }
3261 }
3262
3263 /**
3264  * ice_add_rss_list - add RSS configuration to list
3265  * @hw: pointer to the hardware structure
3266  * @vsi_handle: software VSI handle
3267  * @prof: pointer to flow profile
3268  *
3269  * Assumption: lock has already been acquired for RSS list
3270  */
3271 static enum ice_status
3272 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3273 {
3274         struct ice_rss_cfg *r, *rss_cfg;
3275
3276         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3277                             ice_rss_cfg, l_entry)
3278                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3279                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3280                         ice_set_bit(vsi_handle, r->vsis);
3281                         return ICE_SUCCESS;
3282                 }
3283
3284         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3285         if (!rss_cfg)
3286                 return ICE_ERR_NO_MEMORY;
3287
3288         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3289         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3290         rss_cfg->symm = prof->cfg.symm;
3291         ice_set_bit(vsi_handle, rss_cfg->vsis);
3292
3293         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3294
3295         return ICE_SUCCESS;
3296 }
3297
3298 #define ICE_FLOW_PROF_HASH_S    0
3299 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3300 #define ICE_FLOW_PROF_HDR_S     32
3301 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3302 #define ICE_FLOW_PROF_ENCAP_S   63
3303 #define ICE_FLOW_PROF_ENCAP_M   (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
3304
3305 #define ICE_RSS_OUTER_HEADERS   1
3306 #define ICE_RSS_INNER_HEADERS   2
3307
3308 /* Flow profile ID format:
3309  * [0:31] - Packet match fields
3310  * [32:62] - Protocol header
3311  * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
3312  */
3313 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3314         (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3315               (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3316               ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3317
3318 static void
3319 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3320 {
3321         u32 s = ((src % 4) << 3); /* byte shift */
3322         u32 v = dst | 0x80; /* value to program */
3323         u8 i = src / 4; /* register index */
3324         u32 reg;
3325
3326         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3327         reg = (reg & ~(0xff << s)) | (v << s);
3328         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3329 }
3330
3331 static void
3332 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3333 {
3334         int fv_last_word =
3335                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3336         int i;
3337
3338         for (i = 0; i < len; i++) {
3339                 ice_rss_config_xor_word(hw, prof_id,
3340                                         /* Yes, field vector in GLQF_HSYMM and
3341                                          * GLQF_HINSET is inversed!
3342                                          */
3343                                         fv_last_word - (src + i),
3344                                         fv_last_word - (dst + i));
3345                 ice_rss_config_xor_word(hw, prof_id,
3346                                         fv_last_word - (dst + i),
3347                                         fv_last_word - (src + i));
3348         }
3349 }
3350
3351 static void
3352 ice_rss_update_symm(struct ice_hw *hw,
3353                     struct ice_flow_prof *prof)
3354 {
3355         struct ice_prof_map *map;
3356         u8 prof_id, m;
3357
3358         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3359         prof_id = map->prof_id;
3360
3361         /* clear to default */
3362         for (m = 0; m < 6; m++)
3363                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3364         if (prof->cfg.symm) {
3365                 struct ice_flow_seg_info *seg =
3366                         &prof->segs[prof->segs_cnt - 1];
3367
3368                 struct ice_flow_seg_xtrct *ipv4_src =
3369                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3370                 struct ice_flow_seg_xtrct *ipv4_dst =
3371                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3372                 struct ice_flow_seg_xtrct *ipv6_src =
3373                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3374                 struct ice_flow_seg_xtrct *ipv6_dst =
3375                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3376
3377                 struct ice_flow_seg_xtrct *tcp_src =
3378                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3379                 struct ice_flow_seg_xtrct *tcp_dst =
3380                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3381
3382                 struct ice_flow_seg_xtrct *udp_src =
3383                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3384                 struct ice_flow_seg_xtrct *udp_dst =
3385                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3386
3387                 struct ice_flow_seg_xtrct *sctp_src =
3388                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3389                 struct ice_flow_seg_xtrct *sctp_dst =
3390                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3391
3392                 /* xor IPv4 */
3393                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3394                         ice_rss_config_xor(hw, prof_id,
3395                                            ipv4_src->idx, ipv4_dst->idx, 2);
3396
3397                 /* xor IPv6 */
3398                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3399                         ice_rss_config_xor(hw, prof_id,
3400                                            ipv6_src->idx, ipv6_dst->idx, 8);
3401
3402                 /* xor TCP */
3403                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3404                         ice_rss_config_xor(hw, prof_id,
3405                                            tcp_src->idx, tcp_dst->idx, 1);
3406
3407                 /* xor UDP */
3408                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3409                         ice_rss_config_xor(hw, prof_id,
3410                                            udp_src->idx, udp_dst->idx, 1);
3411
3412                 /* xor SCTP */
3413                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3414                         ice_rss_config_xor(hw, prof_id,
3415                                            sctp_src->idx, sctp_dst->idx, 1);
3416         }
3417 }
3418
/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 * @segs_cnt: packet segment count
 * @symm: symmetric hash enable/disable
 *
 * Builds a packet segment from the hash fields and headers, then either
 * reuses an existing matching flow profile or creates a new one, always
 * ending with the VSI associated to a profile with the requested hash
 * and symmetric-hash configuration.
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt, bool symm)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_PARAM;

	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
						      sizeof(*segs));
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto exit;

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		/* Exact profile already in use by this VSI; only the symm
		 * setting may need a hardware update.
		 */
		if (prof->cfg.symm == symm)
			goto exit;
		prof->cfg.symm = symm;
		goto update_symm;
	}

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		if (prof->cfg.symm == symm) {
			status = ice_flow_assoc_prof(hw, blk, prof,
						     vsi_handle);
			if (!status)
				status = ice_add_rss_list(hw, vsi_handle,
							  prof);
		} else {
			/* if a profile exist but with different symmetric
			 * requirement, just return error.
			 */
			status = ICE_ERR_NOT_SUPPORTED;
		}
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(hashed_flds,
						       segs[segs_cnt - 1].hdrs,
						       segs_cnt),
				   segs, segs_cnt, NULL, 0, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

	prof->cfg.symm = symm;

update_symm:
	/* Push the (possibly changed) symmetric setting to hardware */
	ice_rss_update_symm(hw, prof);

exit:
	ice_free(hw, segs);
	return status;
}
3543
3544 /**
3545  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3546  * @hw: pointer to the hardware structure
3547  * @vsi_handle: software VSI handle
3548  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3549  * @addl_hdrs: protocol header fields
3550  * @symm: symmetric hash enable/disable
3551  *
3552  * This function will generate a flow profile based on fields associated with
3553  * the input fields to hash on, the flow type and use the VSI number to add
3554  * a flow entry to the profile.
3555  */
3556 enum ice_status
3557 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3558                 u32 addl_hdrs, bool symm)
3559 {
3560         enum ice_status status;
3561
3562         if (hashed_flds == ICE_HASH_INVALID ||
3563             !ice_is_vsi_valid(hw, vsi_handle))
3564                 return ICE_ERR_PARAM;
3565
3566         ice_acquire_lock(&hw->rss_locks);
3567         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3568                                       ICE_RSS_OUTER_HEADERS, symm);
3569         if (!status)
3570                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3571                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3572                                               symm);
3573         ice_release_lock(&hw->rss_locks);
3574
3575         return status;
3576 }
3577
3578 /**
3579  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3580  * @hw: pointer to the hardware structure
3581  * @vsi_handle: software VSI handle
3582  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3583  * @addl_hdrs: Protocol header fields within a packet segment
3584  * @segs_cnt: packet segment count
3585  *
3586  * Assumption: lock has already been acquired for RSS list
3587  */
3588 static enum ice_status
3589 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3590                      u32 addl_hdrs, u8 segs_cnt)
3591 {
3592         const enum ice_block blk = ICE_BLK_RSS;
3593         struct ice_flow_seg_info *segs;
3594         struct ice_flow_prof *prof;
3595         enum ice_status status;
3596
3597         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3598                                                       sizeof(*segs));
3599         if (!segs)
3600                 return ICE_ERR_NO_MEMORY;
3601
3602         /* Construct the packet segment info from the hashed fields */
3603         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3604                                            addl_hdrs);
3605         if (status)
3606                 goto out;
3607
3608         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3609                                         vsi_handle,
3610                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3611         if (!prof) {
3612                 status = ICE_ERR_DOES_NOT_EXIST;
3613                 goto out;
3614         }
3615
3616         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3617         if (status)
3618                 goto out;
3619
3620         /* Remove RSS configuration from VSI context before deleting
3621          * the flow profile.
3622          */
3623         ice_rem_rss_list(hw, vsi_handle, prof);
3624
3625         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3626                 status = ice_flow_rem_prof(hw, blk, prof->id);
3627
3628 out:
3629         ice_free(hw, segs);
3630         return status;
3631 }
3632
3633 /**
3634  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3635  * @hw: pointer to the hardware structure
3636  * @vsi_handle: software VSI handle
3637  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3638  * @addl_hdrs: Protocol header fields within a packet segment
3639  *
3640  * This function will lookup the flow profile based on the input
3641  * hash field bitmap, iterate through the profile entry list of
3642  * that profile and find entry associated with input VSI to be
3643  * removed. Calls are made to underlying flow apis which will in
3644  * turn build or update buffers for RSS XLT1 section.
3645  */
3646 enum ice_status
3647 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3648                 u32 addl_hdrs)
3649 {
3650         enum ice_status status;
3651
3652         if (hashed_flds == ICE_HASH_INVALID ||
3653             !ice_is_vsi_valid(hw, vsi_handle))
3654                 return ICE_ERR_PARAM;
3655
3656         ice_acquire_lock(&hw->rss_locks);
3657         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3658                                       ICE_RSS_OUTER_HEADERS);
3659         if (!status)
3660                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3661                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3662         ice_release_lock(&hw->rss_locks);
3663
3664         return status;
3665 }
3666
3667 /**
3668  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3669  * @hw: pointer to the hardware structure
3670  * @vsi_handle: software VSI handle
3671  */
3672 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3673 {
3674         enum ice_status status = ICE_SUCCESS;
3675         struct ice_rss_cfg *r;
3676
3677         if (!ice_is_vsi_valid(hw, vsi_handle))
3678                 return ICE_ERR_PARAM;
3679
3680         ice_acquire_lock(&hw->rss_locks);
3681         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3682                             ice_rss_cfg, l_entry) {
3683                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3684                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3685                                                       r->hashed_flds,
3686                                                       r->packet_hdr,
3687                                                       ICE_RSS_OUTER_HEADERS,
3688                                                       r->symm);
3689                         if (status)
3690                                 break;
3691                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3692                                                       r->hashed_flds,
3693                                                       r->packet_hdr,
3694                                                       ICE_RSS_INNER_HEADERS,
3695                                                       r->symm);
3696                         if (status)
3697                                 break;
3698                 }
3699         }
3700         ice_release_lock(&hw->rss_locks);
3701
3702         return status;
3703 }
3704
3705 /**
3706  * ice_get_rss_cfg - returns hashed fields for the given header types
3707  * @hw: pointer to the hardware structure
3708  * @vsi_handle: software VSI handle
3709  * @hdrs: protocol header type
3710  *
3711  * This function will return the match fields of the first instance of flow
3712  * profile having the given header types and containing input VSI
3713  */
3714 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3715 {
3716         struct ice_rss_cfg *r, *rss_cfg = NULL;
3717
3718         /* verify if the protocol header is non zero and VSI is valid */
3719         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3720                 return ICE_HASH_INVALID;
3721
3722         ice_acquire_lock(&hw->rss_locks);
3723         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3724                             ice_rss_cfg, l_entry)
3725                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3726                     r->packet_hdr == hdrs) {
3727                         rss_cfg = r;
3728                         break;
3729                 }
3730         ice_release_lock(&hw->rss_locks);
3731
3732         return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
3733 }