dpdk.git: drivers/net/ice/base/ice_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
17 #define ICE_FLOW_FLD_SZ_IP_TTL          1
18 #define ICE_FLOW_FLD_SZ_IP_PROT         1
19 #define ICE_FLOW_FLD_SZ_PORT            2
20 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
21 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
22 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
23 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
24 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
25 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
26 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
27 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
28 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
29 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
30 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
31 #define ICE_FLOW_FLD_SZ_AH_SPI  4
32 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
33
34 /* Describe properties of a protocol header field */
35 struct ice_flow_field_info {
36         enum ice_flow_seg_hdr hdr;
37         s16 off;        /* Offset from start of a protocol header, in bits */
38         u16 size;       /* Size of field in bits */
39         u16 mask;       /* 16-bit mask for field */
40 };
41
42 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
43         .hdr = _hdr, \
44         .off = (_offset_bytes) * BITS_PER_BYTE, \
45         .size = (_size_bytes) * BITS_PER_BYTE, \
46         .mask = 0, \
47 }
48
49 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
50         .hdr = _hdr, \
51         .off = (_offset_bytes) * BITS_PER_BYTE, \
52         .size = (_size_bytes) * BITS_PER_BYTE, \
53         .mask = _mask, \
54 }
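/* For illustration: with BITS_PER_BYTE == 8,
 * ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR)
 * expands to { .hdr = ICE_FLOW_SEG_HDR_IPV4, .off = 96, .size = 32, .mask = 0 },
 * i.e. the IPv4 source address, which starts 96 bits into the IPv4 header.
 */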
55
56 /* Table containing properties of supported protocol header fields */
57 static const
58 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
59         /* Ether */
60         /* ICE_FLOW_FIELD_IDX_ETH_DA */
61         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
62         /* ICE_FLOW_FIELD_IDX_ETH_SA */
63         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
64         /* ICE_FLOW_FIELD_IDX_S_VLAN */
65         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
66         /* ICE_FLOW_FIELD_IDX_C_VLAN */
67         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
68         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
69         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
70         /* IPv4 / IPv6 */
71         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
72         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
73                               0x00fc),
74         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
75         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
76                               0x0ff0),
77         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
78         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
80         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
81         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
82                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
83         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
84         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
86         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
87         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
88                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
89         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
90         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
91         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
92         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
94         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
95         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
96         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
99                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
100         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
101         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
102                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
103         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
104         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
105                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
106         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
107         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
108                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
109         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
110         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
111                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
112         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
113         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
114                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
115         /* Transport */
116         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
117         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
118         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
119         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
120         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
121         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
122         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
123         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
124         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
125         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
126         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
127         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
128         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
129         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
130         /* ARP */
131         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
132         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
133         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
134         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
136         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
137         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
138         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
139         /* ICE_FLOW_FIELD_IDX_ARP_OP */
140         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
141         /* ICMP */
142         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
143         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
144         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
145         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
146         /* GRE */
147         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
148         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
149         /* GTP */
150         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
151         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
152                           ICE_FLOW_FLD_SZ_GTP_TEID),
153         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
154         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
155                           ICE_FLOW_FLD_SZ_GTP_TEID),
156         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
157         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
158                           ICE_FLOW_FLD_SZ_GTP_TEID),
159         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
160         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
161                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
162         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
163         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
164                           ICE_FLOW_FLD_SZ_GTP_TEID),
165         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
166         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
167                           ICE_FLOW_FLD_SZ_GTP_TEID),
168         /* PPPOE */
169         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
170         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
171                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
172         /* PFCP */
173         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
174         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
175                           ICE_FLOW_FLD_SZ_PFCP_SEID),
176         /* L2TPV3 */
177         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
178         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
179                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
180         /* ESP */
181         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
182         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
183                           ICE_FLOW_FLD_SZ_ESP_SPI),
184         /* AH */
185         /* ICE_FLOW_FIELD_IDX_AH_SPI */
186         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
187                           ICE_FLOW_FLD_SZ_AH_SPI),
188         /* NAT_T_ESP */
189         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
190         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
191                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
192 };
193
194 /* Bitmaps indicating relevant packet types for a particular protocol header
195  *
196  * Packet types for packets with an Outer/First/Single MAC header
197  */
198 static const u32 ice_ptypes_mac_ofos[] = {
199         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
200         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
201         0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
202         0x00000000, 0x00000000, 0x00000000, 0x00000000,
203         0x00000000, 0x00000000, 0x00000000, 0x00000000,
204         0x00000000, 0x00000000, 0x00000000, 0x00000000,
205         0x00000000, 0x00000000, 0x00000000, 0x00000000,
206         0x00000000, 0x00000000, 0x00000000, 0x00000000,
207 };
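/* Note: each ice_ptypes_* array here is used as a bitmap over packet types,
 * one bit per ptype (32 u32 words, i.e. 1024 bits, which is expected to match
 * ICE_FLOW_PTYPE_MAX). ice_flow_proc_seg_hdrs() below ANDs together the
 * bitmaps of all headers present in a segment, so only ptypes carrying every
 * requested header remain set in params->ptypes.
 */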
208
209 /* Packet types for packets with an Innermost/Last MAC VLAN header */
210 static const u32 ice_ptypes_macvlan_il[] = {
211         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
212         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
213         0x00000000, 0x00000000, 0x00000000, 0x00000000,
214         0x00000000, 0x00000000, 0x00000000, 0x00000000,
215         0x00000000, 0x00000000, 0x00000000, 0x00000000,
216         0x00000000, 0x00000000, 0x00000000, 0x00000000,
217         0x00000000, 0x00000000, 0x00000000, 0x00000000,
218         0x00000000, 0x00000000, 0x00000000, 0x00000000,
219 };
220
221 /* Packet types for packets with an Outer/First/Single IPv4 header */
222 static const u32 ice_ptypes_ipv4_ofos[] = {
223         0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
224         0x00000000, 0x00000155, 0x00000000, 0x00000000,
225         0x0003000F, 0x000FC000, 0x83E0F800, 0x00000101,
226         0x00000000, 0x00000000, 0x00000000, 0x00000000,
227         0x00000000, 0x00000000, 0x00000000, 0x00000000,
228         0x00000000, 0x00000000, 0x00000000, 0x00000000,
229         0x00000000, 0x00000000, 0x00000000, 0x00000000,
230         0x00000000, 0x00000000, 0x00000000, 0x00000000,
231 };
232
233 /* Packet types for packets with an Innermost/Last IPv4 header */
234 static const u32 ice_ptypes_ipv4_il[] = {
235         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
236         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
237         0x00000000, 0x00000000, 0x001FF800, 0x00000000,
238         0x00000000, 0x00000000, 0x00000000, 0x00000000,
239         0x00000000, 0x00000000, 0x00000000, 0x00000000,
240         0x00000000, 0x00000000, 0x00000000, 0x00000000,
241         0x00000000, 0x00000000, 0x00000000, 0x00000000,
242         0x00000000, 0x00000000, 0x00000000, 0x00000000,
243 };
244
245 /* Packet types for packets with an Outer/First/Single IPv6 header */
246 static const u32 ice_ptypes_ipv6_ofos[] = {
247         0x00000000, 0x00000000, 0x77000000, 0x10002000,
248         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
249         0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
250         0x00000000, 0x00000000, 0x00000000, 0x00000000,
251         0x00000000, 0x00000000, 0x00000000, 0x00000000,
252         0x00000000, 0x00000000, 0x00000000, 0x00000000,
253         0x00000000, 0x00000000, 0x00000000, 0x00000000,
254         0x00000000, 0x00000000, 0x00000000, 0x00000000,
255 };
256
257 /* Packet types for packets with an Innermost/Last IPv6 header */
258 static const u32 ice_ptypes_ipv6_il[] = {
259         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
260         0x00000770, 0x00000000, 0x00000000, 0x00000000,
261         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
262         0x00000000, 0x00000000, 0x00000000, 0x00000000,
263         0x00000000, 0x00000000, 0x00000000, 0x00000000,
264         0x00000000, 0x00000000, 0x00000000, 0x00000000,
265         0x00000000, 0x00000000, 0x00000000, 0x00000000,
266         0x00000000, 0x00000000, 0x00000000, 0x00000000,
267 };
268
269 /* Packet types for packets with an Outermost/First ARP header */
270 static const u32 ice_ptypes_arp_of[] = {
271         0x00000800, 0x00000000, 0x00000000, 0x00000000,
272         0x00000000, 0x00000000, 0x00000000, 0x00000000,
273         0x00000000, 0x00000000, 0x00000000, 0x00000000,
274         0x00000000, 0x00000000, 0x00000000, 0x00000000,
275         0x00000000, 0x00000000, 0x00000000, 0x00000000,
276         0x00000000, 0x00000000, 0x00000000, 0x00000000,
277         0x00000000, 0x00000000, 0x00000000, 0x00000000,
278         0x00000000, 0x00000000, 0x00000000, 0x00000000,
279 };
280
281 /* UDP Packet types for non-tunneled packets or tunneled
282  * packets with inner UDP.
283  */
284 static const u32 ice_ptypes_udp_il[] = {
285         0x81000000, 0x20204040, 0x04000010, 0x80810102,
286         0x00000040, 0x00000000, 0x00000000, 0x00000000,
287         0x00000000, 0x00410000, 0x90842000, 0x00000007,
288         0x00000000, 0x00000000, 0x00000000, 0x00000000,
289         0x00000000, 0x00000000, 0x00000000, 0x00000000,
290         0x00000000, 0x00000000, 0x00000000, 0x00000000,
291         0x00000000, 0x00000000, 0x00000000, 0x00000000,
292         0x00000000, 0x00000000, 0x00000000, 0x00000000,
293 };
294
295 /* Packet types for packets with an Innermost/Last TCP header */
296 static const u32 ice_ptypes_tcp_il[] = {
297         0x04000000, 0x80810102, 0x10000040, 0x02040408,
298         0x00000102, 0x00000000, 0x00000000, 0x00000000,
299         0x00000000, 0x00820000, 0x21084000, 0x00000000,
300         0x00000000, 0x00000000, 0x00000000, 0x00000000,
301         0x00000000, 0x00000000, 0x00000000, 0x00000000,
302         0x00000000, 0x00000000, 0x00000000, 0x00000000,
303         0x00000000, 0x00000000, 0x00000000, 0x00000000,
304         0x00000000, 0x00000000, 0x00000000, 0x00000000,
305 };
306
307 /* Packet types for packets with an Innermost/Last SCTP header */
308 static const u32 ice_ptypes_sctp_il[] = {
309         0x08000000, 0x01020204, 0x20000081, 0x04080810,
310         0x00000204, 0x00000000, 0x00000000, 0x00000000,
311         0x00000000, 0x01040000, 0x00000000, 0x00000000,
312         0x00000000, 0x00000000, 0x00000000, 0x00000000,
313         0x00000000, 0x00000000, 0x00000000, 0x00000000,
314         0x00000000, 0x00000000, 0x00000000, 0x00000000,
315         0x00000000, 0x00000000, 0x00000000, 0x00000000,
316         0x00000000, 0x00000000, 0x00000000, 0x00000000,
317 };
318
319 /* Packet types for packets with an Outermost/First ICMP header */
320 static const u32 ice_ptypes_icmp_of[] = {
321         0x10000000, 0x00000000, 0x00000000, 0x00000000,
322         0x00000000, 0x00000000, 0x00000000, 0x00000000,
323         0x00000000, 0x00000000, 0x00000000, 0x00000000,
324         0x00000000, 0x00000000, 0x00000000, 0x00000000,
325         0x00000000, 0x00000000, 0x00000000, 0x00000000,
326         0x00000000, 0x00000000, 0x00000000, 0x00000000,
327         0x00000000, 0x00000000, 0x00000000, 0x00000000,
328         0x00000000, 0x00000000, 0x00000000, 0x00000000,
329 };
330
331 /* Packet types for packets with an Innermost/Last ICMP header */
332 static const u32 ice_ptypes_icmp_il[] = {
333         0x00000000, 0x02040408, 0x40000102, 0x08101020,
334         0x00000408, 0x00000000, 0x00000000, 0x00000000,
335         0x00000000, 0x00000000, 0x42108000, 0x00000000,
336         0x00000000, 0x00000000, 0x00000000, 0x00000000,
337         0x00000000, 0x00000000, 0x00000000, 0x00000000,
338         0x00000000, 0x00000000, 0x00000000, 0x00000000,
339         0x00000000, 0x00000000, 0x00000000, 0x00000000,
340         0x00000000, 0x00000000, 0x00000000, 0x00000000,
341 };
342
343 /* Packet types for packets with an Outermost/First GRE header */
344 static const u32 ice_ptypes_gre_of[] = {
345         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
346         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
347         0x00000000, 0x00000000, 0x00000000, 0x00000000,
348         0x00000000, 0x00000000, 0x00000000, 0x00000000,
349         0x00000000, 0x00000000, 0x00000000, 0x00000000,
350         0x00000000, 0x00000000, 0x00000000, 0x00000000,
351         0x00000000, 0x00000000, 0x00000000, 0x00000000,
352         0x00000000, 0x00000000, 0x00000000, 0x00000000,
353 };
354
355 /* Packet types for packets with an Innermost/Last MAC header */
356 static const u32 ice_ptypes_mac_il[] = {
357         0x00000000, 0x00000000, 0x00000000, 0x00000000,
358         0x00000000, 0x00000000, 0x00000000, 0x00000000,
359         0x00000000, 0x00000000, 0x00000000, 0x00000000,
360         0x00000000, 0x00000000, 0x00000000, 0x00000000,
361         0x00000000, 0x00000000, 0x00000000, 0x00000000,
362         0x00000000, 0x00000000, 0x00000000, 0x00000000,
363         0x00000000, 0x00000000, 0x00000000, 0x00000000,
364         0x00000000, 0x00000000, 0x00000000, 0x00000000,
365 };
366
367 /* Packet types for GTPC */
368 static const u32 ice_ptypes_gtpc[] = {
369         0x00000000, 0x00000000, 0x00000000, 0x00000000,
370         0x00000000, 0x00000000, 0x00000000, 0x00000000,
371         0x00000000, 0x00000000, 0x00000180, 0x00000000,
372         0x00000000, 0x00000000, 0x00000000, 0x00000000,
373         0x00000000, 0x00000000, 0x00000000, 0x00000000,
374         0x00000000, 0x00000000, 0x00000000, 0x00000000,
375         0x00000000, 0x00000000, 0x00000000, 0x00000000,
376         0x00000000, 0x00000000, 0x00000000, 0x00000000,
377 };
378
379 /* Packet types for GTPC with TEID */
380 static const u32 ice_ptypes_gtpc_tid[] = {
381         0x00000000, 0x00000000, 0x00000000, 0x00000000,
382         0x00000000, 0x00000000, 0x00000000, 0x00000000,
383         0x00000000, 0x00000000, 0x00000060, 0x00000000,
384         0x00000000, 0x00000000, 0x00000000, 0x00000000,
385         0x00000000, 0x00000000, 0x00000000, 0x00000000,
386         0x00000000, 0x00000000, 0x00000000, 0x00000000,
387         0x00000000, 0x00000000, 0x00000000, 0x00000000,
388         0x00000000, 0x00000000, 0x00000000, 0x00000000,
389 };
390
391 /* Packet types for GTPU */
392 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
393         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
394         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
395         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
396         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
397         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
398         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
399         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
400         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
401         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
402         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
403         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
404         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
405         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
406         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
407         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
408         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
409         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
410         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
411         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
412         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
413 };
414
415 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
416         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
417         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
418         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
419         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
420         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
421         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
422         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
423         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
424         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
425         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
426         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
427         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
428         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
429         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
430         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
431         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
432         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
433         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
434         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
435         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
436 };
437
438 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
439         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
440         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
441         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
442         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
443         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
444         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
445         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
446         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
447         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
448         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
449         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
450         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
451         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
452         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
453         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
454         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
455         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
456         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
457         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
458         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
459 };
460
461 static const u32 ice_ptypes_gtpu[] = {
462         0x00000000, 0x00000000, 0x00000000, 0x00000000,
463         0x00000000, 0x00000000, 0x00000000, 0x00000000,
464         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
465         0x00000000, 0x00000000, 0x00000000, 0x00000000,
466         0x00000000, 0x00000000, 0x00000000, 0x00000000,
467         0x00000000, 0x00000000, 0x00000000, 0x00000000,
468         0x00000000, 0x00000000, 0x00000000, 0x00000000,
469         0x00000000, 0x00000000, 0x00000000, 0x00000000,
470 };
471
472 /* Packet types for pppoe */
473 static const u32 ice_ptypes_pppoe[] = {
474         0x00000000, 0x00000000, 0x00000000, 0x00000000,
475         0x00000000, 0x00000000, 0x00000000, 0x00000000,
476         0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
477         0x00000000, 0x00000000, 0x00000000, 0x00000000,
478         0x00000000, 0x00000000, 0x00000000, 0x00000000,
479         0x00000000, 0x00000000, 0x00000000, 0x00000000,
480         0x00000000, 0x00000000, 0x00000000, 0x00000000,
481         0x00000000, 0x00000000, 0x00000000, 0x00000000,
482 };
483
484 /* Packet types for packets with PFCP NODE header */
485 static const u32 ice_ptypes_pfcp_node[] = {
486         0x00000000, 0x00000000, 0x00000000, 0x00000000,
487         0x00000000, 0x00000000, 0x00000000, 0x00000000,
488         0x00000000, 0x00000000, 0x80000000, 0x00000002,
489         0x00000000, 0x00000000, 0x00000000, 0x00000000,
490         0x00000000, 0x00000000, 0x00000000, 0x00000000,
491         0x00000000, 0x00000000, 0x00000000, 0x00000000,
492         0x00000000, 0x00000000, 0x00000000, 0x00000000,
493         0x00000000, 0x00000000, 0x00000000, 0x00000000,
494 };
495
496 /* Packet types for packets with PFCP SESSION header */
497 static const u32 ice_ptypes_pfcp_session[] = {
498         0x00000000, 0x00000000, 0x00000000, 0x00000000,
499         0x00000000, 0x00000000, 0x00000000, 0x00000000,
500         0x00000000, 0x00000000, 0x00000000, 0x00000005,
501         0x00000000, 0x00000000, 0x00000000, 0x00000000,
502         0x00000000, 0x00000000, 0x00000000, 0x00000000,
503         0x00000000, 0x00000000, 0x00000000, 0x00000000,
504         0x00000000, 0x00000000, 0x00000000, 0x00000000,
505         0x00000000, 0x00000000, 0x00000000, 0x00000000,
506 };
507
508 /* Packet types for l2tpv3 */
509 static const u32 ice_ptypes_l2tpv3[] = {
510         0x00000000, 0x00000000, 0x00000000, 0x00000000,
511         0x00000000, 0x00000000, 0x00000000, 0x00000000,
512         0x00000000, 0x00000000, 0x00000000, 0x00000300,
513         0x00000000, 0x00000000, 0x00000000, 0x00000000,
514         0x00000000, 0x00000000, 0x00000000, 0x00000000,
515         0x00000000, 0x00000000, 0x00000000, 0x00000000,
516         0x00000000, 0x00000000, 0x00000000, 0x00000000,
517         0x00000000, 0x00000000, 0x00000000, 0x00000000,
518 };
519
520 /* Packet types for esp */
521 static const u32 ice_ptypes_esp[] = {
522         0x00000000, 0x00000000, 0x00000000, 0x00000000,
523         0x00000000, 0x00000003, 0x00000000, 0x00000000,
524         0x00000000, 0x00000000, 0x00000000, 0x00000000,
525         0x00000000, 0x00000000, 0x00000000, 0x00000000,
526         0x00000000, 0x00000000, 0x00000000, 0x00000000,
527         0x00000000, 0x00000000, 0x00000000, 0x00000000,
528         0x00000000, 0x00000000, 0x00000000, 0x00000000,
529         0x00000000, 0x00000000, 0x00000000, 0x00000000,
530 };
531
532 /* Packet types for ah */
533 static const u32 ice_ptypes_ah[] = {
534         0x00000000, 0x00000000, 0x00000000, 0x00000000,
535         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
536         0x00000000, 0x00000000, 0x00000000, 0x00000000,
537         0x00000000, 0x00000000, 0x00000000, 0x00000000,
538         0x00000000, 0x00000000, 0x00000000, 0x00000000,
539         0x00000000, 0x00000000, 0x00000000, 0x00000000,
540         0x00000000, 0x00000000, 0x00000000, 0x00000000,
541         0x00000000, 0x00000000, 0x00000000, 0x00000000,
542 };
543
544 /* Packet types for packets with NAT_T ESP header */
545 static const u32 ice_ptypes_nat_t_esp[] = {
546         0x00000000, 0x00000000, 0x00000000, 0x00000000,
547         0x00000000, 0x00000030, 0x00000000, 0x00000000,
548         0x00000000, 0x00000000, 0x00000000, 0x00000000,
549         0x00000000, 0x00000000, 0x00000000, 0x00000000,
550         0x00000000, 0x00000000, 0x00000000, 0x00000000,
551         0x00000000, 0x00000000, 0x00000000, 0x00000000,
552         0x00000000, 0x00000000, 0x00000000, 0x00000000,
553         0x00000000, 0x00000000, 0x00000000, 0x00000000,
554 };
555
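/* Packet types for packets with an Outer/First/Single non-IP MAC header */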
556 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
557         0x00000846, 0x00000000, 0x00000000, 0x00000000,
558         0x00000000, 0x00000000, 0x00000000, 0x00000000,
559         0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
560         0x00000000, 0x00000000, 0x00000000, 0x00000000,
561         0x00000000, 0x00000000, 0x00000000, 0x00000000,
562         0x00000000, 0x00000000, 0x00000000, 0x00000000,
563         0x00000000, 0x00000000, 0x00000000, 0x00000000,
564         0x00000000, 0x00000000, 0x00000000, 0x00000000,
565 };
566
567 /* Manage parameters and info used during the creation of a flow profile */
568 struct ice_flow_prof_params {
569         enum ice_block blk;
570         u16 entry_length; /* # of bytes formatted entry will require */
571         u8 es_cnt;
572         struct ice_flow_prof *prof;
573
574         /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0.
575          * This will give us the direction flags.
576          */
577         struct ice_fv_word es[ICE_MAX_FV_WORDS];
578         /* attr can be used to attach attributes to a particular PTYPE */
579         const struct ice_ptype_attributes *attr;
580         u16 attr_cnt;
581
582         u16 mask[ICE_MAX_FV_WORDS];
583         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
584 };
585
586 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
587         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
588         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
589         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
590         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
591         ICE_FLOW_SEG_HDR_NAT_T_ESP)
592
593 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
594         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
595 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
596         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
597          ICE_FLOW_SEG_HDR_ARP)
598 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
599         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
600          ICE_FLOW_SEG_HDR_SCTP)
601
602 /**
603  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
604  * @segs: array of one or more packet segments that describe the flow
605  * @segs_cnt: number of packet segments provided
606  */
607 static enum ice_status
608 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
609 {
610         u8 i;
611
612         for (i = 0; i < segs_cnt; i++) {
613                 /* Multiple L3 headers */
614                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
615                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
616                         return ICE_ERR_PARAM;
617
618                 /* Multiple L4 headers */
619                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
620                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
621                         return ICE_ERR_PARAM;
622         }
623
624         return ICE_SUCCESS;
625 }
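/* For example, a segment whose hdrs has both ICE_FLOW_SEG_HDR_IPV4 and
 * ICE_FLOW_SEG_HDR_IPV6 set fails the ice_is_pow2() check above and is
 * rejected with ICE_ERR_PARAM: at most one L3 and one L4 header type may be
 * selected per segment.
 */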
626
627 /* Sizes of fixed known protocol headers without header options */
628 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
629 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
630 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
631 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
632 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
633 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
634 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
635 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
636 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
637
638 /**
639  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
640  * @params: information about the flow to be processed
641  * @seg: index of packet segment whose header size is to be determined
642  */
643 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
644 {
645         u16 sz;
646
647         /* L2 headers */
648         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
649                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
650
651         /* L3 headers */
652         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
653                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
654         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
655                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
656         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
657                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
658         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
659                 /* An L3 header is required if L4 is specified */
660                 return 0;
661
662         /* L4 headers */
663         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
664                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
665         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
666                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
667         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
668                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
669         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
670                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
671
672         return sz;
673 }
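/* For example, a segment with ICE_FLOW_SEG_HDR_VLAN, ICE_FLOW_SEG_HDR_IPV4 and
 * ICE_FLOW_SEG_HDR_TCP set yields 16 + 20 + 20 = 56 bytes (MAC+VLAN, IPv4 and
 * TCP fixed header sizes, without options).
 */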
674
675 /**
676  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
677  * @params: information about the flow to be processed
678  *
679  * This function identifies the packet types associated with the protocol
680  * headers present in the packet segments of the specified flow profile.
681  */
682 static enum ice_status
683 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
684 {
685         struct ice_flow_prof *prof;
686         u8 i;
687
688         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
689                    ICE_NONDMA_MEM);
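        /* Start from an all-ones ptype bitmap; the loop below narrows it down
         * by ANDing in the bitmap of every protocol header present in each
         * segment.
         */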
690
691         prof = params->prof;
692
693         for (i = 0; i < params->prof->segs_cnt; i++) {
694                 const ice_bitmap_t *src;
695                 u32 hdrs;
696
697                 hdrs = prof->segs[i].hdrs;
698
699                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
700                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
701                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
702                         ice_and_bitmap(params->ptypes, params->ptypes, src,
703                                        ICE_FLOW_PTYPE_MAX);
704                 }
705
706                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
707                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
708                         ice_and_bitmap(params->ptypes, params->ptypes, src,
709                                        ICE_FLOW_PTYPE_MAX);
710                 }
711
712                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
713                         ice_and_bitmap(params->ptypes, params->ptypes,
714                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
715                                        ICE_FLOW_PTYPE_MAX);
716                 }
717
718                 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
719                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
720                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
721                         ice_and_bitmap(params->ptypes, params->ptypes, src,
722                                        ICE_FLOW_PTYPE_MAX);
723                         if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
724                                 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
725                                 ice_and_bitmap(params->ptypes,
726                                                 params->ptypes, src,
727                                                ICE_FLOW_PTYPE_MAX);
728                         } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
729                                 ice_and_bitmap(params->ptypes, params->ptypes,
730                                                (const ice_bitmap_t *)
731                                                ice_ptypes_tcp_il,
732                                                ICE_FLOW_PTYPE_MAX);
733                         } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
734                                 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
735                                 ice_and_bitmap(params->ptypes, params->ptypes,
736                                                src, ICE_FLOW_PTYPE_MAX);
737                         }
738                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
739                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
740                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
741                         ice_and_bitmap(params->ptypes, params->ptypes, src,
742                                        ICE_FLOW_PTYPE_MAX);
743                         if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
744                                 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
745                                 ice_and_bitmap(params->ptypes,
746                                                 params->ptypes, src,
747                                                ICE_FLOW_PTYPE_MAX);
748                         } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
749                                 ice_and_bitmap(params->ptypes, params->ptypes,
750                                                (const ice_bitmap_t *)
751                                                ice_ptypes_tcp_il,
752                                                ICE_FLOW_PTYPE_MAX);
753                         } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
754                                 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
755                                 ice_and_bitmap(params->ptypes, params->ptypes,
756                                                src, ICE_FLOW_PTYPE_MAX);
757                         }
758                 }
759
760                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
761                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
762                         ice_and_bitmap(params->ptypes, params->ptypes,
763                                        src, ICE_FLOW_PTYPE_MAX);
764                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
765                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
766                         ice_and_bitmap(params->ptypes, params->ptypes, src,
767                                        ICE_FLOW_PTYPE_MAX);
768                 }
769
770                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
771                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
772                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
773                         ice_and_bitmap(params->ptypes, params->ptypes, src,
774                                        ICE_FLOW_PTYPE_MAX);
775                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
776                         if (!i) {
777                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
778                                 ice_and_bitmap(params->ptypes, params->ptypes,
779                                                src, ICE_FLOW_PTYPE_MAX);
780                         }
781                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
782                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
783                         ice_and_bitmap(params->ptypes, params->ptypes,
784                                        src, ICE_FLOW_PTYPE_MAX);
785                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
786                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
787                         ice_and_bitmap(params->ptypes, params->ptypes,
788                                        src, ICE_FLOW_PTYPE_MAX);
789                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
790                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
791                         ice_and_bitmap(params->ptypes, params->ptypes,
792                                        src, ICE_FLOW_PTYPE_MAX);
793
794                         /* Attributes for GTP packet with downlink */
795                         params->attr = ice_attr_gtpu_down;
796                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
797                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
798                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
799                         ice_and_bitmap(params->ptypes, params->ptypes,
800                                        src, ICE_FLOW_PTYPE_MAX);
801
802                         /* Attributes for GTP packet with uplink */
803                         params->attr = ice_attr_gtpu_up;
804                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
805                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
806                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
807                         ice_and_bitmap(params->ptypes, params->ptypes,
808                                        src, ICE_FLOW_PTYPE_MAX);
809
810                         /* Attributes for GTP packet with Extension Header */
811                         params->attr = ice_attr_gtpu_eh;
812                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
813                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
814                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
815                         ice_and_bitmap(params->ptypes, params->ptypes,
816                                        src, ICE_FLOW_PTYPE_MAX);
817                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
818                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
819                         ice_and_bitmap(params->ptypes, params->ptypes,
820                                        src, ICE_FLOW_PTYPE_MAX);
821                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
822                         src = (const ice_bitmap_t *)ice_ptypes_esp;
823                         ice_and_bitmap(params->ptypes, params->ptypes,
824                                        src, ICE_FLOW_PTYPE_MAX);
825                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
826                         src = (const ice_bitmap_t *)ice_ptypes_ah;
827                         ice_and_bitmap(params->ptypes, params->ptypes,
828                                        src, ICE_FLOW_PTYPE_MAX);
829                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
830                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
831                         ice_and_bitmap(params->ptypes, params->ptypes,
832                                        src, ICE_FLOW_PTYPE_MAX);
833                 }
834
835                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
836                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
837                                 src =
838                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
839                         else
840                                 src =
841                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
842
843                         ice_and_bitmap(params->ptypes, params->ptypes,
844                                        src, ICE_FLOW_PTYPE_MAX);
845                 } else {
846                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
847                         ice_andnot_bitmap(params->ptypes, params->ptypes,
848                                           src, ICE_FLOW_PTYPE_MAX);
849
850                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
851                         ice_andnot_bitmap(params->ptypes, params->ptypes,
852                                           src, ICE_FLOW_PTYPE_MAX);
853                 }
854         }
855
856         return ICE_SUCCESS;
857 }
858
859 /**
860  * ice_flow_xtract_pkt_flags - Create an extraction sequence entry for packet flags
861  * @hw: pointer to the HW struct
862  * @params: information about the flow to be processed
863  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
864  *
865  * This function will allocate an extraction sequence entry for a DWORD-sized
866  * chunk of the packet flags.
867  */
868 static enum ice_status
869 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
870                           struct ice_flow_prof_params *params,
871                           enum ice_flex_mdid_pkt_flags flags)
872 {
873         u8 fv_words = hw->blk[params->blk].es.fvw;
874         u8 idx;
875
876         /* Make sure the number of extraction sequence entries required does not
877          * exceed the block's capacity.
878          */
879         if (params->es_cnt >= fv_words)
880                 return ICE_ERR_MAX_LIMIT;
881
882         /* some blocks require a reversed field vector layout */
883         if (hw->blk[params->blk].es.reverse)
884                 idx = fv_words - params->es_cnt - 1;
885         else
886                 idx = params->es_cnt;
887
888         params->es[idx].prot_id = ICE_PROT_META_ID;
889         params->es[idx].off = flags;
890         params->es_cnt++;
891
892         return ICE_SUCCESS;
893 }
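/* Note on the index calculation above: for blocks whose field vector layout is
 * reversed (hw->blk[blk].es.reverse), extraction entries are filled from the
 * end of the vector (fv_words - 1, fv_words - 2, ...) instead of from index 0.
 * The same pattern is used in ice_flow_xtract_fld() and ice_flow_xtract_raws()
 * below.
 */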
894
895 /**
896  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
897  * @hw: pointer to the HW struct
898  * @params: information about the flow to be processed
899  * @seg: packet segment index of the field to be extracted
900  * @fld: ID of field to be extracted
901  * @match: bitfield of all fields
902  *
903  * This function determines the protocol ID, offset, and size of the given
904  * field. It then allocates one or more extraction sequence entries for the
905  * given field, and fills the entries with protocol ID and offset information.
906  */
907 static enum ice_status
908 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
909                     u8 seg, enum ice_flow_field fld, u64 match)
910 {
911         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
912         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
913         u8 fv_words = hw->blk[params->blk].es.fvw;
914         struct ice_flow_fld_info *flds;
915         u16 cnt, ese_bits, i;
916         u16 sib_mask = 0;
917         u16 mask;
918         u16 off;
919
920         flds = params->prof->segs[seg].fields;
921
922         switch (fld) {
923         case ICE_FLOW_FIELD_IDX_ETH_DA:
924         case ICE_FLOW_FIELD_IDX_ETH_SA:
925         case ICE_FLOW_FIELD_IDX_S_VLAN:
926         case ICE_FLOW_FIELD_IDX_C_VLAN:
927                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
928                 break;
929         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
930                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
931                 break;
932         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
933                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
934                 break;
935         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
936                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
937                 break;
938         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
939         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
940                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
941
942                 /* TTL and PROT share the same extraction seq. entry.
943                  * Each is considered a sibling to the other in terms of sharing
944                  * the same extraction sequence entry.
945                  */
946                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
947                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
948                 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
949                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
950
951                 /* If the sibling field is also included, that field's
952                  * mask needs to be included.
953                  */
954                 if (match & BIT(sib))
955                         sib_mask = ice_flds_info[sib].mask;
956                 break;
957         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
958         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
959                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
960
961                 /* TTL and PROT share the same extraction seq. entry.
962                  * Each is considered a sibling to the other in terms of sharing
963                  * the same extraction sequence entry.
964                  */
965                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
966                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
967                 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
968                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
969
970                 /* If the sibling field is also included, that field's
971                  * mask needs to be included.
972                  */
973                 if (match & BIT(sib))
974                         sib_mask = ice_flds_info[sib].mask;
975                 break;
976         case ICE_FLOW_FIELD_IDX_IPV4_SA:
977         case ICE_FLOW_FIELD_IDX_IPV4_DA:
978                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
979                 break;
980         case ICE_FLOW_FIELD_IDX_IPV6_SA:
981         case ICE_FLOW_FIELD_IDX_IPV6_DA:
982         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
983         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
984         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
985         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
986         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
987         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
988                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
989                 break;
990         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
991         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
992         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
993                 prot_id = ICE_PROT_TCP_IL;
994                 break;
995         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
996         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
997                 prot_id = ICE_PROT_UDP_IL_OR_S;
998                 break;
999         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1000         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1001                 prot_id = ICE_PROT_SCTP_IL;
1002                 break;
1003         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1004         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1005         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1006         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1007         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1008         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1009                 /* GTP is accessed through UDP OF protocol */
1010                 prot_id = ICE_PROT_UDP_OF;
1011                 break;
1012         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1013                 prot_id = ICE_PROT_PPPOE;
1014                 break;
1015         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1016                 prot_id = ICE_PROT_UDP_IL_OR_S;
1017                 break;
1018         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1019                 prot_id = ICE_PROT_L2TPV3;
1020                 break;
1021         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1022                 prot_id = ICE_PROT_ESP_F;
1023                 break;
1024         case ICE_FLOW_FIELD_IDX_AH_SPI:
1025                 prot_id = ICE_PROT_ESP_2;
1026                 break;
1027         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1028                 prot_id = ICE_PROT_UDP_IL_OR_S;
1029                 break;
1030         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1031         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1032         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1033         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1034         case ICE_FLOW_FIELD_IDX_ARP_OP:
1035                 prot_id = ICE_PROT_ARP_OF;
1036                 break;
1037         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1038         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1039                 /* ICMP type and code share the same extraction seq. entry */
1040                 prot_id = (params->prof->segs[seg].hdrs &
1041                            ICE_FLOW_SEG_HDR_IPV4) ?
1042                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1043                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1044                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1045                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1046                 break;
1047         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1048                 prot_id = ICE_PROT_GRE_OF;
1049                 break;
1050         default:
1051                 return ICE_ERR_NOT_IMPL;
1052         }
1053
1054         /* Each extraction sequence entry is a word in size, and extracts a
1055          * word starting at a word-aligned offset within a protocol header.
1056          */
1057         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1058
1059         flds[fld].xtrct.prot_id = prot_id;
1060         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1061                 ICE_FLOW_FV_EXTRACT_SZ;
1062         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1063         flds[fld].xtrct.idx = params->es_cnt;
1064         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1065
1066         /* Adjust the next field-entry index after accommodating the number of
1067          * entries this field consumes
1068          */
1069         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1070                                   ice_flds_info[fld].size, ese_bits);
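        /* Worked example (assuming ICE_FLOW_FV_EXTRACT_SZ is 2 bytes): for
         * ICE_FLOW_FIELD_IDX_IPV4_SA, off is 96 bits, so xtrct.off = 12 bytes,
         * xtrct.disp = 0, and cnt = DIVIDE_AND_ROUND_UP(0 + 32, 16) = 2
         * extraction sequence entries for the 32-bit address.
         */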
1071
1072         /* Fill in the extraction sequence entries needed for this field */
1073         off = flds[fld].xtrct.off;
1074         mask = flds[fld].xtrct.mask;
1075         for (i = 0; i < cnt; i++) {
1076                 /* Only consume an extraction sequence entry if there is no
1077                  * sibling field associated with this field or the sibling entry
1078                  * does not already extract the word shared with this field.
1079                  */
1080                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1081                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1082                     flds[sib].xtrct.off != off) {
1083                         u8 idx;
1084
1085                         /* Make sure the number of extraction sequence entries
1086                          * required does not exceed the block's capacity
1087                          */
1088                         if (params->es_cnt >= fv_words)
1089                                 return ICE_ERR_MAX_LIMIT;
1090
1091                         /* some blocks require a reversed field vector layout */
1092                         if (hw->blk[params->blk].es.reverse)
1093                                 idx = fv_words - params->es_cnt - 1;
1094                         else
1095                                 idx = params->es_cnt;
1096
1097                         params->es[idx].prot_id = prot_id;
1098                         params->es[idx].off = off;
1099                         params->mask[idx] = mask | sib_mask;
1100                         params->es_cnt++;
1101                 }
1102
1103                 off += ICE_FLOW_FV_EXTRACT_SZ;
1104         }
1105
1106         return ICE_SUCCESS;
1107 }
1108
1109 /**
1110  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1111  * @hw: pointer to the HW struct
1112  * @params: information about the flow to be processed
1113  * @seg: index of packet segment whose raw fields are to be extracted
1114  */
1115 static enum ice_status
1116 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1117                      u8 seg)
1118 {
1119         u16 fv_words;
1120         u16 hdrs_sz;
1121         u8 i;
1122
1123         if (!params->prof->segs[seg].raws_cnt)
1124                 return ICE_SUCCESS;
1125
1126         if (params->prof->segs[seg].raws_cnt >
1127             ARRAY_SIZE(params->prof->segs[seg].raws))
1128                 return ICE_ERR_MAX_LIMIT;
1129
1130         /* Offsets within the segment headers are not supported */
1131         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1132         if (!hdrs_sz)
1133                 return ICE_ERR_PARAM;
1134
1135         fv_words = hw->blk[params->blk].es.fvw;
1136
1137         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1138                 struct ice_flow_seg_fld_raw *raw;
1139                 u16 off, cnt, j;
1140
1141                 raw = &params->prof->segs[seg].raws[i];
1142
1143                 /* Storing extraction information */
1144                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1145                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1146                         ICE_FLOW_FV_EXTRACT_SZ;
1147                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1148                         BITS_PER_BYTE;
1149                 raw->info.xtrct.idx = params->es_cnt;
1150
1151                 /* Determine the number of field vector entries this raw field
1152                  * consumes.
1153                  */
1154                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1155                                           (raw->info.src.last * BITS_PER_BYTE),
1156                                           (ICE_FLOW_FV_EXTRACT_SZ *
1157                                            BITS_PER_BYTE));
1158                 off = raw->info.xtrct.off;
1159                 for (j = 0; j < cnt; j++) {
1160                         u16 idx;
1161
1162                         /* Make sure the number of extraction sequence entries
1163                          * required does not exceed the block's capability
1164                          */
1165                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1166                             params->es_cnt >= ICE_MAX_FV_WORDS)
1167                                 return ICE_ERR_MAX_LIMIT;
1168
1169                         /* some blocks require a reversed field vector layout */
1170                         if (hw->blk[params->blk].es.reverse)
1171                                 idx = fv_words - params->es_cnt - 1;
1172                         else
1173                                 idx = params->es_cnt;
1174
1175                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1176                         params->es[idx].off = off;
1177                         params->es_cnt++;
1178                         off += ICE_FLOW_FV_EXTRACT_SZ;
1179                 }
1180         }
1181
1182         return ICE_SUCCESS;
1183 }
1184
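/* Worked example (illustrative values): for a raw match of
 * raw->info.src.last = 3 bytes starting at raw->off = 5, and again assuming
 * ICE_FLOW_FV_EXTRACT_SZ is 2 bytes, the code above yields
 *
 *   xtrct.off  = (5 / 2) * 2 = 4      first word-aligned byte offset
 *   xtrct.disp = (5 % 2) * 8 = 8      raw data starts 8 bits into that word
 *   cnt        = DIVIDE_AND_ROUND_UP(8 + 3 * 8, 16) = 2 field vector words
 */
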
1185 /**
1186  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1187  * @hw: pointer to the HW struct
1188  * @params: information about the flow to be processed
1189  *
1190  * This function iterates through all matched fields in the given segments, and
1191  * creates an extraction sequence for the fields.
1192  */
1193 static enum ice_status
1194 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1195                           struct ice_flow_prof_params *params)
1196 {
1197         enum ice_status status = ICE_SUCCESS;
1198         u8 i;
1199
1200         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1201          * packet flags
1202          */
1203         if (params->blk == ICE_BLK_ACL) {
1204                 status = ice_flow_xtract_pkt_flags(hw, params,
1205                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1206                 if (status)
1207                         return status;
1208         }
1209
1210         for (i = 0; i < params->prof->segs_cnt; i++) {
1211                 u64 match = params->prof->segs[i].match;
1212                 enum ice_flow_field j;
1213
1214                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1215                         const u64 bit = BIT_ULL(j);
1216
1217                         if (match & bit) {
1218                                 status = ice_flow_xtract_fld(hw, params, i, j,
1219                                                              match);
1220                                 if (status)
1221                                         return status;
1222                                 match &= ~bit;
1223                         }
1224                 }
1225
1226                 /* Process raw matching bytes */
1227                 status = ice_flow_xtract_raws(hw, params, i);
1228                 if (status)
1229                         return status;
1230         }
1231
1232         return status;
1233 }
1234
1235 /**
1236  * ice_flow_sel_acl_scen - returns the specific scenario
1237  * @hw: pointer to the hardware structure
1238  * @params: information about the flow to be processed
1239  *
1240  * This function selects and returns the best-fit ACL scenario for the
1241  * entry length computed from the params passed to it.
1242  */
1243 static enum ice_status
1244 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1245 {
1246         /* Find the best-fit scenario for the provided match width */
1247         struct ice_acl_scen *cand_scen = NULL, *scen;
1248
1249         if (!hw->acl_tbl)
1250                 return ICE_ERR_DOES_NOT_EXIST;
1251
1252         /* Loop through each scenario and match against the scenario width
1253          * to select the specific scenario
1254          */
1255         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1256                 if (scen->eff_width >= params->entry_length &&
1257                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1258                         cand_scen = scen;
1259         if (!cand_scen)
1260                 return ICE_ERR_DOES_NOT_EXIST;
1261
1262         params->prof->cfg.scen = cand_scen;
1263
1264         return ICE_SUCCESS;
1265 }
1266
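/* Illustrative example (hypothetical widths, not taken from hw->acl_tbl): if
 * the available scenarios have effective widths of 32, 64 and 128 bytes and
 * params->entry_length is 40, the loop above keeps the 64-byte scenario as
 * the narrowest one that still fits the entry; an entry_length above 128
 * would make the function return ICE_ERR_DOES_NOT_EXIST.
 */
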
1267 /**
1268  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1269  * @params: information about the flow to be processed
1270  */
1271 static enum ice_status
1272 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1273 {
1274         u16 index, i, range_idx = 0;
1275
1276         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1277
1278         for (i = 0; i < params->prof->segs_cnt; i++) {
1279                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1280                 u64 match = seg->match;
1281                 u8 j;
1282
1283                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1284                         struct ice_flow_fld_info *fld;
1285                         const u64 bit = BIT_ULL(j);
1286
1287                         if (!(match & bit))
1288                                 continue;
1289
1290                         fld = &seg->fields[j];
1291                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1292
1293                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1294                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1295
1296                                 /* Range checking is only supported for
1297                                  * single words
1298                                  */
1299                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1300                                                         fld->xtrct.disp,
1301                                                         BITS_PER_BYTE * 2) > 1)
1302                                         return ICE_ERR_PARAM;
1303
1304                                 /* Ranges must define low and high values */
1305                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1306                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1307                                         return ICE_ERR_PARAM;
1308
1309                                 fld->entry.val = range_idx++;
1310                         } else {
1311                                 /* Store adjusted byte-length of field for later
1312                                  * use, taking into account potential
1313                                  * non-byte-aligned displacement
1314                                  */
1315                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1316                                         (ice_flds_info[j].size +
1317                                          (fld->xtrct.disp % BITS_PER_BYTE),
1318                                          BITS_PER_BYTE);
1319                                 fld->entry.val = index;
1320                                 index += fld->entry.last;
1321                         }
1322
1323                         match &= ~bit;
1324                 }
1325
1326                 for (j = 0; j < seg->raws_cnt; j++) {
1327                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1328
1329                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1330                         raw->info.entry.val = index;
1331                         raw->info.entry.last = raw->info.src.last;
1332                         index += raw->info.entry.last;
1333                 }
1334         }
1335
1336         /* Currently we only support using the byte selection base, which
1337          * only allows for an effective entry size of 30 bytes. Reject
1338          * anything larger.
1339          */
1340         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1341                 return ICE_ERR_PARAM;
1342
1343         /* Only 8 range checkers per profile, reject anything trying to use
1344          * more
1345          */
1346         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1347                 return ICE_ERR_PARAM;
1348
1349         /* Store # bytes required for entry for later use */
1350         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1351
1352         return ICE_SUCCESS;
1353 }
1354
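/* Illustrative example (hypothetical field sizes): for a profile that matches
 * one 4-byte non-range field followed by one 2-byte range field, the loops
 * above assign entry.val = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX and
 * entry.last = 4 to the first field, entry.val = 0 (a range checker index) to
 * the second, and leave params->entry_length = 4, since range-checked fields
 * consume a range checker instead of byte-selection slots.
 */
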
1355 /**
1356  * ice_flow_proc_segs - process all packet segments associated with a profile
1357  * @hw: pointer to the HW struct
1358  * @params: information about the flow to be processed
1359  */
1360 static enum ice_status
1361 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1362 {
1363         enum ice_status status;
1364
1365         status = ice_flow_proc_seg_hdrs(params);
1366         if (status)
1367                 return status;
1368
1369         status = ice_flow_create_xtrct_seq(hw, params);
1370         if (status)
1371                 return status;
1372
1373         switch (params->blk) {
1374         case ICE_BLK_FD:
1375         case ICE_BLK_RSS:
1376                 status = ICE_SUCCESS;
1377                 break;
1378         case ICE_BLK_ACL:
1379                 status = ice_flow_acl_def_entry_frmt(params);
1380                 if (status)
1381                         return status;
1382                 status = ice_flow_sel_acl_scen(hw, params);
1383                 if (status)
1384                         return status;
1385                 break;
1386         default:
1387                 return ICE_ERR_NOT_IMPL;
1388         }
1389
1390         return status;
1391 }
1392
1393 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1394 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1395 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1396
1397 /**
1398  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1399  * @hw: pointer to the HW struct
1400  * @blk: classification stage
1401  * @dir: flow direction
1402  * @segs: array of one or more packet segments that describe the flow
1403  * @segs_cnt: number of packet segments provided
1404  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1405  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1406  */
1407 static struct ice_flow_prof *
1408 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1409                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1410                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1411 {
1412         struct ice_flow_prof *p, *prof = NULL;
1413
1414         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1415         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1416                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1417                     segs_cnt && segs_cnt == p->segs_cnt) {
1418                         u8 i;
1419
1420                         /* Check for profile-VSI association if specified */
1421                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1422                             ice_is_vsi_valid(hw, vsi_handle) &&
1423                             !ice_is_bit_set(p->vsis, vsi_handle))
1424                                 continue;
1425
1426                         /* Protocol headers must be checked. Matched fields are
1427                          * checked if specified.
1428                          */
1429                         for (i = 0; i < segs_cnt; i++)
1430                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1431                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1432                                      segs[i].match != p->segs[i].match))
1433                                         break;
1434
1435                         /* A match is found if all segments are matched */
1436                         if (i == segs_cnt) {
1437                                 prof = p;
1438                                 break;
1439                         }
1440                 }
1441         ice_release_lock(&hw->fl_profs_locks[blk]);
1442
1443         return prof;
1444 }
1445
1446 /**
1447  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1448  * @hw: pointer to the HW struct
1449  * @blk: classification stage
1450  * @dir: flow direction
1451  * @segs: array of one or more packet segments that describe the flow
1452  * @segs_cnt: number of packet segments provided
1453  */
1454 u64
1455 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1456                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1457 {
1458         struct ice_flow_prof *p;
1459
1460         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1461                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1462
1463         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1464 }
1465
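/* Minimal usage sketch (hypothetical caller, not part of the driver):
 *
 *      u64 id = ice_flow_find_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, segs, 1);
 *      if (id == ICE_FLOW_PROF_ID_INVAL)
 *              ...; // no profile with matching headers and fields exists
 *
 * The lookup requires both the protocol headers and the matched fields of
 * every segment to be identical, since ICE_FLOW_FIND_PROF_CHK_FLDS is set.
 */
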
1466 /**
1467  * ice_flow_find_prof_id - Look up a profile with given profile ID
1468  * @hw: pointer to the HW struct
1469  * @blk: classification stage
1470  * @prof_id: unique ID to identify this flow profile
1471  */
1472 static struct ice_flow_prof *
1473 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1474 {
1475         struct ice_flow_prof *p;
1476
1477         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1478                 if (p->id == prof_id)
1479                         return p;
1480
1481         return NULL;
1482 }
1483
1484 /**
1485  * ice_dealloc_flow_entry - Deallocate flow entry memory
1486  * @hw: pointer to the HW struct
1487  * @entry: flow entry to be removed
1488  */
1489 static void
1490 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1491 {
1492         if (!entry)
1493                 return;
1494
1495         if (entry->entry)
1496                 ice_free(hw, entry->entry);
1497
1498         if (entry->range_buf) {
1499                 ice_free(hw, entry->range_buf);
1500                 entry->range_buf = NULL;
1501         }
1502
1503         if (entry->acts) {
1504                 ice_free(hw, entry->acts);
1505                 entry->acts = NULL;
1506                 entry->acts_cnt = 0;
1507         }
1508
1509         ice_free(hw, entry);
1510 }
1511
1512 #define ICE_ACL_INVALID_SCEN    0x3f
1513
1514 /**
1515  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1516  * @hw: pointer to the hardware structure
1517  * @prof: pointer to flow profile
1518  * @buf: destination buffer the partial extraction sequence is written to
1519  *
1520  * returns ICE_SUCCESS if no PF is associated with the given profile
1521  * returns ICE_ERR_IN_USE if at least one PF is associated with the given profile
1522  * returns another error code on a real error
1523  */
1524 static enum ice_status
1525 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1526                             struct ice_aqc_acl_prof_generic_frmt *buf)
1527 {
1528         enum ice_status status;
1529         u8 prof_id = 0;
1530
1531         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1532         if (status)
1533                 return status;
1534
1535         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1536         if (status)
1537                 return status;
1538
1539         /* If the scenarios associated with all PFs are either all 0 or all
1540          * ICE_ACL_INVALID_SCEN (63) for the given profile, then the profile
1541          * has not been configured yet.
1542          */
1543         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1544             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1545             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1546             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1547                 return ICE_SUCCESS;
1548
1549         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1550             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1551             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1552             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1553             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1554             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1555             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1556             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1557                 return ICE_SUCCESS;
1558         else
1559                 return ICE_ERR_IN_USE;
1560 }
1561
1562 /**
1563  * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1564  * @hw: pointer to the hardware structure
1565  * @acts: array of actions to be performed on a match
1566  * @acts_cnt: number of actions
1567  */
1568 static enum ice_status
1569 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1570                            u8 acts_cnt)
1571 {
1572         int i;
1573
1574         for (i = 0; i < acts_cnt; i++) {
1575                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1576                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1577                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1578                         struct ice_acl_cntrs cntrs;
1579                         enum ice_status status;
1580
1581                         cntrs.bank = 0; /* Only bank0 for the moment */
1582                         cntrs.first_cntr =
1583                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1584                         cntrs.last_cntr =
1585                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1586
1587                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1588                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1589                         else
1590                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1591
1592                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1593                         if (status)
1594                                 return status;
1595                 }
1596         }
1597         return ICE_SUCCESS;
1598 }
1599
1600 /**
1601  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1602  * @hw: pointer to the hardware structure
1603  * @prof: pointer to flow profile
1604  *
1605  * Disassociate the scenario from the profile for the PF of the VSI.
1606  */
1607 static enum ice_status
1608 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1609 {
1610         struct ice_aqc_acl_prof_generic_frmt buf;
1611         enum ice_status status = ICE_SUCCESS;
1612         u8 prof_id = 0;
1613
1614         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1615
1616         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1617         if (status)
1618                 return status;
1619
1620         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1621         if (status)
1622                 return status;
1623
1624         /* Clear scenario for this PF */
1625         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1626         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1627
1628         return status;
1629 }
1630
1631 /**
1632  * ice_flow_rem_entry_sync - Remove a flow entry
1633  * @hw: pointer to the HW struct
1634  * @blk: classification stage
1635  * @entry: flow entry to be removed
1636  */
1637 static enum ice_status
1638 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1639                         struct ice_flow_entry *entry)
1640 {
1641         if (!entry)
1642                 return ICE_ERR_BAD_PTR;
1643
1644         if (blk == ICE_BLK_ACL) {
1645                 enum ice_status status;
1646
1647                 if (!entry->prof)
1648                         return ICE_ERR_BAD_PTR;
1649
1650                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1651                                            entry->scen_entry_idx);
1652                 if (status)
1653                         return status;
1654
1655                 /* Checks if we need to release an ACL counter. */
1656                 if (entry->acts_cnt && entry->acts)
1657                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1658                                                    entry->acts_cnt);
1659         }
1660
1661         LIST_DEL(&entry->l_entry);
1662
1663         ice_dealloc_flow_entry(hw, entry);
1664
1665         return ICE_SUCCESS;
1666 }
1667
1668 /**
1669  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1670  * @hw: pointer to the HW struct
1671  * @blk: classification stage
1672  * @dir: flow direction
1673  * @prof_id: unique ID to identify this flow profile
1674  * @segs: array of one or more packet segments that describe the flow
1675  * @segs_cnt: number of packet segments provided
1676  * @acts: array of default actions
1677  * @acts_cnt: number of default actions
1678  * @prof: stores the returned flow profile added
1679  *
1680  * Assumption: the caller has acquired the lock to the profile list
1681  */
1682 static enum ice_status
1683 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1684                        enum ice_flow_dir dir, u64 prof_id,
1685                        struct ice_flow_seg_info *segs, u8 segs_cnt,
1686                        struct ice_flow_action *acts, u8 acts_cnt,
1687                        struct ice_flow_prof **prof)
1688 {
1689         struct ice_flow_prof_params params;
1690         enum ice_status status;
1691         u8 i;
1692
1693         if (!prof || (acts_cnt && !acts))
1694                 return ICE_ERR_BAD_PTR;
1695
1696         ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
1697         params.prof = (struct ice_flow_prof *)
1698                 ice_malloc(hw, sizeof(*params.prof));
1699         if (!params.prof)
1700                 return ICE_ERR_NO_MEMORY;
1701
1702         /* initialize extraction sequence to all invalid (0xff) */
1703         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1704                 params.es[i].prot_id = ICE_PROT_INVALID;
1705                 params.es[i].off = ICE_FV_OFFSET_INVAL;
1706         }
1707
1708         params.blk = blk;
1709         params.prof->id = prof_id;
1710         params.prof->dir = dir;
1711         params.prof->segs_cnt = segs_cnt;
1712
1713         /* Make a copy of the segments that need to be persistent in the flow
1714          * profile instance
1715          */
1716         for (i = 0; i < segs_cnt; i++)
1717                 ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
1718                            ICE_NONDMA_TO_NONDMA);
1719
1720         /* Make a copy of the actions that need to be persistent in the flow
1721          * profile instance.
1722          */
1723         if (acts_cnt) {
1724                 params.prof->acts = (struct ice_flow_action *)
1725                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1726                                    ICE_NONDMA_TO_NONDMA);
1727
1728                 if (!params.prof->acts) {
1729                         status = ICE_ERR_NO_MEMORY;
1730                         goto out;
1731                 }
1732         }
1733
1734         status = ice_flow_proc_segs(hw, &params);
1735         if (status) {
1736                 ice_debug(hw, ICE_DBG_FLOW,
1737                           "Error processing a flow's packet segments\n");
1738                 goto out;
1739         }
1740
1741         /* Add a HW profile for this flow profile */
1742         status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1743                               params.attr, params.attr_cnt, params.es,
1744                               params.mask);
1745         if (status) {
1746                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1747                 goto out;
1748         }
1749
1750         INIT_LIST_HEAD(&params.prof->entries);
1751         ice_init_lock(&params.prof->entries_lock);
1752         *prof = params.prof;
1753
1754 out:
1755         if (status) {
1756                 if (params.prof->acts)
1757                         ice_free(hw, params.prof->acts);
1758                 ice_free(hw, params.prof);
1759         }
1760
1761         return status;
1762 }
1763
1764 /**
1765  * ice_flow_rem_prof_sync - remove a flow profile
1766  * @hw: pointer to the hardware structure
1767  * @blk: classification stage
1768  * @prof: pointer to flow profile to remove
1769  *
1770  * Assumption: the caller has acquired the lock to the profile list
1771  */
1772 static enum ice_status
1773 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1774                        struct ice_flow_prof *prof)
1775 {
1776         enum ice_status status;
1777
1778         /* Remove all remaining flow entries before removing the flow profile */
1779         if (!LIST_EMPTY(&prof->entries)) {
1780                 struct ice_flow_entry *e, *t;
1781
1782                 ice_acquire_lock(&prof->entries_lock);
1783
1784                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1785                                          l_entry) {
1786                         status = ice_flow_rem_entry_sync(hw, blk, e);
1787                         if (status)
1788                                 break;
1789                 }
1790
1791                 ice_release_lock(&prof->entries_lock);
1792         }
1793
1794         if (blk == ICE_BLK_ACL) {
1795                 struct ice_aqc_acl_profile_ranges query_rng_buf;
1796                 struct ice_aqc_acl_prof_generic_frmt buf;
1797                 u8 prof_id = 0;
1798
1799                 /* Disassociate the scenario from the profile for the PF */
1800                 status = ice_flow_acl_disassoc_scen(hw, prof);
1801                 if (status)
1802                         return status;
1803
1804                 /* Clear the range-checker if the profile ID is no longer
1805                  * used by any PF
1806                  */
1807                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1808                 if (status && status != ICE_ERR_IN_USE) {
1809                         return status;
1810                 } else if (!status) {
1811                         /* Clear the range-checker value for profile ID */
1812                         ice_memset(&query_rng_buf, 0,
1813                                    sizeof(struct ice_aqc_acl_profile_ranges),
1814                                    ICE_NONDMA_MEM);
1815
1816                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
1817                                                       &prof_id);
1818                         if (status)
1819                                 return status;
1820
1821                         status = ice_prog_acl_prof_ranges(hw, prof_id,
1822                                                           &query_rng_buf, NULL);
1823                         if (status)
1824                                 return status;
1825                 }
1826         }
1827
1828         /* Remove all hardware profiles associated with this flow profile */
1829         status = ice_rem_prof(hw, blk, prof->id);
1830         if (!status) {
1831                 LIST_DEL(&prof->l_entry);
1832                 ice_destroy_lock(&prof->entries_lock);
1833                 if (prof->acts)
1834                         ice_free(hw, prof->acts);
1835                 ice_free(hw, prof);
1836         }
1837
1838         return status;
1839 }
1840
1841 /**
1842  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1843  * @buf: Destination buffer function writes partial xtrct sequence to
1844  * @info: Info about field
1845  */
1846 static void
1847 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1848                                struct ice_flow_fld_info *info)
1849 {
1850         u16 dst, i;
1851         u8 src;
1852
1853         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1854                 info->xtrct.disp / BITS_PER_BYTE;
1855         dst = info->entry.val;
1856         for (i = 0; i < info->entry.last; i++)
1857                 /* HW stores field vector words in LE, convert words back to BE
1858                  * so constructed entries will end up in network order
1859                  */
1860                 buf->byte_selection[dst++] = src++ ^ 1;
1861 }
1862
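/* Illustrative example: the "src ^ 1" above swaps the two bytes of each
 * little-endian field vector word. For a field with xtrct.idx = 2, disp = 0
 * and entry.last = 2 (assuming ICE_FLOW_FV_EXTRACT_SZ is 2), src starts at 4,
 * so byte_selection[] receives source bytes 5 then 4, restoring network byte
 * order for that word.
 */
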
1863 /**
1864  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1865  * @hw: pointer to the hardware structure
1866  * @prof: pointer to flow profile
1867  */
1868 static enum ice_status
1869 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
1870 {
1871         struct ice_aqc_acl_prof_generic_frmt buf;
1872         struct ice_flow_fld_info *info;
1873         enum ice_status status;
1874         u8 prof_id = 0;
1875         u16 i;
1876
1877         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1878
1879         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1880         if (status)
1881                 return status;
1882
1883         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1884         if (status && status != ICE_ERR_IN_USE)
1885                 return status;
1886
1887         if (!status) {
1888                 /* Program the profile-dependent configuration. This is done
1889                  * only once, regardless of the number of PFs using that profile
1890                  */
1891                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1892
1893                 for (i = 0; i < prof->segs_cnt; i++) {
1894                         struct ice_flow_seg_info *seg = &prof->segs[i];
1895                         u64 match = seg->match;
1896                         u16 j;
1897
1898                         for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1899                                 const u64 bit = BIT_ULL(j);
1900
1901                                 if (!(match & bit))
1902                                         continue;
1903
1904                                 info = &seg->fields[j];
1905
1906                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
1907                                         buf.word_selection[info->entry.val] =
1908                                                                 info->xtrct.idx;
1909                                 else
1910                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
1911                                                                        info);
1912
1913                                 match &= ~bit;
1914                         }
1915
1916                         for (j = 0; j < seg->raws_cnt; j++) {
1917                                 info = &seg->raws[j].info;
1918                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
1919                         }
1920                 }
1921
1922                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
1923                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
1924                            ICE_NONDMA_MEM);
1925         }
1926
1927         /* Update the current PF */
1928         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
1929         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1930
1931         return status;
1932 }
1933
1934 /**
1935  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1936  * @hw: pointer to the hardware structure
1937  * @blk: classification stage
1938  * @vsi_handle: software VSI handle
1939  * @vsig: target VSI group
1940  *
1941  * Assumption: the caller has already verified that the VSI to
1942  * be added has the same characteristics as the VSIG and will
1943  * thereby have access to all resources added to that VSIG.
1944  */
1945 enum ice_status
1946 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1947                         u16 vsig)
1948 {
1949         enum ice_status status;
1950
1951         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1952                 return ICE_ERR_PARAM;
1953
1954         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1955         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1956                                   vsig);
1957         ice_release_lock(&hw->fl_profs_locks[blk]);
1958
1959         return status;
1960 }
1961
1962 /**
1963  * ice_flow_assoc_prof - associate a VSI with a flow profile
1964  * @hw: pointer to the hardware structure
1965  * @blk: classification stage
1966  * @prof: pointer to flow profile
1967  * @vsi_handle: software VSI handle
1968  *
1969  * Assumption: the caller has acquired the lock to the profile list
1970  * and the software VSI handle has been validated
1971  */
1972 static enum ice_status
1973 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1974                     struct ice_flow_prof *prof, u16 vsi_handle)
1975 {
1976         enum ice_status status = ICE_SUCCESS;
1977
1978         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1979                 if (blk == ICE_BLK_ACL) {
1980                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
1981                         if (status)
1982                                 return status;
1983                 }
1984                 status = ice_add_prof_id_flow(hw, blk,
1985                                               ice_get_hw_vsi_num(hw,
1986                                                                  vsi_handle),
1987                                               prof->id);
1988                 if (!status)
1989                         ice_set_bit(vsi_handle, prof->vsis);
1990                 else
1991                         ice_debug(hw, ICE_DBG_FLOW,
1992                                   "HW profile add failed, %d\n",
1993                                   status);
1994         }
1995
1996         return status;
1997 }
1998
1999 /**
2000  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2001  * @hw: pointer to the hardware structure
2002  * @blk: classification stage
2003  * @prof: pointer to flow profile
2004  * @vsi_handle: software VSI handle
2005  *
2006  * Assumption: the caller has acquired the lock to the profile list
2007  * and the software VSI handle has been validated
2008  */
2009 static enum ice_status
2010 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2011                        struct ice_flow_prof *prof, u16 vsi_handle)
2012 {
2013         enum ice_status status = ICE_SUCCESS;
2014
2015         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2016                 status = ice_rem_prof_id_flow(hw, blk,
2017                                               ice_get_hw_vsi_num(hw,
2018                                                                  vsi_handle),
2019                                               prof->id);
2020                 if (!status)
2021                         ice_clear_bit(vsi_handle, prof->vsis);
2022                 else
2023                         ice_debug(hw, ICE_DBG_FLOW,
2024                                   "HW profile remove failed, %d\n",
2025                                   status);
2026         }
2027
2028         return status;
2029 }
2030
2031 /**
2032  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2033  * @hw: pointer to the HW struct
2034  * @blk: classification stage
2035  * @dir: flow direction
2036  * @prof_id: unique ID to identify this flow profile
2037  * @segs: array of one or more packet segments that describe the flow
2038  * @segs_cnt: number of packet segments provided
2039  * @acts: array of default actions
2040  * @acts_cnt: number of default actions
2041  * @prof: stores the returned flow profile added
2042  */
2043 enum ice_status
2044 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2045                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2046                   struct ice_flow_action *acts, u8 acts_cnt,
2047                   struct ice_flow_prof **prof)
2048 {
2049         enum ice_status status;
2050
2051         if (segs_cnt > ICE_FLOW_SEG_MAX)
2052                 return ICE_ERR_MAX_LIMIT;
2053
2054         if (!segs_cnt)
2055                 return ICE_ERR_PARAM;
2056
2057         if (!segs)
2058                 return ICE_ERR_BAD_PTR;
2059
2060         status = ice_flow_val_hdrs(segs, segs_cnt);
2061         if (status)
2062                 return status;
2063
2064         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2065
2066         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2067                                         acts, acts_cnt, prof);
2068         if (!status)
2069                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2070
2071         ice_release_lock(&hw->fl_profs_locks[blk]);
2072
2073         return status;
2074 }
2075
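/* Minimal usage sketch (hypothetical caller, not part of the driver). It
 * assumes the ice_flow_set_fld() helper declared in ice_flow.h behaves as its
 * name suggests; hw and prof_id are the caller's own:
 *
 *      struct ice_flow_seg_info seg = { 0 };
 *      struct ice_flow_prof *prof;
 *      enum ice_status status;
 *
 *      seg.hdrs |= ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP;
 *      ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *                       ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *                       ICE_FLOW_FLD_OFF_INVAL, false);
 *
 *      status = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, prof_id,
 *                                 &seg, 1, NULL, 0, &prof);
 *
 * On success the new profile is linked into hw->fl_profs[ICE_BLK_RSS] and can
 * later be removed with ice_flow_rem_prof(hw, ICE_BLK_RSS, prof_id).
 */
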
2076 /**
2077  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2078  * @hw: pointer to the HW struct
2079  * @blk: the block for which the flow profile is to be removed
2080  * @prof_id: unique ID of the flow profile to be removed
2081  */
2082 enum ice_status
2083 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2084 {
2085         struct ice_flow_prof *prof;
2086         enum ice_status status;
2087
2088         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2089
2090         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2091         if (!prof) {
2092                 status = ICE_ERR_DOES_NOT_EXIST;
2093                 goto out;
2094         }
2095
2096         /* prof becomes invalid after the call */
2097         status = ice_flow_rem_prof_sync(hw, blk, prof);
2098
2099 out:
2100         ice_release_lock(&hw->fl_profs_locks[blk]);
2101
2102         return status;
2103 }
2104
2105 /**
2106  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2107  * @hw: pointer to the HW struct
2108  * @blk: classification stage
2109  * @prof_id: the profile ID handle
2110  * @hw_prof_id: pointer to variable to receive the HW profile ID
2111  */
2112 enum ice_status
2113 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2114                      u8 *hw_prof_id)
2115 {
2116         struct ice_prof_map *map;
2117
2118         map = ice_search_prof_id(hw, blk, prof_id);
2119         if (map) {
2120                 *hw_prof_id = map->prof_id;
2121                 return ICE_SUCCESS;
2122         }
2123
2124         return ICE_ERR_DOES_NOT_EXIST;
2125 }
2126
2127 /**
2128  * ice_flow_find_entry - look for a flow entry using its unique ID
2129  * @hw: pointer to the HW struct
2130  * @blk: classification stage
2131  * @entry_id: unique ID to identify this flow entry
2132  *
2133  * This function looks for the flow entry with the specified unique ID in all
2134  * flow profiles of the specified classification stage. If the entry is found,
2135  * it returns the handle to the flow entry. Otherwise, it returns
2136  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2137  */
2138 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2139 {
2140         struct ice_flow_entry *found = NULL;
2141         struct ice_flow_prof *p;
2142
2143         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2144
2145         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2146                 struct ice_flow_entry *e;
2147
2148                 ice_acquire_lock(&p->entries_lock);
2149                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2150                         if (e->id == entry_id) {
2151                                 found = e;
2152                                 break;
2153                         }
2154                 ice_release_lock(&p->entries_lock);
2155
2156                 if (found)
2157                         break;
2158         }
2159
2160         ice_release_lock(&hw->fl_profs_locks[blk]);
2161
2162         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2163 }
2164
2165 /**
2166  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2167  * @hw: pointer to the hardware structure
2168  * @acts: array of actions to be performed on a match
2169  * @acts_cnt: number of actions
2170  * @cnt_alloc: indicates if an ACL counter has been allocated.
2171  */
2172 static enum ice_status
2173 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2174                            u8 acts_cnt, bool *cnt_alloc)
2175 {
2176         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2177         int i;
2178
2179         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2180         *cnt_alloc = false;
2181
2182         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2183                 return ICE_ERR_OUT_OF_RANGE;
2184
2185         for (i = 0; i < acts_cnt; i++) {
2186                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2187                     acts[i].type != ICE_FLOW_ACT_DROP &&
2188                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2189                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2190                         return ICE_ERR_CFG;
2191
2192                 /* If the caller wants to add two actions of the same type, then
2193                  * it is considered an invalid configuration.
2194                  */
2195                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2196                         return ICE_ERR_PARAM;
2197         }
2198
2199         /* Checks if ACL counters are needed. */
2200         for (i = 0; i < acts_cnt; i++) {
2201                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2202                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2203                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2204                         struct ice_acl_cntrs cntrs;
2205                         enum ice_status status;
2206
2207                         cntrs.amount = 1;
2208                         cntrs.bank = 0; /* Only bank0 for the moment */
2209
2210                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2211                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2212                         else
2213                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2214
2215                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2216                         if (status)
2217                                 return status;
2218                         /* Counter index within the bank */
2219                         acts[i].data.acl_act.value =
2220                                                 CPU_TO_LE16(cntrs.first_cntr);
2221                         *cnt_alloc = true;
2222                 }
2223         }
2224
2225         return ICE_SUCCESS;
2226 }
2227
2228 /**
2229  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2230  * @fld: number of the given field
2231  * @info: info about field
2232  * @range_buf: range checker configuration buffer
2233  * @data: pointer to a data buffer containing flow entry's match values/masks
2234  * @range: Input/output param indicating which range checkers are being used
2235  */
2236 static void
2237 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2238                               struct ice_aqc_acl_profile_ranges *range_buf,
2239                               u8 *data, u8 *range)
2240 {
2241         u16 new_mask;
2242
2243         /* If not specified, default mask is all bits in field */
2244         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2245                     BIT(ice_flds_info[fld].size) - 1 :
2246                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2247
2248         /* If the mask is 0, then we don't need to worry about this input
2249          * range checker value.
2250          */
2251         if (new_mask) {
2252                 u16 new_high =
2253                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2254                 u16 new_low =
2255                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2256                 u8 range_idx = info->entry.val;
2257
2258                 range_buf->checker_cfg[range_idx].low_boundary =
2259                         CPU_TO_BE16(new_low);
2260                 range_buf->checker_cfg[range_idx].high_boundary =
2261                         CPU_TO_BE16(new_high);
2262                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2263
2264                 /* Indicate which range checker is being used */
2265                 *range |= BIT(range_idx);
2266         }
2267 }
2268
2269 /**
2270  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2271  * @fld: number of the given field
2272  * @info: info about the field
2273  * @buf: buffer containing the entry
2274  * @dontcare: buffer containing don't care mask for entry
2275  * @data: pointer to a data buffer containing flow entry's match values/masks
2276  */
2277 static void
2278 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2279                             u8 *dontcare, u8 *data)
2280 {
2281         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2282         bool use_mask = false;
2283         u8 disp;
2284
2285         src = info->src.val;
2286         mask = info->src.mask;
2287         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2288         disp = info->xtrct.disp % BITS_PER_BYTE;
2289
2290         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2291                 use_mask = true;
2292
2293         for (k = 0; k < info->entry.last; k++, dst++) {
2294                 /* Add overflow bits from previous byte */
2295                 buf[dst] = (tmp_s & 0xff00) >> 8;
2296
2297                 /* If the mask is not valid, tmp_m is always zero, so this just
2298                  * sets dontcare to 0 (no masked bits). If the mask is valid, this
2299                  * pulls in the overflow bits of the mask from the previous byte.
2300                  */
2301                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2302
2303                 /* If there is displacement, the last byte will only contain
2304                  * displaced data, but there is no more data to read from the
2305                  * user buffer, so skip it to avoid reading beyond the end of
2306                  * the user buffer
2307                  */
2308                 if (!disp || k < info->entry.last - 1) {
2309                         /* Store shifted data to use in next byte */
2310                         tmp_s = data[src++] << disp;
2311
2312                         /* Add current (shifted) byte */
2313                         buf[dst] |= tmp_s & 0xff;
2314
2315                         /* Handle mask if valid */
2316                         if (use_mask) {
2317                                 tmp_m = (~data[mask++] & 0xff) << disp;
2318                                 dontcare[dst] |= tmp_m & 0xff;
2319                         }
2320                 }
2321         }
2322
2323         /* Fill in don't care bits at beginning of field */
2324         if (disp) {
2325                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2326                 for (k = 0; k < disp; k++)
2327                         dontcare[dst] |= BIT(k);
2328         }
2329
2330         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2331
2332         /* Fill in don't care bits at end of field */
2333         if (end_disp) {
2334                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2335                       info->entry.last - 1;
2336                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2337                         dontcare[dst] |= BIT(k);
2338         }
2339 }
2340
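/* Illustrative example (hypothetical field, not from the field table): for a
 * 16-bit field with xtrct.disp = 4 the loop above spreads two source bytes
 * across entry.last = 3 destination bytes, shifting each source byte left by
 * the 4-bit displacement and carrying its overflow into the next byte; the
 * trailing fix-ups then mark the 4 leading and 4 trailing pad bits as
 * don't-care so only the field's own 16 bits participate in the match.
 */
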
2341 /**
2342  * ice_flow_acl_frmt_entry - Format ACL entry
2343  * @hw: pointer to the hardware structure
2344  * @prof: pointer to flow profile
2345  * @e: pointer to the flow entry
2346  * @data: pointer to a data buffer containing flow entry's match values/masks
2347  * @acts: array of actions to be performed on a match
2348  * @acts_cnt: number of actions
2349  *
2350  * Formats the key (and key_inverse) to be matched from the data passed in,
2351  * along with data from the flow profile. This key/key_inverse pair makes up
2352  * the 'entry' for an ACL flow entry.
2353  */
2354 static enum ice_status
2355 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2356                         struct ice_flow_entry *e, u8 *data,
2357                         struct ice_flow_action *acts, u8 acts_cnt)
2358 {
2359         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2360         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2361         enum ice_status status;
2362         bool cnt_alloc;
2363         u8 prof_id = 0;
2364         u16 i, buf_sz;
2365
2366         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2367         if (status)
2368                 return status;
2369
2370         /* Format the result action */
2371
2372         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2373         if (status)
2374                 return status;
2375
2376         status = ICE_ERR_NO_MEMORY;
2377
2378         e->acts = (struct ice_flow_action *)
2379                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2380                            ICE_NONDMA_TO_NONDMA);
2381
2382         if (!e->acts)
2383                 goto out;
2384
2385         e->acts_cnt = acts_cnt;
2386
2387         /* Format the matching data */
2388         buf_sz = prof->cfg.scen->width;
2389         buf = (u8 *)ice_malloc(hw, buf_sz);
2390         if (!buf)
2391                 goto out;
2392
2393         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2394         if (!dontcare)
2395                 goto out;
2396
2397         /* The 'key' buffer will store both key and key_inverse, so it must be
2398          * twice the size of buf
2399          */
2400         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2401         if (!key)
2402                 goto out;
2403
2404         range_buf = (struct ice_aqc_acl_profile_ranges *)
2405                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2406         if (!range_buf)
2407                 goto out;
2408
2409         /* Set don't care mask to all 1's to start, will zero out used bytes */
2410         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2411
2412         for (i = 0; i < prof->segs_cnt; i++) {
2413                 struct ice_flow_seg_info *seg = &prof->segs[i];
2414                 u64 match = seg->match;
2415                 u16 j;
2416
2417                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2418                         struct ice_flow_fld_info *info;
2419                         const u64 bit = BIT_ULL(j);
2420
2421                         if (!(match & bit))
2422                                 continue;
2423
2424                         info = &seg->fields[j];
2425
2426                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2427                                 ice_flow_acl_frmt_entry_range(j, info,
2428                                                               range_buf, data,
2429                                                               &range);
2430                         else
2431                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2432                                                             dontcare, data);
2433
2434                         match &= ~bit;
2435                 }
2436
2437                 for (j = 0; j < seg->raws_cnt; j++) {
2438                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2439                         u16 dst, src, mask, k;
2440                         bool use_mask = false;
2441
2442                         src = info->src.val;
2443                         dst = info->entry.val -
2444                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2445                         mask = info->src.mask;
2446
2447                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2448                                 use_mask = true;
2449
2450                         for (k = 0; k < info->entry.last; k++, dst++) {
2451                                 buf[dst] = data[src++];
2452                                 if (use_mask)
2453                                         dontcare[dst] = ~data[mask++];
2454                                 else
2455                                         dontcare[dst] = 0;
2456                         }
2457                 }
2458         }
2459
2460         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2461         dontcare[prof->cfg.scen->pid_idx] = 0;
2462
2463         /* Format the buffer for direction flags */
2464         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2465
2466         if (prof->dir == ICE_FLOW_RX)
2467                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2468
2469         if (range) {
2470                 buf[prof->cfg.scen->rng_chk_idx] = range;
2471                 /* Mark any unused range checkers as don't care */
2472                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2473                 e->range_buf = range_buf;
2474         } else {
2475                 ice_free(hw, range_buf);
2476         }
2477
2478         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2479                              buf_sz);
2480         if (status)
2481                 goto out;
2482
2483         e->entry = key;
2484         e->entry_sz = buf_sz * 2;
2485
2486 out:
2487         if (buf)
2488                 ice_free(hw, buf);
2489
2490         if (dontcare)
2491                 ice_free(hw, dontcare);
2492
2493         if (status && key)
2494                 ice_free(hw, key);
2495
2496         if (status && range_buf) {
2497                 ice_free(hw, range_buf);
2498                 e->range_buf = NULL;
2499         }
2500
2501         if (status && e->acts) {
2502                 ice_free(hw, e->acts);
2503                 e->acts = NULL;
2504                 e->acts_cnt = 0;
2505         }
2506
2507         if (status && cnt_alloc)
2508                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2509
2510         return status;
2511 }
2512
2513 /**
2514  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2515  *                                     the compared data.
2516  * @prof: pointer to flow profile
2517  * @e: pointer to the comparing flow entry
2518  * @do_chg_action: decide if we want to change the ACL action
2519  * @do_add_entry: decide if we want to add the new ACL entry
2520  * @do_rem_entry: decide if we want to remove the current ACL entry
2521  *
2522  * Find an ACL scenario entry that matches the compared data. At the same time,
2523  * this function also figures out:
2524  * a/ If we want to change the ACL action
2525  * b/ If we want to add the new ACL entry
2526  * c/ If we want to remove the current ACL entry
2527  */
2528 static struct ice_flow_entry *
2529 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2530                                   struct ice_flow_entry *e, bool *do_chg_action,
2531                                   bool *do_add_entry, bool *do_rem_entry)
2532 {
2533         struct ice_flow_entry *p, *return_entry = NULL;
2534         u8 i, j;
2535
2536         /* Check if:
2537          * a/ There exists an entry with the same matching data but a
2538          *    different priority, then we remove the existing ACL entry and
2539          *    add the new entry to the ACL scenario.
2540          * b/ There exists an entry with the same matching data, priority,
2541          *    and result action, then we do nothing.
2542          * c/ There exists an entry with the same matching data and priority
2543          *    but a different action, then we only change the entry's action.
2544          * d/ Otherwise, we add this new entry to the ACL scenario.
2545          */
2546         *do_chg_action = false;
2547         *do_add_entry = true;
2548         *do_rem_entry = false;
2549         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2550                 if (memcmp(p->entry, e->entry, p->entry_sz))
2551                         continue;
2552
2553                 /* From this point, we have the same matching_data. */
2554                 *do_add_entry = false;
2555                 return_entry = p;
2556
2557                 if (p->priority != e->priority) {
2558                         /* matching data && !priority */
2559                         *do_add_entry = true;
2560                         *do_rem_entry = true;
2561                         break;
2562                 }
2563
2564                 /* From this point, we will have matching_data && priority */
2565                 if (p->acts_cnt != e->acts_cnt)
2566                         *do_chg_action = true;
2567                 for (i = 0; i < p->acts_cnt; i++) {
2568                         bool found_not_match = false;
2569
2570                         for (j = 0; j < e->acts_cnt; j++)
2571                                 if (memcmp(&p->acts[i], &e->acts[j],
2572                                            sizeof(struct ice_flow_action))) {
2573                                         found_not_match = true;
2574                                         break;
2575                                 }
2576
2577                         if (found_not_match) {
2578                                 *do_chg_action = true;
2579                                 break;
2580                         }
2581                 }
2582
2583                 /* (do_chg_action = true) means :
2584                  *    matching_data && priority && !result_action
2585                  * (do_chg_action = false) means :
2586                  *    matching_data && priority && result_action
2587                  */
2588                 break;
2589         }
2590
2591         return return_entry;
2592 }
2593
2594 /**
2595  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2596  * @p: flow priority
2597  */
2598 static enum ice_acl_entry_prior
2599 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2600 {
2601         enum ice_acl_entry_prior acl_prior;
2602
2603         switch (p) {
2604         case ICE_FLOW_PRIO_LOW:
2605                 acl_prior = ICE_LOW;
2606                 break;
2607         case ICE_FLOW_PRIO_NORMAL:
2608                 acl_prior = ICE_NORMAL;
2609                 break;
2610         case ICE_FLOW_PRIO_HIGH:
2611                 acl_prior = ICE_HIGH;
2612                 break;
2613         default:
2614                 acl_prior = ICE_NORMAL;
2615                 break;
2616         }
2617
2618         return acl_prior;
2619 }
2620
2621 /**
2622  * ice_flow_acl_union_rng_chk - Perform union operation between two
2623  *                              range checker buffers
2624  * @dst_buf: pointer to destination range checker buffer
2625  * @src_buf: pointer to source range checker buffer
2626  *
2627  * Perform the union of the dst_buf and src_buf range checker buffers and
2628  * save the result back to dst_buf.
2629  */
2630 static enum ice_status
2631 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2632                            struct ice_aqc_acl_profile_ranges *src_buf)
2633 {
2634         u8 i, j;
2635
2636         if (!dst_buf || !src_buf)
2637                 return ICE_ERR_BAD_PTR;
2638
2639         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2640                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2641                 bool will_populate = false;
2642
2643                 in_data = &src_buf->checker_cfg[i];
2644
2645                 if (!in_data->mask)
2646                         break;
2647
2648                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2649                         cfg_data = &dst_buf->checker_cfg[j];
2650
2651                         if (!cfg_data->mask ||
2652                             !memcmp(cfg_data, in_data,
2653                                     sizeof(struct ice_acl_rng_data))) {
2654                                 will_populate = true;
2655                                 break;
2656                         }
2657                 }
2658
2659                 if (will_populate) {
2660                         ice_memcpy(cfg_data, in_data,
2661                                    sizeof(struct ice_acl_rng_data),
2662                                    ICE_NONDMA_TO_NONDMA);
2663                 } else {
2664                         /* No available slot left to program range checker */
2665                         return ICE_ERR_MAX_LIMIT;
2666                 }
2667         }
2668
2669         return ICE_SUCCESS;
2670 }
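
/* A minimal, hypothetical sketch of the union operation above; the checker
 * values are made up and field widths/endianness are simplified:
 *
 *        struct ice_aqc_acl_profile_ranges dst = { 0 }, src = { 0 };
 *        enum ice_status status;
 *
 *        dst.checker_cfg[0].mask = 0xffff;     // already programmed in dst
 *        src.checker_cfg[0].mask = 0x00ff;     // new checker to merge in
 *
 *        status = ice_flow_acl_union_rng_chk(&dst, &src);
 *
 * On success the new checker is copied into the first free slot of dst (here
 * slot 1); if all ICE_AQC_ACL_PROF_RANGES_NUM_CFG slots are already in use,
 * ICE_ERR_MAX_LIMIT is returned.
 */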
2671
2672 /**
2673  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2674  * @hw: pointer to the hardware structure
2675  * @prof: pointer to flow profile
2676  * @entry: double pointer to the flow entry
2677  *
2678  * Look at the entries currently added to the corresponding ACL scenario,
2679  * then perform the matching logic to decide whether to add the new entry,
2680  * modify an existing one, or do nothing.
2681  */
2682 static enum ice_status
2683 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2684                                  struct ice_flow_entry **entry)
2685 {
2686         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2687         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2688         struct ice_acl_act_entry *acts = NULL;
2689         struct ice_flow_entry *exist;
2690         enum ice_status status = ICE_SUCCESS;
2691         struct ice_flow_entry *e;
2692         u8 i;
2693
2694         if (!entry || !(*entry) || !prof)
2695                 return ICE_ERR_BAD_PTR;
2696
2697         e = *(entry);
2698
2699         do_chg_rng_chk = false;
2700         if (e->range_buf) {
2701                 u8 prof_id = 0;
2702
2703                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2704                                               &prof_id);
2705                 if (status)
2706                         return status;
2707
2708                 /* Query the current range-checker value in FW */
2709                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2710                                                    NULL);
2711                 if (status)
2712                         return status;
2713                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2714                            sizeof(struct ice_aqc_acl_profile_ranges),
2715                            ICE_NONDMA_TO_NONDMA);
2716
2717                 /* Generate the new range-checker value */
2718                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2719                 if (status)
2720                         return status;
2721
2722                 /* Reconfigure the range check if the buffer is changed. */
2723                 do_chg_rng_chk = false;
2724                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2725                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2726                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2727                                                           &cfg_rng_buf, NULL);
2728                         if (status)
2729                                 return status;
2730
2731                         do_chg_rng_chk = true;
2732                 }
2733         }
2734
2735         /* Figure out if we want to (change the ACL action) and/or
2736          * (Add the new ACL entry) and/or (Remove the current ACL entry)
2737          */
2738         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2739                                                   &do_add_entry, &do_rem_entry);
2740
2741         if (do_rem_entry) {
2742                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2743                 if (status)
2744                         return status;
2745         }
2746
2747         /* Prepare the result action buffer */
2748         acts = (struct ice_acl_act_entry *)ice_calloc
2749                 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
             if (!acts)
                     return ICE_ERR_NO_MEMORY;
2750         for (i = 0; i < e->acts_cnt; i++)
2751                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2752                            sizeof(struct ice_acl_act_entry),
2753                            ICE_NONDMA_TO_NONDMA);
2754
2755         if (do_add_entry) {
2756                 enum ice_acl_entry_prior prior;
2757                 u8 *keys, *inverts;
2758                 u16 entry_idx;
2759
2760                 keys = (u8 *)e->entry;
2761                 inverts = keys + (e->entry_sz / 2);
2762                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2763
2764                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2765                                            inverts, acts, e->acts_cnt,
2766                                            &entry_idx);
2767                 if (status)
2768                         goto out;
2769
2770                 e->scen_entry_idx = entry_idx;
2771                 LIST_ADD(&e->l_entry, &prof->entries);
2772         } else {
2773                 if (do_chg_action) {
2774                         /* For the action memory info, update the SW's copy
2775                          * of the existing entry with e's action memory info
2776                          */
2777                         ice_free(hw, exist->acts);
2778                         exist->acts_cnt = e->acts_cnt;
2779                         exist->acts = (struct ice_flow_action *)
2780                                 ice_calloc(hw, exist->acts_cnt,
2781                                            sizeof(struct ice_flow_action));
2782
2783                         if (!exist->acts) {
2784                                 status = ICE_ERR_NO_MEMORY;
2785                                 goto out;
2786                         }
2787
2788                         ice_memcpy(exist->acts, e->acts,
2789                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2790                                    ICE_NONDMA_TO_NONDMA);
2791
2792                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2793                                                   e->acts_cnt,
2794                                                   exist->scen_entry_idx);
2795                         if (status)
2796                                 goto out;
2797                 }
2798
2799                 if (do_chg_rng_chk) {
2800                         /* In this case, we want to update the range checker
2801                          * information of the existing entry
2802                          */
2803                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2804                                                             e->range_buf);
2805                         if (status)
2806                                 goto out;
2807                 }
2808
2809                 /* As we don't add the new entry to our SW DB, deallocate
2810                  * its memory and return the existing entry to the caller
2811                  */
2812                 ice_dealloc_flow_entry(hw, e);
2813                 *(entry) = exist;
2814         }
2815 out:
2816         if (acts)
2817                 ice_free(hw, acts);
2818
2819         return status;
2820 }
2821
2822 /**
2823  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2824  * @hw: pointer to the hardware structure
2825  * @prof: pointer to flow profile
2826  * @e: double pointer to the flow entry
2827  */
2828 static enum ice_status
2829 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2830                             struct ice_flow_entry **e)
2831 {
2832         enum ice_status status;
2833
2834         ice_acquire_lock(&prof->entries_lock);
2835         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2836         ice_release_lock(&prof->entries_lock);
2837
2838         return status;
2839 }
2840
2841 /**
2842  * ice_flow_add_entry - Add a flow entry
2843  * @hw: pointer to the HW struct
2844  * @blk: classification stage
2845  * @prof_id: ID of the profile to add a new flow entry to
2846  * @entry_id: unique ID to identify this flow entry
2847  * @vsi_handle: software VSI handle for the flow entry
2848  * @prio: priority of the flow entry
2849  * @data: pointer to a data buffer containing flow entry's match values/masks
2850  * @acts: arrays of actions to be performed on a match
2851  * @acts_cnt: number of actions
2852  * @entry_h: pointer to buffer that receives the new flow entry's handle
2853  */
2854 enum ice_status
2855 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2856                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2857                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
2858                    u64 *entry_h)
2859 {
2860         struct ice_flow_entry *e = NULL;
2861         struct ice_flow_prof *prof;
2862         enum ice_status status = ICE_SUCCESS;
2863
2864         /* ACL entries must indicate an action */
2865         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2866                 return ICE_ERR_PARAM;
2867
2868         /* No flow entry data is expected for RSS */
2869         if (!entry_h || (!data && blk != ICE_BLK_RSS))
2870                 return ICE_ERR_BAD_PTR;
2871
2872         if (!ice_is_vsi_valid(hw, vsi_handle))
2873                 return ICE_ERR_PARAM;
2874
2875         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2876
2877         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2878         if (!prof) {
2879                 status = ICE_ERR_DOES_NOT_EXIST;
2880         } else {
2881                 /* Allocate memory for the entry being added and associate
2882                  * the VSI to the found flow profile
2883                  */
2884                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2885                 if (!e)
2886                         status = ICE_ERR_NO_MEMORY;
2887                 else
2888                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2889         }
2890
2891         ice_release_lock(&hw->fl_profs_locks[blk]);
2892         if (status)
2893                 goto out;
2894
2895         e->id = entry_id;
2896         e->vsi_handle = vsi_handle;
2897         e->prof = prof;
2898         e->priority = prio;
2899
2900         switch (blk) {
2901         case ICE_BLK_FD:
2902         case ICE_BLK_RSS:
2903                 break;
2904         case ICE_BLK_ACL:
2905                 /* ACL will handle the entry management */
2906                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
2907                                                  acts_cnt);
2908                 if (status)
2909                         goto out;
2910
2911                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
2912                 if (status)
2913                         goto out;
2914
2915                 break;
2916         default:
2917                 status = ICE_ERR_NOT_IMPL;
2918                 goto out;
2919         }
2920
2921         if (blk != ICE_BLK_ACL) {
2922                 /* ACL will handle the entry management */
2923                 ice_acquire_lock(&prof->entries_lock);
2924                 LIST_ADD(&e->l_entry, &prof->entries);
2925                 ice_release_lock(&prof->entries_lock);
2926         }
2927
2928         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
2929
2930 out:
2931         if (status && e) {
2932                 if (e->entry)
2933                         ice_free(hw, e->entry);
2934                 ice_free(hw, e);
2935         }
2936
2937         return status;
2938 }
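
/* A minimal usage sketch for the entry add/remove API above. The hw, prof_id,
 * entry_id, vsi_handle and match_buf values are hypothetical and assumed to
 * have been prepared by the caller (e.g. via ice_flow_add_prof() and
 * ice_flow_set_fld()):
 *
 *        u64 entry_h = ICE_FLOW_ENTRY_HANDLE_INVAL;
 *        enum ice_status status;
 *
 *        status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, entry_id,
 *                                    vsi_handle, ICE_FLOW_PRIO_NORMAL,
 *                                    match_buf, NULL, 0, &entry_h);
 *        if (!status)
 *                status = ice_flow_rem_entry(hw, ICE_BLK_FD, entry_h);
 *
 * For ICE_BLK_ACL at least one action must be supplied via acts/acts_cnt,
 * while ICE_BLK_RSS expects no match data at all.
 */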
2939
2940 /**
2941  * ice_flow_rem_entry - Remove a flow entry
2942  * @hw: pointer to the HW struct
2943  * @blk: classification stage
2944  * @entry_h: handle to the flow entry to be removed
2945  */
2946 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2947                                    u64 entry_h)
2948 {
2949         struct ice_flow_entry *entry;
2950         struct ice_flow_prof *prof;
2951         enum ice_status status = ICE_SUCCESS;
2952
2953         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2954                 return ICE_ERR_PARAM;
2955
2956         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2957
2958         /* Retain the pointer to the flow profile as the entry will be freed */
2959         prof = entry->prof;
2960
2961         if (prof) {
2962                 ice_acquire_lock(&prof->entries_lock);
2963                 status = ice_flow_rem_entry_sync(hw, blk, entry);
2964                 ice_release_lock(&prof->entries_lock);
2965         }
2966
2967         return status;
2968 }
2969
2970 /**
2971  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2972  * @seg: packet segment the field being set belongs to
2973  * @fld: field to be set
2974  * @field_type: type of the field
2975  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2976  *           entry's input buffer
2977  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2978  *            input buffer
2979  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2980  *            entry's input buffer
2981  *
2982  * This helper function stores information of a field being matched, including
2983  * the type of the field and the locations of the value to match, the mask,
2984  * and the upper-bound value from the start of the input buffer for a flow entry.
2985  * This function should only be used for fixed-size data structures.
2986  *
2987  * This function also opportunistically determines the protocol headers to be
2988  * present based on the fields being set. Some fields cannot be used alone to
2989  * determine the protocol headers present. Sometimes, fields for particular
2990  * protocol headers are not matched. In those cases, the protocol headers
2991  * must be explicitly set.
2992  */
2993 static void
2994 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2995                      enum ice_flow_fld_match_type field_type, u16 val_loc,
2996                      u16 mask_loc, u16 last_loc)
2997 {
2998         u64 bit = BIT_ULL(fld);
2999
3000         seg->match |= bit;
3001         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3002                 seg->range |= bit;
3003
3004         seg->fields[fld].type = field_type;
3005         seg->fields[fld].src.val = val_loc;
3006         seg->fields[fld].src.mask = mask_loc;
3007         seg->fields[fld].src.last = last_loc;
3008
3009         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3010 }
3011
3012 /**
3013  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3014  * @seg: packet segment the field being set belongs to
3015  * @fld: field to be set
3016  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3017  *           entry's input buffer
3018  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3019  *            input buffer
3020  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3021  *            entry's input buffer
3022  * @range: indicate if field being matched is to be in a range
3023  *
3024  * This function specifies the locations, in the form of byte offsets from the
3025  * start of the input buffer for a flow entry, from where the value to match,
3026  * the mask value, and upper value can be extracted. These locations are then
3027  * stored in the flow profile. When adding a flow entry associated with the
3028  * flow profile, these locations will be used to quickly extract the values and
3029  * create the content of a match entry. This function should only be used for
3030  * fixed-size data structures.
3031  */
3032 void
3033 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3034                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3035 {
3036         enum ice_flow_fld_match_type t = range ?
3037                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3038
3039         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3040 }
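
/* A minimal sketch of describing a match field with ice_flow_set_fld(). The
 * input-buffer layout below (struct fd_match_buf) is purely hypothetical;
 * the locations passed in are byte offsets into that caller-defined buffer:
 *
 *        struct fd_match_buf {
 *                u32 ip4_src;            // value to match
 *                u32 ip4_src_msk;        // mask for the value
 *        };
 *
 *        ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *                         offsetof(struct fd_match_buf, ip4_src),
 *                         offsetof(struct fd_match_buf, ip4_src_msk),
 *                         ICE_FLOW_FLD_OFF_INVAL, false);
 *
 * Passing ICE_FLOW_FLD_OFF_INVAL for all three locations, as the RSS code
 * later in this file does, marks the field for matching without supplying
 * any entry data.
 */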
3041
3042 /**
3043  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3044  * @seg: packet segment the field being set belongs to
3045  * @fld: field to be set
3046  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3047  *           entry's input buffer
3048  * @pref_loc: location of prefix value from entry's input buffer
3049  * @pref_sz: size of the location holding the prefix value
3050  *
3051  * This function specifies the locations, in the form of byte offsets from the
3052  * start of the input buffer for a flow entry, from where the value to match
3053  * and the IPv4 prefix value can be extracted. These locations are then stored
3054  * in the flow profile. When adding flow entries to the associated flow profile,
3055  * these locations can be used to quickly extract the values to create the
3056  * content of a match entry. This function should only be used for fixed-size
3057  * data structures.
3058  */
3059 void
3060 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3061                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3062 {
3063         /* For this type of field, the "mask" location is for the prefix value's
3064          * location and the "last" location is for the size of the location of
3065          * the prefix value.
3066          */
3067         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3068                              pref_loc, (u16)pref_sz);
3069 }
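
/* A minimal sketch for prefix-style matching, reusing a hypothetical
 * caller-defined buffer layout. Both locations are byte offsets into the
 * entry's input buffer and pref_sz is the size of the prefix value field:
 *
 *        struct fd_pfx_buf {
 *                u32 ip4_dst;    // IPv4 address value to match
 *                u8 ip4_pfx;     // IPv4 prefix value
 *        };
 *
 *        ice_flow_set_fld_prefix(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
 *                                offsetof(struct fd_pfx_buf, ip4_dst),
 *                                offsetof(struct fd_pfx_buf, ip4_pfx),
 *                                sizeof(u8));
 */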
3070
3071 /**
3072  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3073  * @seg: packet segment the field being set belongs to
3074  * @off: offset of the raw field from the beginning of the segment in bytes
3075  * @len: length of the raw pattern to be matched
3076  * @val_loc: location of the value to match from entry's input buffer
3077  * @mask_loc: location of mask value from entry's input buffer
3078  *
3079  * This function specifies the offset of the raw field to be matched from the
3080  * beginning of the specified packet segment, and the locations, in the form of
3081  * byte offsets from the start of the input buffer for a flow entry, from where
3082  * the value to match and the mask value can be extracted. These locations are
3083  * then stored in the flow profile. When adding flow entries to the associated
3084  * flow profile, these locations can be used to quickly extract the values to
3085  * create the content of a match entry. This function should only be used for
3086  * fixed-size data structures.
3087  */
3088 void
3089 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3090                      u16 val_loc, u16 mask_loc)
3091 {
3092         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3093                 seg->raws[seg->raws_cnt].off = off;
3094                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3095                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3096                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3097                 /* The "last" field is used to store the length of the field */
3098                 seg->raws[seg->raws_cnt].info.src.last = len;
3099         }
3100
3101         /* Overflows of "raws" will be handled as an error condition later in
3102          * the flow when this information is processed.
3103          */
3104         seg->raws_cnt++;
3105 }
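
/* A minimal sketch of adding a raw match: a hypothetical 2-byte pattern
 * located 4 bytes into the packet segment, with the value and mask taken
 * from a caller-defined buffer layout:
 *
 *        struct raw_match_buf {
 *                u8 raw_val[2];
 *                u8 raw_msk[2];
 *        };
 *
 *        ice_flow_add_fld_raw(seg, 4, 2,
 *                             offsetof(struct raw_match_buf, raw_val),
 *                             offsetof(struct raw_match_buf, raw_msk));
 *
 * Calls beyond ICE_FLOW_SEG_RAW_FLD_MAX still increment raws_cnt, and the
 * overflow is reported as an error later when the segment is processed.
 */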
3106
3107 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3108         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3109
3110 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3111         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3112
3113 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3114         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3115          ICE_FLOW_SEG_HDR_SCTP)
3116
3117 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3118         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3119          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3120          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3121
3122 /**
3123  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3124  * @segs: pointer to the flow field segment(s)
3125  * @hash_fields: fields to be hashed on for the segment(s)
3126  * @flow_hdr: protocol header fields within a packet segment
3127  *
3128  * Helper function to extract fields from the hash bitmap and use the flow
3129  * header value to set the flow field segment for further use in flow
3130  * profile entry addition or removal.
3131  */
3132 static enum ice_status
3133 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3134                           u32 flow_hdr)
3135 {
3136         u64 val = hash_fields;
3137         u8 i;
3138
3139         for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3140                 u64 bit = BIT_ULL(i);
3141
3142                 if (val & bit) {
3143                         ice_flow_set_fld(segs, (enum ice_flow_field)i,
3144                                          ICE_FLOW_FLD_OFF_INVAL,
3145                                          ICE_FLOW_FLD_OFF_INVAL,
3146                                          ICE_FLOW_FLD_OFF_INVAL, false);
3147                         val &= ~bit;
3148                 }
3149         }
3150         ICE_FLOW_SET_HDRS(segs, flow_hdr);
3151
3152         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3153             ~ICE_FLOW_RSS_HDRS_INNER_MASK)
3154                 return ICE_ERR_PARAM;
3155
3156         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3157         if (val && !ice_is_pow2(val))
3158                 return ICE_ERR_CFG;
3159
3160         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3161         if (val && !ice_is_pow2(val))
3162                 return ICE_ERR_CFG;
3163
3164         return ICE_SUCCESS;
3165 }
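
/* The checks above allow at most one L3 and one L4 protocol header bit per
 * segment. For example, with a hypothetical hash_fields bitmap, hashing over
 * IPv4/TCP is accepted:
 *
 *        status = ice_flow_set_rss_seg_info(&segs[0], hash_fields,
 *                                           ICE_FLOW_SEG_HDR_IPV4 |
 *                                           ICE_FLOW_SEG_HDR_TCP);
 *
 * whereas requesting ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 in one
 * segment fails the power-of-two check and returns ICE_ERR_CFG.
 */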
3166
3167 /**
3168  * ice_rem_vsi_rss_list - remove VSI from RSS list
3169  * @hw: pointer to the hardware structure
3170  * @vsi_handle: software VSI handle
3171  *
3172  * Remove the VSI from all RSS configurations in the list.
3173  */
3174 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3175 {
3176         struct ice_rss_cfg *r, *tmp;
3177
3178         if (LIST_EMPTY(&hw->rss_list_head))
3179                 return;
3180
3181         ice_acquire_lock(&hw->rss_locks);
3182         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3183                                  ice_rss_cfg, l_entry)
3184                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3185                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3186                                 LIST_DEL(&r->l_entry);
3187                                 ice_free(hw, r);
3188                         }
3189         ice_release_lock(&hw->rss_locks);
3190 }
3191
3192 /**
3193  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3194  * @hw: pointer to the hardware structure
3195  * @vsi_handle: software VSI handle
3196  *
3197  * This function will iterate through all flow profiles and disassociate
3198  * the VSI from each profile. If a flow profile has no VSIs left, it will
3199  * be removed.
3200  */
3201 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3202 {
3203         const enum ice_block blk = ICE_BLK_RSS;
3204         struct ice_flow_prof *p, *t;
3205         enum ice_status status = ICE_SUCCESS;
3206
3207         if (!ice_is_vsi_valid(hw, vsi_handle))
3208                 return ICE_ERR_PARAM;
3209
3210         if (LIST_EMPTY(&hw->fl_profs[blk]))
3211                 return ICE_SUCCESS;
3212
3213         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3214         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3215                                  l_entry)
3216                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3217                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3218                         if (status)
3219                                 break;
3220
3221                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3222                                 status = ice_flow_rem_prof_sync(hw, blk, p);
3223                                 if (status)
3224                                         break;
3225                         }
3226                 }
3227         ice_release_lock(&hw->fl_profs_locks[blk]);
3228
3229         return status;
3230 }
3231
3232 /**
3233  * ice_rem_rss_list - remove RSS configuration from list
3234  * @hw: pointer to the hardware structure
3235  * @vsi_handle: software VSI handle
3236  * @prof: pointer to flow profile
3237  *
3238  * Assumption: lock has already been acquired for RSS list
3239  */
3240 static void
3241 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3242 {
3243         struct ice_rss_cfg *r, *tmp;
3244
3245         /* Search for RSS hash fields associated to the VSI that match the
3246          * hash configurations associated to the flow profile. If found,
3247          * remove it from the VSI context's RSS entry list and delete it.
3248          */
3249         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3250                                  ice_rss_cfg, l_entry)
3251                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3252                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3253                         ice_clear_bit(vsi_handle, r->vsis);
3254                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3255                                 LIST_DEL(&r->l_entry);
3256                                 ice_free(hw, r);
3257                         }
3258                         return;
3259                 }
3260 }
3261
3262 /**
3263  * ice_add_rss_list - add RSS configuration to list
3264  * @hw: pointer to the hardware structure
3265  * @vsi_handle: software VSI handle
3266  * @prof: pointer to flow profile
3267  *
3268  * Assumption: lock has already been acquired for RSS list
3269  */
3270 static enum ice_status
3271 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3272 {
3273         struct ice_rss_cfg *r, *rss_cfg;
3274
3275         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3276                             ice_rss_cfg, l_entry)
3277                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3278                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3279                         ice_set_bit(vsi_handle, r->vsis);
3280                         return ICE_SUCCESS;
3281                 }
3282
3283         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3284         if (!rss_cfg)
3285                 return ICE_ERR_NO_MEMORY;
3286
3287         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3288         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3289         rss_cfg->symm = prof->cfg.symm;
3290         ice_set_bit(vsi_handle, rss_cfg->vsis);
3291
3292         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3293
3294         return ICE_SUCCESS;
3295 }
3296
3297 #define ICE_FLOW_PROF_HASH_S    0
3298 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3299 #define ICE_FLOW_PROF_HDR_S     32
3300 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3301 #define ICE_FLOW_PROF_ENCAP_S   63
3302 #define ICE_FLOW_PROF_ENCAP_M   (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
3303
3304 #define ICE_RSS_OUTER_HEADERS   1
3305 #define ICE_RSS_INNER_HEADERS   2
3306
3307 /* Flow profile ID format:
3308  * [0:31] - Packet match fields
3309  * [32:62] - Protocol header
3310  * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
3311  */
3312 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3313         (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3314               (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3315               ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
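
/* Worked example with hypothetical inputs: hash = 0x0000000C,
 * hdr = 0x00000004 and segs_cnt = 2 (an inner/tunneled segment) give
 *
 *        ICE_FLOW_GEN_PROFID(0x0000000CULL, 0x00000004, 2) =
 *                0x0000000C | (0x00000004ULL << 32) | BIT_ULL(63) =
 *                0x800000040000000C
 *
 * i.e. the match fields land in bits [0:31], the protocol header bits in
 * [32:62] and the encapsulation flag in bit 63.
 */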
3316
3317 static void
3318 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3319 {
3320         u32 s = ((src % 4) << 3); /* byte shift */
3321         u32 v = dst | 0x80; /* value to program */
3322         u8 i = src / 4; /* register index */
3323         u32 reg;
3324
3325         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3326         reg = (reg & ~(0xff << s)) | (v << s);
3327         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3328 }
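
/* Worked example for the register math above, with hypothetical indices
 * src = 5 and dst = 9:
 *
 *        i = 5 / 4 = 1             // GLQF_HSYMM register index
 *        s = (5 % 4) << 3 = 8      // shift to byte 1 of that register
 *        v = 9 | 0x80 = 0x89       // destination index with the 0x80 flag
 *
 * so byte 1 of GLQF_HSYMM(prof_id, 1) is rewritten to 0x89 while the other
 * bytes are preserved.
 */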
3329
3330 static void
3331 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3332 {
3333         int fv_last_word =
3334                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3335         int i;
3336
3337         for (i = 0; i < len; i++) {
3338                 ice_rss_config_xor_word(hw, prof_id,
3339                                         /* Yes, the field vector in GLQF_HSYMM
3340                                          * and GLQF_HINSET is in reverse order!
3341                                          */
3342                                         fv_last_word - (src + i),
3343                                         fv_last_word - (dst + i));
3344                 ice_rss_config_xor_word(hw, prof_id,
3345                                         fv_last_word - (dst + i),
3346                                         fv_last_word - (src + i));
3347         }
3348 }
3349
3350 static void
3351 ice_rss_update_symm(struct ice_hw *hw,
3352                     struct ice_flow_prof *prof)
3353 {
3354         struct ice_prof_map *map;
3355         u8 prof_id, m;
3356
3357         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3358         prof_id = map->prof_id;
3359
3360         /* clear to default */
3361         for (m = 0; m < 6; m++)
3362                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3363         if (prof->cfg.symm) {
3364                 struct ice_flow_seg_info *seg =
3365                         &prof->segs[prof->segs_cnt - 1];
3366
3367                 struct ice_flow_seg_xtrct *ipv4_src =
3368                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3369                 struct ice_flow_seg_xtrct *ipv4_dst =
3370                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3371                 struct ice_flow_seg_xtrct *ipv6_src =
3372                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3373                 struct ice_flow_seg_xtrct *ipv6_dst =
3374                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3375
3376                 struct ice_flow_seg_xtrct *tcp_src =
3377                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3378                 struct ice_flow_seg_xtrct *tcp_dst =
3379                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3380
3381                 struct ice_flow_seg_xtrct *udp_src =
3382                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3383                 struct ice_flow_seg_xtrct *udp_dst =
3384                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3385
3386                 struct ice_flow_seg_xtrct *sctp_src =
3387                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3388                 struct ice_flow_seg_xtrct *sctp_dst =
3389                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3390
3391                 /* xor IPv4 */
3392                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3393                         ice_rss_config_xor(hw, prof_id,
3394                                            ipv4_src->idx, ipv4_dst->idx, 2);
3395
3396                 /* xor IPv6 */
3397                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3398                         ice_rss_config_xor(hw, prof_id,
3399                                            ipv6_src->idx, ipv6_dst->idx, 8);
3400
3401                 /* xor TCP */
3402                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3403                         ice_rss_config_xor(hw, prof_id,
3404                                            tcp_src->idx, tcp_dst->idx, 1);
3405
3406                 /* xor UDP */
3407                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3408                         ice_rss_config_xor(hw, prof_id,
3409                                            udp_src->idx, udp_dst->idx, 1);
3410
3411                 /* xor SCTP */
3412                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3413                         ice_rss_config_xor(hw, prof_id,
3414                                            sctp_src->idx, sctp_dst->idx, 1);
3415         }
3416 }
3417
3418 /**
3419  * ice_add_rss_cfg_sync - add an RSS configuration
3420  * @hw: pointer to the hardware structure
3421  * @vsi_handle: software VSI handle
3422  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3423  * @addl_hdrs: protocol header fields
3424  * @segs_cnt: packet segment count
3425  * @symm: symmetric hash enable/disable
3426  *
3427  * Assumption: lock has already been acquired for RSS list
3428  */
3429 static enum ice_status
3430 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3431                      u32 addl_hdrs, u8 segs_cnt, bool symm)
3432 {
3433         const enum ice_block blk = ICE_BLK_RSS;
3434         struct ice_flow_prof *prof = NULL;
3435         struct ice_flow_seg_info *segs;
3436         enum ice_status status;
3437
3438         if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3439                 return ICE_ERR_PARAM;
3440
3441         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3442                                                       sizeof(*segs));
3443         if (!segs)
3444                 return ICE_ERR_NO_MEMORY;
3445
3446         /* Construct the packet segment info from the hashed fields */
3447         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3448                                            addl_hdrs);
3449         if (status)
3450                 goto exit;
3451
3452         /* Search for a flow profile that has matching headers, hash fields
3453          * and has the input VSI associated to it. If found, no further
3454          * operations required and exit.
3455          */
3456         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3457                                         vsi_handle,
3458                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
3459                                         ICE_FLOW_FIND_PROF_CHK_VSI);
3460         if (prof) {
3461                 if (prof->cfg.symm == symm)
3462                         goto exit;
3463                 prof->cfg.symm = symm;
3464                 goto update_symm;
3465         }
3466
3467         /* Check if a flow profile exists with the same protocol headers and
3468          * associated with the input VSI. If so disassociate the VSI from
3469          * this profile. The VSI will be added to a new profile created with
3470          * the protocol header and new hash field configuration.
3471          */
3472         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3473                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3474         if (prof) {
3475                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3476                 if (!status)
3477                         ice_rem_rss_list(hw, vsi_handle, prof);
3478                 else
3479                         goto exit;
3480
3481                 /* Remove profile if it has no VSIs associated */
3482                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3483                         status = ice_flow_rem_prof(hw, blk, prof->id);
3484                         if (status)
3485                                 goto exit;
3486                 }
3487         }
3488
3489         /* Search for a profile that has the same match fields only. If one
3490          * exists, then associate the VSI to this profile.
3491          */
3492         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3493                                         vsi_handle,
3494                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3495         if (prof) {
3496                 if (prof->cfg.symm == symm) {
3497                         status = ice_flow_assoc_prof(hw, blk, prof,
3498                                                      vsi_handle);
3499                         if (!status)
3500                                 status = ice_add_rss_list(hw, vsi_handle,
3501                                                           prof);
3502                 } else {
3503                         /* if a profile exists but with a different symmetric
3504                          * requirement, just return an error.
3505                          */
3506                         status = ICE_ERR_NOT_SUPPORTED;
3507                 }
3508                 goto exit;
3509         }
3510
3511         /* Create a new flow profile with generated profile and packet
3512          * segment information.
3513          */
3514         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3515                                    ICE_FLOW_GEN_PROFID(hashed_flds,
3516                                                        segs[segs_cnt - 1].hdrs,
3517                                                        segs_cnt),
3518                                    segs, segs_cnt, NULL, 0, &prof);
3519         if (status)
3520                 goto exit;
3521
3522         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3523         /* If association to a new flow profile failed then this profile can
3524          * be removed.
3525          */
3526         if (status) {
3527                 ice_flow_rem_prof(hw, blk, prof->id);
3528                 goto exit;
3529         }
3530
3531         status = ice_add_rss_list(hw, vsi_handle, prof);
3532
3533         prof->cfg.symm = symm;
3534
3535 update_symm:
3536         ice_rss_update_symm(hw, prof);
3537
3538 exit:
3539         ice_free(hw, segs);
3540         return status;
3541 }
3542
3543 /**
3544  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3545  * @hw: pointer to the hardware structure
3546  * @vsi_handle: software VSI handle
3547  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3548  * @addl_hdrs: protocol header fields
3549  * @symm: symmetric hash enable/disable
3550  *
3551  * This function will generate a flow profile based on the input fields to
3552  * hash on and the flow type, and will use the VSI number to add
3553  * a flow entry to the profile.
3554  */
3555 enum ice_status
3556 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3557                 u32 addl_hdrs, bool symm)
3558 {
3559         enum ice_status status;
3560
3561         if (hashed_flds == ICE_HASH_INVALID ||
3562             !ice_is_vsi_valid(hw, vsi_handle))
3563                 return ICE_ERR_PARAM;
3564
3565         ice_acquire_lock(&hw->rss_locks);
3566         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3567                                       ICE_RSS_OUTER_HEADERS, symm);
3568         if (!status)
3569                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3570                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3571                                               symm);
3572         ice_release_lock(&hw->rss_locks);
3573
3574         return status;
3575 }
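
/* A minimal usage sketch for the RSS configuration API above. The vsi_handle
 * is hypothetical and ICE_FLOW_HASH_IPV4 stands for one of the ICE_FLOW_HASH_*
 * bitmaps referred to in the comments:
 *
 *        status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
 *                                 ICE_FLOW_SEG_HDR_IPV4, false);
 *
 * The same (hashed_flds, addl_hdrs) pair can later be passed to
 * ice_rem_rss_cfg() to undo the configuration, and ice_replay_rss_cfg()
 * re-applies the configurations recorded in hw->rss_list_head for a VSI.
 */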
3576
3577 /**
3578  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3579  * @hw: pointer to the hardware structure
3580  * @vsi_handle: software VSI handle
3581  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3582  * @addl_hdrs: Protocol header fields within a packet segment
3583  * @segs_cnt: packet segment count
3584  *
3585  * Assumption: lock has already been acquired for RSS list
3586  */
3587 static enum ice_status
3588 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3589                      u32 addl_hdrs, u8 segs_cnt)
3590 {
3591         const enum ice_block blk = ICE_BLK_RSS;
3592         struct ice_flow_seg_info *segs;
3593         struct ice_flow_prof *prof;
3594         enum ice_status status;
3595
3596         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3597                                                       sizeof(*segs));
3598         if (!segs)
3599                 return ICE_ERR_NO_MEMORY;
3600
3601         /* Construct the packet segment info from the hashed fields */
3602         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3603                                            addl_hdrs);
3604         if (status)
3605                 goto out;
3606
3607         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3608                                         vsi_handle,
3609                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3610         if (!prof) {
3611                 status = ICE_ERR_DOES_NOT_EXIST;
3612                 goto out;
3613         }
3614
3615         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3616         if (status)
3617                 goto out;
3618
3619         /* Remove RSS configuration from VSI context before deleting
3620          * the flow profile.
3621          */
3622         ice_rem_rss_list(hw, vsi_handle, prof);
3623
3624         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3625                 status = ice_flow_rem_prof(hw, blk, prof->id);
3626
3627 out:
3628         ice_free(hw, segs);
3629         return status;
3630 }
3631
3632 /**
3633  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3634  * @hw: pointer to the hardware structure
3635  * @vsi_handle: software VSI handle
3636  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3637  * @addl_hdrs: Protocol header fields within a packet segment
3638  *
3639  * This function will look up the flow profile based on the input
3640  * hash field bitmap, iterate through the profile entry list of
3641  * that profile and find the entry associated with the input VSI to be
3642  * removed. Calls are made to the underlying flow APIs which will in
3643  * turn build or update buffers for the RSS XLT1 section.
3644  */
3645 enum ice_status
3646 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3647                 u32 addl_hdrs)
3648 {
3649         enum ice_status status;
3650
3651         if (hashed_flds == ICE_HASH_INVALID ||
3652             !ice_is_vsi_valid(hw, vsi_handle))
3653                 return ICE_ERR_PARAM;
3654
3655         ice_acquire_lock(&hw->rss_locks);
3656         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3657                                       ICE_RSS_OUTER_HEADERS);
3658         if (!status)
3659                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3660                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3661         ice_release_lock(&hw->rss_locks);
3662
3663         return status;
3664 }
3665
3666 /**
3667  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3668  * @hw: pointer to the hardware structure
3669  * @vsi_handle: software VSI handle
3670  */
3671 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3672 {
3673         enum ice_status status = ICE_SUCCESS;
3674         struct ice_rss_cfg *r;
3675
3676         if (!ice_is_vsi_valid(hw, vsi_handle))
3677                 return ICE_ERR_PARAM;
3678
3679         ice_acquire_lock(&hw->rss_locks);
3680         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3681                             ice_rss_cfg, l_entry) {
3682                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3683                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3684                                                       r->hashed_flds,
3685                                                       r->packet_hdr,
3686                                                       ICE_RSS_OUTER_HEADERS,
3687                                                       r->symm);
3688                         if (status)
3689                                 break;
3690                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3691                                                       r->hashed_flds,
3692                                                       r->packet_hdr,
3693                                                       ICE_RSS_INNER_HEADERS,
3694                                                       r->symm);
3695                         if (status)
3696                                 break;
3697                 }
3698         }
3699         ice_release_lock(&hw->rss_locks);
3700
3701         return status;
3702 }
3703
3704 /**
3705  * ice_get_rss_cfg - returns hashed fields for the given header types
3706  * @hw: pointer to the hardware structure
3707  * @vsi_handle: software VSI handle
3708  * @hdrs: protocol header type
3709  *
3710  * This function will return the match fields of the first instance of a flow
3711  * profile having the given header types and containing the input VSI.
3712  */
3713 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3714 {
3715         struct ice_rss_cfg *r, *rss_cfg = NULL;
3716
3717         /* verify that the protocol header is non-zero and the VSI is valid */
3718         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3719                 return ICE_HASH_INVALID;
3720
3721         ice_acquire_lock(&hw->rss_locks);
3722         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3723                             ice_rss_cfg, l_entry)
3724                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3725                     r->packet_hdr == hdrs) {
3726                         rss_cfg = r;
3727                         break;
3728                 }
3729         ice_release_lock(&hw->rss_locks);
3730
3731         return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
3732 }
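
/* A minimal usage sketch, assuming a valid vsi_handle: query which fields
 * are currently hashed for plain IPv4 packets on that VSI:
 *
 *        u64 hashed_flds = ice_get_rss_cfg(hw, vsi_handle,
 *                                          ICE_FLOW_SEG_HDR_IPV4);
 *        if (hashed_flds == ICE_HASH_INVALID)
 *                ; // no matching RSS configuration recorded for this VSI
 */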