030a55ba7d8332e3e39209edaded7be7a76544e9
[dpdk.git] / drivers / net / ice / base / ice_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
/* Size of known protocol header fields, in bytes */
#define ICE_FLOW_FLD_SZ_ETH_TYPE        2
#define ICE_FLOW_FLD_SZ_VLAN            2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
#define ICE_FLOW_FLD_SZ_IP_DSCP         1
#define ICE_FLOW_FLD_SZ_IP_TTL          1
#define ICE_FLOW_FLD_SZ_IP_PROT         1
#define ICE_FLOW_FLD_SZ_PORT            2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
#define ICE_FLOW_FLD_SZ_ICMP_CODE       1
#define ICE_FLOW_FLD_SZ_ARP_OPER        2
#define ICE_FLOW_FLD_SZ_GRE_KEYID       4
#define ICE_FLOW_FLD_SZ_GTP_TEID        4
#define ICE_FLOW_FLD_SZ_GTP_QFI         2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
#define ICE_FLOW_FLD_SZ_PFCP_SEID 8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
#define ICE_FLOW_FLD_SZ_ESP_SPI 4
#define ICE_FLOW_FLD_SZ_AH_SPI  4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
33
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr; /* protocol header the field belongs to */
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
	u16 mask;	/* 16-bit mask for field; 0 = no explicit mask
			 * (see ICE_FLOW_FLD_INFO vs ICE_FLOW_FLD_INFO_MSK)
			 */
};
41
/* Initializer for a field entry with no explicit match mask (mask = 0).
 * Offsets/sizes are given in bytes and converted to bits for the table.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = 0, \
}
48
/* Initializer for a field entry with an explicit 16-bit match mask.
 * Offsets/sizes are given in bytes and converted to bits for the table.
 */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
55
/* Table containing properties of supported protocol header fields.
 * Entries are indexed by the ICE_FLOW_FIELD_IDX_* enum and MUST be kept in
 * the same order as that enum (see the per-entry comments below).
 */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* Ether */
	/* ICE_FLOW_FIELD_IDX_ETH_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ETH_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_S_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_C_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
	/* IPv4 / IPv6 */
	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x00fc),
	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x0ff0),
	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
	/* Transport */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
	/* ARP */
	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_OP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
	/* ICMP */
	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
	/* GTP */
	/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
			      ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
	/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* PPPOE */
	/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
			  ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
	/* PFCP */
	/* ICE_FLOW_FIELD_IDX_PFCP_SEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
			  ICE_FLOW_FLD_SZ_PFCP_SEID),
	/* L2TPV3 */
	/* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
			  ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
	/* ESP */
	/* ICE_FLOW_FIELD_IDX_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
			  ICE_FLOW_FLD_SZ_ESP_SPI),
	/* AH */
	/* ICE_FLOW_FIELD_IDX_AH_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
			  ICE_FLOW_FLD_SZ_AH_SPI),
	/* NAT_T_ESP */
	/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
			  ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
};
193
/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Each table below is a 1024-bit PTYPE bitmap (32 x 32-bit words); bit N of
 * word N/32 is set when hardware packet type N is relevant to that header.
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
208
/* PTYPE bitmap: packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
220
/* PTYPE bitmap: packets with an Outer/First/Single IPv4 header */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000155, 0x00000000, 0x00000000,
	0x0003000F, 0x000FC000, 0x83E0F800, 0x00000101,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
232
/* PTYPE bitmap: packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x001FF800, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
244
/* PTYPE bitmap: packets with an Outer/First/Single IPv6 header */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
	0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
256
/* PTYPE bitmap: packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
268
/* PTYPE bitmap: packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
	0x00000800, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
280
/* PTYPE bitmap: UDP packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00410000, 0x90842000, 0x00000007,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
294
/* PTYPE bitmap: packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00820000, 0x21084000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
306
/* PTYPE bitmap: packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x01040000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
318
/* PTYPE bitmap: packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
	0x10000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
330
/* PTYPE bitmap: packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
	0x00000000, 0x02040408, 0x40000102, 0x08101020,
	0x00000408, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x42108000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
342
/* PTYPE bitmap: packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
354
/* PTYPE bitmap: packets with an Innermost/Last MAC header (currently none) */
static const u32 ice_ptypes_mac_il[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
366
/* PTYPE bitmap: GTP-C packets */
static const u32 ice_ptypes_gtpc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000180, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
378
/* PTYPE bitmap: GTP-C packets carrying a TEID */
static const u32 ice_ptypes_gtpc_tid[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000060, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
390
/* GTP-U PTYPEs paired with the PDU-session-container (extension header)
 * attribute; applied on top of the GTP-U PTYPE bitmap.
 */
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
};
414
/* GTP-U PTYPEs paired with the downlink attribute; same PTYPE list as
 * ice_attr_gtpu_eh with a different attribute value.
 */
static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
};
437
/* GTP-U PTYPEs paired with the uplink attribute; same PTYPE list as
 * ice_attr_gtpu_eh with a different attribute value.
 */
static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
};
460
/* PTYPE bitmap: GTP-U packets */
static const u32 ice_ptypes_gtpu[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
471
/* PTYPE bitmap: PPPoE packets */
static const u32 ice_ptypes_pppoe[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
483
/* PTYPE bitmap: packets with a PFCP NODE header */
static const u32 ice_ptypes_pfcp_node[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x80000000, 0x00000002,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
495
/* PTYPE bitmap: packets with a PFCP SESSION header */
static const u32 ice_ptypes_pfcp_session[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000005,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
507
/* PTYPE bitmap: L2TPv3 packets */
static const u32 ice_ptypes_l2tpv3[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000300,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
519
/* PTYPE bitmap: ESP packets */
static const u32 ice_ptypes_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000003, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
531
/* PTYPE bitmap: AH packets */
static const u32 ice_ptypes_ah[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x0000000C, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
543
/* PTYPE bitmap: packets with a NAT-T ESP (UDP-encapsulated ESP) header */
static const u32 ice_ptypes_nat_t_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000030, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
555
/* PTYPE bitmap: Outer/First/Single MAC packets carrying no IP payload */
static const u32 ice_ptypes_mac_non_ip_ofos[] = {
	0x00000846, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
566
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk; /* hardware classification block being programmed */
	u16 entry_length; /* # of bytes formatted entry will require */
	u8 es_cnt; /* number of entries populated in es[] below */
	struct ice_flow_prof *prof; /* profile under construction */

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];
	/* attributes can be used to add attributes to a particular PTYPE */
	const struct ice_ptype_attributes *attr;
	u16 attr_cnt; /* number of entries in attr[] */

	/* per-word match masks; presumably paired 1:1 with es[] entries —
	 * TODO(review): confirm against the code that consumes this struct
	 */
	u16 mask[ICE_MAX_FV_WORDS];
	ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX); /* PTYPEs covered */
};
585
/* Header types that select inner-segment handling for RSS.
 * NOTE(review): semantics inferred from the name; confirm against the RSS
 * code that consumes this mask.
 */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	ICE_FLOW_SEG_HDR_NAT_T_ESP)

/* Header types grouped by protocol layer; ice_flow_val_hdrs() permits at
 * most one L3 and one L4 header per packet segment.
 */
#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
601
602 /**
603  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
604  * @segs: array of one or more packet segments that describe the flow
605  * @segs_cnt: number of packet segments provided
606  */
607 static enum ice_status
608 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
609 {
610         u8 i;
611
612         for (i = 0; i < segs_cnt; i++) {
613                 /* Multiple L3 headers */
614                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
615                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
616                         return ICE_ERR_PARAM;
617
618                 /* Multiple L4 headers */
619                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
620                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
621                         return ICE_ERR_PARAM;
622         }
623
624         return ICE_SUCCESS;
625 }
626
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC        14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4       20
#define ICE_FLOW_PROT_HDR_SZ_IPV6       40
#define ICE_FLOW_PROT_HDR_SZ_ARP        28
#define ICE_FLOW_PROT_HDR_SZ_ICMP       8
#define ICE_FLOW_PROT_HDR_SZ_TCP        20
#define ICE_FLOW_PROT_HDR_SZ_UDP        8
#define ICE_FLOW_PROT_HDR_SZ_SCTP       12
637
638 /**
639  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
640  * @params: information about the flow to be processed
641  * @seg: index of packet segment whose header size is to be determined
642  */
643 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
644 {
645         u16 sz;
646
647         /* L2 headers */
648         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
649                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
650
651         /* L3 headers */
652         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
653                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
654         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
655                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
656         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
657                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
658         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
659                 /* A L3 header is required if L4 is specified */
660                 return 0;
661
662         /* L4 headers */
663         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
664                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
665         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
666                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
667         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
668                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
669         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
670                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
671
672         return sz;
673 }
674
675 /**
676  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
677  * @params: information about the flow to be processed
678  *
679  * This function identifies the packet types associated with the protocol
680  * headers being present in packet segments of the specified flow profile.
681  */
682 static enum ice_status
683 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
684 {
685         struct ice_flow_prof *prof;
686         u8 i;
687
688         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
689                    ICE_NONDMA_MEM);
690
691         prof = params->prof;
692
693         for (i = 0; i < params->prof->segs_cnt; i++) {
694                 const ice_bitmap_t *src;
695                 u32 hdrs;
696
697                 hdrs = prof->segs[i].hdrs;
698
699                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
700                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
701                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
702                         ice_and_bitmap(params->ptypes, params->ptypes, src,
703                                        ICE_FLOW_PTYPE_MAX);
704                 }
705
706                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
707                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
708                         ice_and_bitmap(params->ptypes, params->ptypes, src,
709                                        ICE_FLOW_PTYPE_MAX);
710                 }
711
712                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
713                         ice_and_bitmap(params->ptypes, params->ptypes,
714                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
715                                        ICE_FLOW_PTYPE_MAX);
716                 }
717
718                 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
719                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
720                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
721                         ice_and_bitmap(params->ptypes, params->ptypes, src,
722                                        ICE_FLOW_PTYPE_MAX);
723                         if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
724                                 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
725                                 ice_and_bitmap(params->ptypes,
726                                                 params->ptypes, src,
727                                                ICE_FLOW_PTYPE_MAX);
728                         } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
729                                 ice_and_bitmap(params->ptypes, params->ptypes,
730                                                (const ice_bitmap_t *)
731                                                ice_ptypes_tcp_il,
732                                                ICE_FLOW_PTYPE_MAX);
733                         } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
734                                 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
735                                 ice_and_bitmap(params->ptypes, params->ptypes,
736                                                src, ICE_FLOW_PTYPE_MAX);
737                         }
738                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
739                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
740                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
741                         ice_and_bitmap(params->ptypes, params->ptypes, src,
742                                        ICE_FLOW_PTYPE_MAX);
743                         if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
744                                 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
745                                 ice_and_bitmap(params->ptypes,
746                                                 params->ptypes, src,
747                                                ICE_FLOW_PTYPE_MAX);
748                         } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
749                                 ice_and_bitmap(params->ptypes, params->ptypes,
750                                                (const ice_bitmap_t *)
751                                                ice_ptypes_tcp_il,
752                                                ICE_FLOW_PTYPE_MAX);
753                         } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
754                                 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
755                                 ice_and_bitmap(params->ptypes, params->ptypes,
756                                                src, ICE_FLOW_PTYPE_MAX);
757                         }
758                 }
759
760                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
761                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
762                         ice_and_bitmap(params->ptypes, params->ptypes,
763                                        src, ICE_FLOW_PTYPE_MAX);
764                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
765                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
766                         ice_and_bitmap(params->ptypes, params->ptypes, src,
767                                        ICE_FLOW_PTYPE_MAX);
768                 }
769
770                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
771                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
772                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
773                         ice_and_bitmap(params->ptypes, params->ptypes, src,
774                                        ICE_FLOW_PTYPE_MAX);
775                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
776                         if (!i) {
777                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
778                                 ice_and_bitmap(params->ptypes, params->ptypes,
779                                                src, ICE_FLOW_PTYPE_MAX);
780                         }
781                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
782                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
783                         ice_and_bitmap(params->ptypes, params->ptypes,
784                                        src, ICE_FLOW_PTYPE_MAX);
785                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
786                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
787                         ice_and_bitmap(params->ptypes, params->ptypes,
788                                        src, ICE_FLOW_PTYPE_MAX);
789                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
790                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
791                         ice_and_bitmap(params->ptypes, params->ptypes,
792                                        src, ICE_FLOW_PTYPE_MAX);
793
794                         /* Attributes for GTP packet with downlink */
795                         params->attr = ice_attr_gtpu_down;
796                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
797                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
798                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
799                         ice_and_bitmap(params->ptypes, params->ptypes,
800                                        src, ICE_FLOW_PTYPE_MAX);
801
802                         /* Attributes for GTP packet with uplink */
803                         params->attr = ice_attr_gtpu_up;
804                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
805                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
806                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
807                         ice_and_bitmap(params->ptypes, params->ptypes,
808                                        src, ICE_FLOW_PTYPE_MAX);
809
810                         /* Attributes for GTP packet with Extension Header */
811                         params->attr = ice_attr_gtpu_eh;
812                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
813                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
814                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
815                         ice_and_bitmap(params->ptypes, params->ptypes,
816                                        src, ICE_FLOW_PTYPE_MAX);
817                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
818                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
819                         ice_and_bitmap(params->ptypes, params->ptypes,
820                                        src, ICE_FLOW_PTYPE_MAX);
821                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
822                         src = (const ice_bitmap_t *)ice_ptypes_esp;
823                         ice_and_bitmap(params->ptypes, params->ptypes,
824                                        src, ICE_FLOW_PTYPE_MAX);
825                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
826                         src = (const ice_bitmap_t *)ice_ptypes_ah;
827                         ice_and_bitmap(params->ptypes, params->ptypes,
828                                        src, ICE_FLOW_PTYPE_MAX);
829                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
830                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
831                         ice_and_bitmap(params->ptypes, params->ptypes,
832                                        src, ICE_FLOW_PTYPE_MAX);
833                 }
834
835                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
836                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
837                                 src =
838                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
839                         else
840                                 src =
841                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
842
843                         ice_and_bitmap(params->ptypes, params->ptypes,
844                                        src, ICE_FLOW_PTYPE_MAX);
845                 } else {
846                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
847                         ice_andnot_bitmap(params->ptypes, params->ptypes,
848                                           src, ICE_FLOW_PTYPE_MAX);
849
850                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
851                         ice_andnot_bitmap(params->ptypes, params->ptypes,
852                                           src, ICE_FLOW_PTYPE_MAX);
853                 }
854         }
855
856         return ICE_SUCCESS;
857 }
858
859 /**
860  * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
861  * @hw: pointer to the HW struct
862  * @params: information about the flow to be processed
863  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
864  *
865  * This function will allocate an extraction sequence entries for a DWORD size
866  * chunk of the packet flags.
867  */
868 static enum ice_status
869 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
870                           struct ice_flow_prof_params *params,
871                           enum ice_flex_mdid_pkt_flags flags)
872 {
873         u8 fv_words = hw->blk[params->blk].es.fvw;
874         u8 idx;
875
876         /* Make sure the number of extraction sequence entries required does not
877          * exceed the block's capacity.
878          */
879         if (params->es_cnt >= fv_words)
880                 return ICE_ERR_MAX_LIMIT;
881
882         /* some blocks require a reversed field vector layout */
883         if (hw->blk[params->blk].es.reverse)
884                 idx = fv_words - params->es_cnt - 1;
885         else
886                 idx = params->es_cnt;
887
888         params->es[idx].prot_id = ICE_PROT_META_ID;
889         params->es[idx].off = flags;
890         params->es_cnt++;
891
892         return ICE_SUCCESS;
893 }
894
895 /**
896  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
897  * @hw: pointer to the HW struct
898  * @params: information about the flow to be processed
899  * @seg: packet segment index of the field to be extracted
900  * @fld: ID of field to be extracted
901  * @match: bitfield of all fields
902  *
903  * This function determines the protocol ID, offset, and size of the given
904  * field. It then allocates one or more extraction sequence entries for the
905  * given field, and fill the entries with protocol ID and offset information.
906  */
907 static enum ice_status
908 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
909                     u8 seg, enum ice_flow_field fld, u64 match)
910 {
911         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
912         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
913         u8 fv_words = hw->blk[params->blk].es.fvw;
914         struct ice_flow_fld_info *flds;
915         u16 cnt, ese_bits, i;
916         u16 sib_mask = 0;
917         s16 adj = 0;
918         u16 mask;
919         u16 off;
920
921         flds = params->prof->segs[seg].fields;
922
923         switch (fld) {
924         case ICE_FLOW_FIELD_IDX_ETH_DA:
925         case ICE_FLOW_FIELD_IDX_ETH_SA:
926         case ICE_FLOW_FIELD_IDX_S_VLAN:
927         case ICE_FLOW_FIELD_IDX_C_VLAN:
928                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
929                 break;
930         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
931                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
932                 break;
933         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
934                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
935                 break;
936         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
937                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
938                 break;
939         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
940         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
941                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
942
943                 /* TTL and PROT share the same extraction seq. entry.
944                  * Each is considered a sibling to the other in terms of sharing
945                  * the same extraction sequence entry.
946                  */
947                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
948                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
949                 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
950                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
951
952                 /* If the sibling field is also included, that field's
953                  * mask needs to be included.
954                  */
955                 if (match & BIT(sib))
956                         sib_mask = ice_flds_info[sib].mask;
957                 break;
958         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
959         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
960                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
961
962                 /* TTL and PROT share the same extraction seq. entry.
963                  * Each is considered a sibling to the other in terms of sharing
964                  * the same extraction sequence entry.
965                  */
966                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
967                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
968                 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
969                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
970
971                 /* If the sibling field is also included, that field's
972                  * mask needs to be included.
973                  */
974                 if (match & BIT(sib))
975                         sib_mask = ice_flds_info[sib].mask;
976                 break;
977         case ICE_FLOW_FIELD_IDX_IPV4_SA:
978         case ICE_FLOW_FIELD_IDX_IPV4_DA:
979                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
980                 break;
981         case ICE_FLOW_FIELD_IDX_IPV6_SA:
982         case ICE_FLOW_FIELD_IDX_IPV6_DA:
983         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
984         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
985         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
986         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
987         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
988         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
989                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
990                 break;
991         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
992         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
993         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
994                 prot_id = ICE_PROT_TCP_IL;
995                 break;
996         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
997         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
998                 prot_id = ICE_PROT_UDP_IL_OR_S;
999                 break;
1000         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1001         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1002                 prot_id = ICE_PROT_SCTP_IL;
1003                 break;
1004         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1005         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1006         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1007         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1008         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1009         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1010                 /* GTP is accessed through UDP OF protocol */
1011                 prot_id = ICE_PROT_UDP_OF;
1012                 break;
1013         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1014                 prot_id = ICE_PROT_PPPOE;
1015                 break;
1016         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1017                 prot_id = ICE_PROT_UDP_IL_OR_S;
1018                 break;
1019         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1020                 prot_id = ICE_PROT_L2TPV3;
1021                 break;
1022         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1023                 prot_id = ICE_PROT_ESP_F;
1024                 break;
1025         case ICE_FLOW_FIELD_IDX_AH_SPI:
1026                 prot_id = ICE_PROT_ESP_2;
1027                 break;
1028         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1029                 prot_id = ICE_PROT_UDP_IL_OR_S;
1030                 break;
1031         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1032         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1033         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1034         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1035         case ICE_FLOW_FIELD_IDX_ARP_OP:
1036                 prot_id = ICE_PROT_ARP_OF;
1037                 break;
1038         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1039         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1040                 /* ICMP type and code share the same extraction seq. entry */
1041                 prot_id = (params->prof->segs[seg].hdrs &
1042                            ICE_FLOW_SEG_HDR_IPV4) ?
1043                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1044                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1045                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1046                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1047                 break;
1048         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1049                 prot_id = ICE_PROT_GRE_OF;
1050                 break;
1051         default:
1052                 return ICE_ERR_NOT_IMPL;
1053         }
1054
1055         /* Each extraction sequence entry is a word in size, and extracts a
1056          * word-aligned offset from a protocol header.
1057          */
1058         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1059
1060         flds[fld].xtrct.prot_id = prot_id;
1061         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1062                 ICE_FLOW_FV_EXTRACT_SZ;
1063         flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
1064         flds[fld].xtrct.idx = params->es_cnt;
1065         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1066
1067         /* Adjust the next field-entry index after accommodating the number of
1068          * entries this field consumes
1069          */
1070         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1071                                   ice_flds_info[fld].size, ese_bits);
1072
1073         /* Fill in the extraction sequence entries needed for this field */
1074         off = flds[fld].xtrct.off;
1075         mask = flds[fld].xtrct.mask;
1076         for (i = 0; i < cnt; i++) {
1077                 /* Only consume an extraction sequence entry if there is no
1078                  * sibling field associated with this field or the sibling entry
1079                  * already extracts the word shared with this field.
1080                  */
1081                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1082                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1083                     flds[sib].xtrct.off != off) {
1084                         u8 idx;
1085
1086                         /* Make sure the number of extraction sequence required
1087                          * does not exceed the block's capability
1088                          */
1089                         if (params->es_cnt >= fv_words)
1090                                 return ICE_ERR_MAX_LIMIT;
1091
1092                         /* some blocks require a reversed field vector layout */
1093                         if (hw->blk[params->blk].es.reverse)
1094                                 idx = fv_words - params->es_cnt - 1;
1095                         else
1096                                 idx = params->es_cnt;
1097
1098                         params->es[idx].prot_id = prot_id;
1099                         params->es[idx].off = off;
1100                         params->mask[idx] = mask | sib_mask;
1101                         params->es_cnt++;
1102                 }
1103
1104                 off += ICE_FLOW_FV_EXTRACT_SZ;
1105         }
1106
1107         return ICE_SUCCESS;
1108 }
1109
1110 /**
1111  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1112  * @hw: pointer to the HW struct
1113  * @params: information about the flow to be processed
1114  * @seg: index of packet segment whose raw fields are to be be extracted
1115  */
1116 static enum ice_status
1117 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1118                      u8 seg)
1119 {
1120         u16 fv_words;
1121         u16 hdrs_sz;
1122         u8 i;
1123
1124         if (!params->prof->segs[seg].raws_cnt)
1125                 return ICE_SUCCESS;
1126
1127         if (params->prof->segs[seg].raws_cnt >
1128             ARRAY_SIZE(params->prof->segs[seg].raws))
1129                 return ICE_ERR_MAX_LIMIT;
1130
1131         /* Offsets within the segment headers are not supported */
1132         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1133         if (!hdrs_sz)
1134                 return ICE_ERR_PARAM;
1135
1136         fv_words = hw->blk[params->blk].es.fvw;
1137
1138         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1139                 struct ice_flow_seg_fld_raw *raw;
1140                 u16 off, cnt, j;
1141
1142                 raw = &params->prof->segs[seg].raws[i];
1143
1144                 /* Storing extraction information */
1145                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1146                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1147                         ICE_FLOW_FV_EXTRACT_SZ;
1148                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1149                         BITS_PER_BYTE;
1150                 raw->info.xtrct.idx = params->es_cnt;
1151
1152                 /* Determine the number of field vector entries this raw field
1153                  * consumes.
1154                  */
1155                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1156                                           (raw->info.src.last * BITS_PER_BYTE),
1157                                           (ICE_FLOW_FV_EXTRACT_SZ *
1158                                            BITS_PER_BYTE));
1159                 off = raw->info.xtrct.off;
1160                 for (j = 0; j < cnt; j++) {
1161                         u16 idx;
1162
1163                         /* Make sure the number of extraction sequence required
1164                          * does not exceed the block's capability
1165                          */
1166                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1167                             params->es_cnt >= ICE_MAX_FV_WORDS)
1168                                 return ICE_ERR_MAX_LIMIT;
1169
1170                         /* some blocks require a reversed field vector layout */
1171                         if (hw->blk[params->blk].es.reverse)
1172                                 idx = fv_words - params->es_cnt - 1;
1173                         else
1174                                 idx = params->es_cnt;
1175
1176                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1177                         params->es[idx].off = off;
1178                         params->es_cnt++;
1179                         off += ICE_FLOW_FV_EXTRACT_SZ;
1180                 }
1181         }
1182
1183         return ICE_SUCCESS;
1184 }
1185
1186 /**
1187  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1188  * @hw: pointer to the HW struct
1189  * @params: information about the flow to be processed
1190  *
1191  * This function iterates through all matched fields in the given segments, and
1192  * creates an extraction sequence for the fields.
1193  */
1194 static enum ice_status
1195 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1196                           struct ice_flow_prof_params *params)
1197 {
1198         enum ice_status status = ICE_SUCCESS;
1199         u8 i;
1200
1201         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1202          * packet flags
1203          */
1204         if (params->blk == ICE_BLK_ACL) {
1205                 status = ice_flow_xtract_pkt_flags(hw, params,
1206                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1207                 if (status)
1208                         return status;
1209         }
1210
1211         for (i = 0; i < params->prof->segs_cnt; i++) {
1212                 u64 match = params->prof->segs[i].match;
1213                 enum ice_flow_field j;
1214
1215                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1216                         const u64 bit = BIT_ULL(j);
1217
1218                         if (match & bit) {
1219                                 status = ice_flow_xtract_fld(hw, params, i, j,
1220                                                              match);
1221                                 if (status)
1222                                         return status;
1223                                 match &= ~bit;
1224                         }
1225                 }
1226
1227                 /* Process raw matching bytes */
1228                 status = ice_flow_xtract_raws(hw, params, i);
1229                 if (status)
1230                         return status;
1231         }
1232
1233         return status;
1234 }
1235
1236 /**
1237  * ice_flow_sel_acl_scen - returns the specific scenario
1238  * @hw: pointer to the hardware structure
1239  * @params: information about the flow to be processed
1240  *
1241  * This function will return the specific scenario based on the
1242  * params passed to it
1243  */
1244 static enum ice_status
1245 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1246 {
1247         /* Find the best-fit scenario for the provided match width */
1248         struct ice_acl_scen *cand_scen = NULL, *scen;
1249
1250         if (!hw->acl_tbl)
1251                 return ICE_ERR_DOES_NOT_EXIST;
1252
1253         /* Loop through each scenario and match against the scenario width
1254          * to select the specific scenario
1255          */
1256         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1257                 if (scen->eff_width >= params->entry_length &&
1258                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1259                         cand_scen = scen;
1260         if (!cand_scen)
1261                 return ICE_ERR_DOES_NOT_EXIST;
1262
1263         params->prof->cfg.scen = cand_scen;
1264
1265         return ICE_SUCCESS;
1266 }
1267
/**
 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
 * @params: information about the flow to be processed
 *
 * Walks all matched fields and raw fields of every packet segment in the
 * profile and assigns each one a position within the ACL entry: range-checked
 * fields consume one of the range-checker slots, while all other fields are
 * packed byte-by-byte starting at the byte-selection base index. The total
 * entry length (in bytes) is stored in @params for later scenario selection.
 */
static enum ice_status
ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
{
	u16 index, i, range_idx = 0;

	/* Byte packing starts at the HW byte-selection base index */
	index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		struct ice_flow_seg_info *seg = &params->prof->segs[i];
		u64 match = seg->match;
		u8 j;

		/* Iterate only while unprocessed match bits remain */
		for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
			struct ice_flow_fld_info *fld;
			const u64 bit = BIT_ULL(j);

			if (!(match & bit))
				continue;

			fld = &seg->fields[j];
			fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;

			if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
				fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;

				/* Range checking only supported for single
				 * words
				 */
				if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
							fld->xtrct.disp,
							BITS_PER_BYTE * 2) > 1)
					return ICE_ERR_PARAM;

				/* Ranges must define low and high values */
				if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
				    fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
					return ICE_ERR_PARAM;

				/* Consume the next range-checker slot */
				fld->entry.val = range_idx++;
			} else {
				/* Store adjusted byte-length of field for later
				 * use, taking into account potential
				 * non-byte-aligned displacement
				 */
				fld->entry.last = DIVIDE_AND_ROUND_UP
					(ice_flds_info[j].size +
					 (fld->xtrct.disp % BITS_PER_BYTE),
					 BITS_PER_BYTE);
				fld->entry.val = index;
				index += fld->entry.last;
			}

			match &= ~bit;
		}

		/* Raw fields are appended after the matched fields */
		for (j = 0; j < seg->raws_cnt; j++) {
			struct ice_flow_seg_fld_raw *raw = &seg->raws[j];

			raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
			raw->info.entry.val = index;
			raw->info.entry.last = raw->info.src.last;
			index += raw->info.entry.last;
		}
	}

	/* Currently only support using the byte selection base, which only
	 * allows for an effective entry size of 30 bytes. Reject anything
	 * larger.
	 */
	if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
		return ICE_ERR_PARAM;

	/* Only 8 range checkers per profile, reject anything trying to use
	 * more
	 */
	if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
		return ICE_ERR_PARAM;

	/* Store # bytes required for entry for later use */
	params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	return ICE_SUCCESS;
}
1355
1356 /**
1357  * ice_flow_proc_segs - process all packet segments associated with a profile
1358  * @hw: pointer to the HW struct
1359  * @params: information about the flow to be processed
1360  */
1361 static enum ice_status
1362 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1363 {
1364         enum ice_status status;
1365
1366         status = ice_flow_proc_seg_hdrs(params);
1367         if (status)
1368                 return status;
1369
1370         status = ice_flow_create_xtrct_seq(hw, params);
1371         if (status)
1372                 return status;
1373
1374         switch (params->blk) {
1375         case ICE_BLK_FD:
1376         case ICE_BLK_RSS:
1377                 status = ICE_SUCCESS;
1378                 break;
1379         case ICE_BLK_ACL:
1380                 status = ice_flow_acl_def_entry_frmt(params);
1381                 if (status)
1382                         return status;
1383                 status = ice_flow_sel_acl_scen(hw, params);
1384                 if (status)
1385                         return status;
1386                 break;
1387         case ICE_BLK_SW:
1388         default:
1389                 return ICE_ERR_NOT_IMPL;
1390         }
1391
1392         return status;
1393 }
1394
1395 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1396 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1397 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1398
1399 /**
1400  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1401  * @hw: pointer to the HW struct
1402  * @blk: classification stage
1403  * @dir: flow direction
1404  * @segs: array of one or more packet segments that describe the flow
1405  * @segs_cnt: number of packet segments provided
1406  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1407  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1408  */
1409 static struct ice_flow_prof *
1410 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1411                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1412                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1413 {
1414         struct ice_flow_prof *p, *prof = NULL;
1415
1416         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1417         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1418                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1419                     segs_cnt && segs_cnt == p->segs_cnt) {
1420                         u8 i;
1421
1422                         /* Check for profile-VSI association if specified */
1423                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1424                             ice_is_vsi_valid(hw, vsi_handle) &&
1425                             !ice_is_bit_set(p->vsis, vsi_handle))
1426                                 continue;
1427
1428                         /* Protocol headers must be checked. Matched fields are
1429                          * checked if specified.
1430                          */
1431                         for (i = 0; i < segs_cnt; i++)
1432                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1433                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1434                                      segs[i].match != p->segs[i].match))
1435                                         break;
1436
1437                         /* A match is found if all segments are matched */
1438                         if (i == segs_cnt) {
1439                                 prof = p;
1440                                 break;
1441                         }
1442                 }
1443         ice_release_lock(&hw->fl_profs_locks[blk]);
1444
1445         return prof;
1446 }
1447
1448 /**
1449  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1450  * @hw: pointer to the HW struct
1451  * @blk: classification stage
1452  * @dir: flow direction
1453  * @segs: array of one or more packet segments that describe the flow
1454  * @segs_cnt: number of packet segments provided
1455  */
1456 u64
1457 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1458                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1459 {
1460         struct ice_flow_prof *p;
1461
1462         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1463                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1464
1465         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1466 }
1467
1468 /**
1469  * ice_flow_find_prof_id - Look up a profile with given profile ID
1470  * @hw: pointer to the HW struct
1471  * @blk: classification stage
1472  * @prof_id: unique ID to identify this flow profile
1473  */
1474 static struct ice_flow_prof *
1475 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1476 {
1477         struct ice_flow_prof *p;
1478
1479         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1480                 if (p->id == prof_id)
1481                         return p;
1482
1483         return NULL;
1484 }
1485
1486 /**
1487  * ice_dealloc_flow_entry - Deallocate flow entry memory
1488  * @hw: pointer to the HW struct
1489  * @entry: flow entry to be removed
1490  */
1491 static void
1492 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1493 {
1494         if (!entry)
1495                 return;
1496
1497         if (entry->entry)
1498                 ice_free(hw, entry->entry);
1499
1500         if (entry->range_buf) {
1501                 ice_free(hw, entry->range_buf);
1502                 entry->range_buf = NULL;
1503         }
1504
1505         if (entry->acts) {
1506                 ice_free(hw, entry->acts);
1507                 entry->acts = NULL;
1508                 entry->acts_cnt = 0;
1509         }
1510
1511         ice_free(hw, entry);
1512 }
1513
1514 #define ICE_ACL_INVALID_SCEN    0x3f
1515
1516 /**
1517  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1518  * @hw: pointer to the hardware structure
1519  * @prof: pointer to flow profile
1520  * @buf: destination buffer function writes partial extraction sequence to
1521  *
1522  * returns ICE_SUCCESS if no PF is associated to the given profile
1523  * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1524  * returns other error code for real error
1525  */
1526 static enum ice_status
1527 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1528                             struct ice_aqc_acl_prof_generic_frmt *buf)
1529 {
1530         enum ice_status status;
1531         u8 prof_id = 0;
1532
1533         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1534         if (status)
1535                 return status;
1536
1537         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1538         if (status)
1539                 return status;
1540
1541         /* If all PF's associated scenarios are all 0 or all
1542          * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1543          * not been configured yet.
1544          */
1545         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1546             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1547             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1548             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1549                 return ICE_SUCCESS;
1550
1551         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1552             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1553             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1554             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1555             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1556             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1557             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1558             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1559                 return ICE_SUCCESS;
1560         else
1561                 return ICE_ERR_IN_USE;
1562 }
1563
1564 /**
1565  * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1566  * @hw: pointer to the hardware structure
1567  * @acts: array of actions to be performed on a match
1568  * @acts_cnt: number of actions
1569  */
1570 static enum ice_status
1571 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1572                            u8 acts_cnt)
1573 {
1574         int i;
1575
1576         for (i = 0; i < acts_cnt; i++) {
1577                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1578                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1579                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1580                         struct ice_acl_cntrs cntrs;
1581                         enum ice_status status;
1582
1583                         cntrs.bank = 0; /* Only bank0 for the moment */
1584                         cntrs.first_cntr =
1585                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1586                         cntrs.last_cntr =
1587                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1588
1589                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1590                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1591                         else
1592                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1593
1594                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1595                         if (status)
1596                                 return status;
1597                 }
1598         }
1599         return ICE_SUCCESS;
1600 }
1601
1602 /**
1603  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1604  * @hw: pointer to the hardware structure
1605  * @prof: pointer to flow profile
1606  *
1607  * Disassociate the scenario from the profile for the PF of the VSI.
1608  */
1609 static enum ice_status
1610 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1611 {
1612         struct ice_aqc_acl_prof_generic_frmt buf;
1613         enum ice_status status = ICE_SUCCESS;
1614         u8 prof_id = 0;
1615
1616         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1617
1618         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1619         if (status)
1620                 return status;
1621
1622         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1623         if (status)
1624                 return status;
1625
1626         /* Clear scenario for this PF */
1627         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1628         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1629
1630         return status;
1631 }
1632
/**
 * ice_flow_rem_entry_sync - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry: flow entry to be removed
 *
 * For ACL entries, the hardware entry is removed from its scenario and any
 * ACL counters held by the entry's actions are released before the software
 * state is torn down. Caller is expected to hold the profile's entry lock.
 */
static enum ice_status
ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
			struct ice_flow_entry *entry)
{
	if (!entry)
		return ICE_ERR_BAD_PTR;

	if (blk == ICE_BLK_ACL) {
		enum ice_status status;

		/* ACL entries must be tied to a profile to locate the HW
		 * scenario they live in
		 */
		if (!entry->prof)
			return ICE_ERR_BAD_PTR;

		status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
					   entry->scen_entry_idx);
		if (status)
			return status;

		/* Checks if we need to release an ACL counter. */
		if (entry->acts_cnt && entry->acts)
			ice_flow_acl_free_act_cntr(hw, entry->acts,
						   entry->acts_cnt);
	}

	/* Unlink from the profile's entry list, then free all entry memory */
	LIST_DEL(&entry->l_entry);

	ice_dealloc_flow_entry(hw, entry);

	return ICE_SUCCESS;
}
1669
/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @acts: array of default actions
 * @acts_cnt: number of default actions
 * @prof: stores the returned flow profile added
 *
 * Allocates the software profile, copies the caller's segments and actions
 * into it, processes the segments into an extraction sequence, and programs
 * a matching HW profile. On any failure all memory allocated here is freed
 * and *prof is left untouched.
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_action *acts, u8 acts_cnt,
		       struct ice_flow_prof **prof)
{
	/* NOTE(review): params lives on the stack and embeds the full
	 * extraction sequence; confirm sizeof(params) is acceptable for the
	 * target's stack budget.
	 */
	struct ice_flow_prof_params params;
	enum ice_status status;
	u8 i;

	/* Actions are optional, but a non-zero count requires a valid array */
	if (!prof || (acts_cnt && !acts))
		return ICE_ERR_BAD_PTR;

	ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
	params.prof = (struct ice_flow_prof *)
		ice_malloc(hw, sizeof(*params.prof));
	if (!params.prof)
		return ICE_ERR_NO_MEMORY;

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params.es[i].prot_id = ICE_PROT_INVALID;
		params.es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params.blk = blk;
	params.prof->id = prof_id;
	params.prof->dir = dir;
	params.prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
			   ICE_NONDMA_TO_NONDMA);

	/* Make a copy of the actions that need to be persistent in the flow
	 * profile instance.
	 */
	if (acts_cnt) {
		params.prof->acts = (struct ice_flow_action *)
			ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
				   ICE_NONDMA_TO_NONDMA);

		if (!params.prof->acts) {
			status = ICE_ERR_NO_MEMORY;
			goto out;
		}
	}

	status = ice_flow_proc_segs(hw, &params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW,
			  "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
			      params.attr, params.attr_cnt, params.es,
			      params.mask);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params.prof->entries);
	ice_init_lock(&params.prof->entries_lock);
	*prof = params.prof;

out:
	/* On failure, release the profile and its copied action array;
	 * on success, ownership has transferred to the caller via *prof.
	 */
	if (status) {
		if (params.prof->acts)
			ice_free(hw, params.prof->acts);
		ice_free(hw, params.prof);
	}

	return status;
}
1765
/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Removes all remaining flow entries, performs ACL-specific teardown
 * (scenario disassociation and, if no PF still uses the profile, clearing
 * the range checkers), then removes the HW profile and frees the software
 * profile state.
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the flow profile */
	if (!LIST_EMPTY(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		ice_acquire_lock(&prof->entries_lock);

		/* NOTE(review): a failure here only breaks out of the loop;
		 * profile removal still proceeds below with entries possibly
		 * remaining - confirm this is intended.
		 */
		LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
					 l_entry) {
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		ice_release_lock(&prof->entries_lock);
	}

	if (blk == ICE_BLK_ACL) {
		struct ice_aqc_acl_profile_ranges query_rng_buf;
		struct ice_aqc_acl_prof_generic_frmt buf;
		u8 prof_id = 0;

		/* Disassociate the scenario from the profile for the PF */
		status = ice_flow_acl_disassoc_scen(hw, prof);
		if (status)
			return status;

		/* Clear the range-checker if the profile ID is no longer
		 * used by any PF
		 */
		status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
		if (status && status != ICE_ERR_IN_USE) {
			return status;
		} else if (!status) {
			/* Clear the range-checker value for profile ID */
			ice_memset(&query_rng_buf, 0,
				   sizeof(struct ice_aqc_acl_profile_ranges),
				   ICE_NONDMA_MEM);

			status = ice_flow_get_hw_prof(hw, blk, prof->id,
						      &prof_id);
			if (status)
				return status;

			/* Program an all-zero range buffer to disable the
			 * range checkers for this profile
			 */
			status = ice_prog_acl_prof_ranges(hw, prof_id,
							  &query_rng_buf, NULL);
			if (status)
				return status;
		}
	}

	/* Remove all hardware profiles associated with this flow profile */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		/* HW state is gone; release the software profile too */
		LIST_DEL(&prof->l_entry);
		ice_destroy_lock(&prof->entries_lock);
		if (prof->acts)
			ice_free(hw, prof->acts);
		ice_free(hw, prof);
	}

	return status;
}
1842
1843 /**
1844  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1845  * @buf: Destination buffer function writes partial xtrct sequence to
1846  * @info: Info about field
1847  */
1848 static void
1849 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1850                                struct ice_flow_fld_info *info)
1851 {
1852         u16 dst, i;
1853         u8 src;
1854
1855         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1856                 info->xtrct.disp / BITS_PER_BYTE;
1857         dst = info->entry.val;
1858         for (i = 0; i < info->entry.last; i++)
1859                 /* HW stores field vector words in LE, convert words back to BE
1860                  * so constructed entries will end up in network order
1861                  */
1862                 buf->byte_selection[dst++] = src++ ^ 1;
1863 }
1864
/**
 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
 * @hw: pointer to the hardware structure
 * @prof: pointer to flow profile
 *
 * Programs the profile-wide extraction sequence (byte and word selections)
 * the first time any PF uses the profile, then records this PF's scenario
 * number in the profile's per-PF scenario table.
 */
static enum ice_status
ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
{
	struct ice_aqc_acl_prof_generic_frmt buf;
	struct ice_flow_fld_info *info;
	enum ice_status status;
	u8 prof_id = 0;
	u16 i;

	ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

	status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
	if (status)
		return status;

	/* ICE_ERR_IN_USE means another PF already programmed the profile
	 * configuration; only this PF's scenario slot needs updating then.
	 */
	status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
	if (status && status != ICE_ERR_IN_USE)
		return status;

	if (!status) {
		/* Program the profile dependent configuration. This is done
		 * only once regardless of the number of PFs using that profile
		 */
		ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

		for (i = 0; i < prof->segs_cnt; i++) {
			struct ice_flow_seg_info *seg = &prof->segs[i];
			u64 match = seg->match;
			u16 j;

			/* Stop early once all match bits are consumed */
			for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
				const u64 bit = BIT_ULL(j);

				if (!(match & bit))
					continue;

				info = &seg->fields[j];

				/* Range-checked fields use word selection;
				 * all others use byte selection
				 */
				if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
					buf.word_selection[info->entry.val] =
								info->xtrct.idx;
				else
					ice_flow_acl_set_xtrct_seq_fld(&buf,
								       info);

				match &= ~bit;
			}

			for (j = 0; j < seg->raws_cnt; j++) {
				info = &seg->raws[j].info;
				ice_flow_acl_set_xtrct_seq_fld(&buf, info);
			}
		}

		/* Mark every PF's scenario slot invalid until a PF claims it */
		ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
			   ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
			   ICE_NONDMA_MEM);
	}

	/* Update the current PF */
	buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
	status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);

	return status;
}
1935
1936 /**
1937  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1938  * @hw: pointer to the hardware structure
1939  * @blk: classification stage
1940  * @vsi_handle: software VSI handle
1941  * @vsig: target VSI group
1942  *
1943  * Assumption: the caller has already verified that the VSI to
1944  * be added has the same characteristics as the VSIG and will
1945  * thereby have access to all resources added to that VSIG.
1946  */
1947 enum ice_status
1948 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1949                         u16 vsig)
1950 {
1951         enum ice_status status;
1952
1953         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1954                 return ICE_ERR_PARAM;
1955
1956         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1957         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1958                                   vsig);
1959         ice_release_lock(&hw->fl_profs_locks[blk]);
1960
1961         return status;
1962 }
1963
1964 /**
1965  * ice_flow_assoc_prof - associate a VSI with a flow profile
1966  * @hw: pointer to the hardware structure
1967  * @blk: classification stage
1968  * @prof: pointer to flow profile
1969  * @vsi_handle: software VSI handle
1970  *
1971  * Assumption: the caller has acquired the lock to the profile list
1972  * and the software VSI handle has been validated
1973  */
1974 static enum ice_status
1975 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1976                     struct ice_flow_prof *prof, u16 vsi_handle)
1977 {
1978         enum ice_status status = ICE_SUCCESS;
1979
1980         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1981                 if (blk == ICE_BLK_ACL) {
1982                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
1983                         if (status)
1984                                 return status;
1985                 }
1986                 status = ice_add_prof_id_flow(hw, blk,
1987                                               ice_get_hw_vsi_num(hw,
1988                                                                  vsi_handle),
1989                                               prof->id);
1990                 if (!status)
1991                         ice_set_bit(vsi_handle, prof->vsis);
1992                 else
1993                         ice_debug(hw, ICE_DBG_FLOW,
1994                                   "HW profile add failed, %d\n",
1995                                   status);
1996         }
1997
1998         return status;
1999 }
2000
2001 /**
2002  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2003  * @hw: pointer to the hardware structure
2004  * @blk: classification stage
2005  * @prof: pointer to flow profile
2006  * @vsi_handle: software VSI handle
2007  *
2008  * Assumption: the caller has acquired the lock to the profile list
2009  * and the software VSI handle has been validated
2010  */
2011 static enum ice_status
2012 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2013                        struct ice_flow_prof *prof, u16 vsi_handle)
2014 {
2015         enum ice_status status = ICE_SUCCESS;
2016
2017         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2018                 status = ice_rem_prof_id_flow(hw, blk,
2019                                               ice_get_hw_vsi_num(hw,
2020                                                                  vsi_handle),
2021                                               prof->id);
2022                 if (!status)
2023                         ice_clear_bit(vsi_handle, prof->vsis);
2024                 else
2025                         ice_debug(hw, ICE_DBG_FLOW,
2026                                   "HW profile remove failed, %d\n",
2027                                   status);
2028         }
2029
2030         return status;
2031 }
2032
2033 /**
2034  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2035  * @hw: pointer to the HW struct
2036  * @blk: classification stage
2037  * @dir: flow direction
2038  * @prof_id: unique ID to identify this flow profile
2039  * @segs: array of one or more packet segments that describe the flow
2040  * @segs_cnt: number of packet segments provided
2041  * @acts: array of default actions
2042  * @acts_cnt: number of default actions
2043  * @prof: stores the returned flow profile added
2044  */
2045 enum ice_status
2046 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2047                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2048                   struct ice_flow_action *acts, u8 acts_cnt,
2049                   struct ice_flow_prof **prof)
2050 {
2051         enum ice_status status;
2052
2053         if (segs_cnt > ICE_FLOW_SEG_MAX)
2054                 return ICE_ERR_MAX_LIMIT;
2055
2056         if (!segs_cnt)
2057                 return ICE_ERR_PARAM;
2058
2059         if (!segs)
2060                 return ICE_ERR_BAD_PTR;
2061
2062         status = ice_flow_val_hdrs(segs, segs_cnt);
2063         if (status)
2064                 return status;
2065
2066         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2067
2068         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2069                                         acts, acts_cnt, prof);
2070         if (!status)
2071                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2072
2073         ice_release_lock(&hw->fl_profs_locks[blk]);
2074
2075         return status;
2076 }
2077
2078 /**
2079  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2080  * @hw: pointer to the HW struct
2081  * @blk: the block for which the flow profile is to be removed
2082  * @prof_id: unique ID of the flow profile to be removed
2083  */
2084 enum ice_status
2085 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2086 {
2087         struct ice_flow_prof *prof;
2088         enum ice_status status;
2089
2090         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2091
2092         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2093         if (!prof) {
2094                 status = ICE_ERR_DOES_NOT_EXIST;
2095                 goto out;
2096         }
2097
2098         /* prof becomes invalid after the call */
2099         status = ice_flow_rem_prof_sync(hw, blk, prof);
2100
2101 out:
2102         ice_release_lock(&hw->fl_profs_locks[blk]);
2103
2104         return status;
2105 }
2106
2107 /**
2108  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2109  * @hw: pointer to the HW struct
2110  * @blk: classification stage
2111  * @prof_id: the profile ID handle
2112  * @hw_prof_id: pointer to variable to receive the HW profile ID
2113  */
2114 enum ice_status
2115 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2116                      u8 *hw_prof_id)
2117 {
2118         struct ice_prof_map *map;
2119
2120         map = ice_search_prof_id(hw, blk, prof_id);
2121         if (map) {
2122                 *hw_prof_id = map->prof_id;
2123                 return ICE_SUCCESS;
2124         }
2125
2126         return ICE_ERR_DOES_NOT_EXIST;
2127 }
2128
/**
 * ice_flow_find_entry - look for a flow entry using its unique ID
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry_id: unique ID to identify this flow entry
 *
 * This function looks for the flow entry with the specified unique ID in all
 * flow profiles of the specified classification stage. If the entry is found,
 * it returns the handle to the flow entry. Otherwise, it returns
 * ICE_FLOW_ENTRY_HANDLE_INVAL.
 */
u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
{
	struct ice_flow_entry *found = NULL;
	struct ice_flow_prof *p;

	ice_acquire_lock(&hw->fl_profs_locks[blk]);

	LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
		struct ice_flow_entry *e;

		/* Each profile's entry list is guarded by its own lock */
		ice_acquire_lock(&p->entries_lock);
		LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
			if (e->id == entry_id) {
				found = e;
				break;
			}
		ice_release_lock(&p->entries_lock);

		if (found)
			break;
	}

	ice_release_lock(&hw->fl_profs_locks[blk]);

	return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
}
2166
/**
 * ice_flow_acl_check_actions - Checks the ACL rule's actions
 * @hw: pointer to the hardware structure
 * @acts: array of actions to be performed on a match
 * @acts_cnt: number of actions
 * @cnt_alloc: indicates if an ACL counter has been allocated.
 *
 * Validates that every action is of a supported type and that no type
 * appears twice, then allocates a FW ACL counter for counter actions and
 * stores the counter index into that action's data. On any failure the
 * caller must not use @acts; *@cnt_alloc tells it whether counters need
 * to be freed.
 */
static enum ice_status
ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
			   u8 acts_cnt, bool *cnt_alloc)
{
	ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	int i;

	ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	*cnt_alloc = false;

	if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
		return ICE_ERR_OUT_OF_RANGE;

	for (i = 0; i < acts_cnt; i++) {
		if (acts[i].type != ICE_FLOW_ACT_NOP &&
		    acts[i].type != ICE_FLOW_ACT_DROP &&
		    acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
		    acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
			return ICE_ERR_CFG;

		/* If the caller wants to add two actions of the same type,
		 * then it is considered invalid configuration.
		 */
		if (ice_test_and_set_bit(acts[i].type, dup_check))
			return ICE_ERR_PARAM;
	}

	/* Checks if ACL counters are needed.
	 * NOTE(review): the validity check above rejects
	 * ICE_FLOW_ACT_CNTR_BYTES and ICE_FLOW_ACT_CNTR_PKT_BYTES, so the
	 * branches for those types below appear unreachable - confirm the
	 * intended set of supported counter action types.
	 */
	for (i = 0; i < acts_cnt; i++) {
		if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
		    acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
		    acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
			struct ice_acl_cntrs cntrs;
			enum ice_status status;

			cntrs.amount = 1;
			cntrs.bank = 0; /* Only bank0 for the moment */

			if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
				cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
			else
				cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;

			status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
			if (status)
				return status;
			/* Counter index within the bank */
			acts[i].data.acl_act.value =
						CPU_TO_LE16(cntrs.first_cntr);
			*cnt_alloc = true;
		}
	}

	return ICE_SUCCESS;
}
2229
2230 /**
2231  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2232  * @fld: number of the given field
2233  * @info: info about field
2234  * @range_buf: range checker configuration buffer
2235  * @data: pointer to a data buffer containing flow entry's match values/masks
2236  * @range: Input/output param indicating which range checkers are being used
2237  */
2238 static void
2239 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2240                               struct ice_aqc_acl_profile_ranges *range_buf,
2241                               u8 *data, u8 *range)
2242 {
2243         u16 new_mask;
2244
2245         /* If not specified, default mask is all bits in field */
2246         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2247                     BIT(ice_flds_info[fld].size) - 1 :
2248                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2249
2250         /* If the mask is 0, then we don't need to worry about this input
2251          * range checker value.
2252          */
2253         if (new_mask) {
2254                 u16 new_high =
2255                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2256                 u16 new_low =
2257                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2258                 u8 range_idx = info->entry.val;
2259
2260                 range_buf->checker_cfg[range_idx].low_boundary =
2261                         CPU_TO_BE16(new_low);
2262                 range_buf->checker_cfg[range_idx].high_boundary =
2263                         CPU_TO_BE16(new_high);
2264                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2265
2266                 /* Indicate which range checker is being used */
2267                 *range |= BIT(range_idx);
2268         }
2269 }
2270
/**
 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
 * @fld: number of the given field
 * @info: info about the field
 * @buf: buffer containing the entry
 * @dontcare: buffer containing don't care mask for entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 *
 * Copies the field's value (and optional mask) from @data into the key
 * buffer @buf, left-shifting by the extraction displacement and carrying
 * overflow bits into the next byte. Bits outside the field (leading
 * displacement and trailing remainder of the last byte) are marked as
 * don't-care.
 */
static void
ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
			    u8 *dontcare, u8 *data)
{
	u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
	bool use_mask = false;
	u8 disp;

	src = info->src.val;
	mask = info->src.mask;
	dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
	/* Bit displacement of the extracted field within its first byte */
	disp = info->xtrct.disp % BITS_PER_BYTE;

	if (mask != ICE_FLOW_FLD_OFF_INVAL)
		use_mask = true;

	for (k = 0; k < info->entry.last; k++, dst++) {
		/* Add overflow bits from previous byte */
		buf[dst] = (tmp_s & 0xff00) >> 8;

		/* If mask is not valid, tmp_m is always zero, so just setting
		 * dontcare to 0 (no masked bits). If mask is valid, pulls in
		 * overflow bits of mask from prev byte
		 */
		dontcare[dst] = (tmp_m & 0xff00) >> 8;

		/* If there is displacement, last byte will only contain
		 * displaced data, but there is no more data to read from user
		 * buffer, so skip so as not to potentially read beyond end of
		 * user buffer
		 */
		if (!disp || k < info->entry.last - 1) {
			/* Store shifted data to use in next byte */
			tmp_s = data[src++] << disp;

			/* Add current (shifted) byte */
			buf[dst] |= tmp_s & 0xff;

			/* Handle mask if valid; a don't-care bit is the
			 * inverse of a mask bit
			 */
			if (use_mask) {
				tmp_m = (~data[mask++] & 0xff) << disp;
				dontcare[dst] |= tmp_m & 0xff;
			}
		}
	}

	/* Fill in don't care bits at beginning of field */
	if (disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
		for (k = 0; k < disp; k++)
			dontcare[dst] |= BIT(k);
	}

	/* Bit position just past the end of the field in its last byte */
	end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;

	/* Fill in don't care bits at end of field */
	if (end_disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
		      info->entry.last - 1;
		for (k = end_disp; k < BITS_PER_BYTE; k++)
			dontcare[dst] |= BIT(k);
	}
}
2342
2343 /**
2344  * ice_flow_acl_frmt_entry - Format ACL entry
2345  * @hw: pointer to the hardware structure
2346  * @prof: pointer to flow profile
2347  * @e: pointer to the flow entry
2348  * @data: pointer to a data buffer containing flow entry's match values/masks
2349  * @acts: array of actions to be performed on a match
2350  * @acts_cnt: number of actions
2351  *
2352  * Formats the key (and key_inverse) to be matched from the data passed in,
2353  * along with data from the flow profile. This key/key_inverse pair makes up
2354  * the 'entry' for an ACL flow entry.
2355  */
2356 static enum ice_status
2357 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2358                         struct ice_flow_entry *e, u8 *data,
2359                         struct ice_flow_action *acts, u8 acts_cnt)
2360 {
2361         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2362         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2363         enum ice_status status;
2364         bool cnt_alloc;
2365         u8 prof_id = 0;
2366         u16 i, buf_sz;
2367
2368         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2369         if (status)
2370                 return status;
2371
2372         /* Format the result action */
2373
2374         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2375         if (status)
2376                 return status;
2377
2378         status = ICE_ERR_NO_MEMORY;
2379
2380         e->acts = (struct ice_flow_action *)
2381                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2382                            ICE_NONDMA_TO_NONDMA);
2383
2384         if (!e->acts)
2385                 goto out;
2386
2387         e->acts_cnt = acts_cnt;
2388
2389         /* Format the matching data */
2390         buf_sz = prof->cfg.scen->width;
2391         buf = (u8 *)ice_malloc(hw, buf_sz);
2392         if (!buf)
2393                 goto out;
2394
2395         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2396         if (!dontcare)
2397                 goto out;
2398
2399         /* 'key' buffer will store both key and key_inverse, so must be twice
2400          * size of buf
2401          */
2402         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2403         if (!key)
2404                 goto out;
2405
2406         range_buf = (struct ice_aqc_acl_profile_ranges *)
2407                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2408         if (!range_buf)
2409                 goto out;
2410
2411         /* Set don't care mask to all 1's to start, will zero out used bytes */
2412         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2413
2414         for (i = 0; i < prof->segs_cnt; i++) {
2415                 struct ice_flow_seg_info *seg = &prof->segs[i];
2416                 u64 match = seg->match;
2417                 u16 j;
2418
2419                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2420                         struct ice_flow_fld_info *info;
2421                         const u64 bit = BIT_ULL(j);
2422
2423                         if (!(match & bit))
2424                                 continue;
2425
2426                         info = &seg->fields[j];
2427
2428                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2429                                 ice_flow_acl_frmt_entry_range(j, info,
2430                                                               range_buf, data,
2431                                                               &range);
2432                         else
2433                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2434                                                             dontcare, data);
2435
2436                         match &= ~bit;
2437                 }
2438
2439                 for (j = 0; j < seg->raws_cnt; j++) {
2440                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2441                         u16 dst, src, mask, k;
2442                         bool use_mask = false;
2443
2444                         src = info->src.val;
2445                         dst = info->entry.val -
2446                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2447                         mask = info->src.mask;
2448
2449                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2450                                 use_mask = true;
2451
2452                         for (k = 0; k < info->entry.last; k++, dst++) {
2453                                 buf[dst] = data[src++];
2454                                 if (use_mask)
2455                                         dontcare[dst] = ~data[mask++];
2456                                 else
2457                                         dontcare[dst] = 0;
2458                         }
2459                 }
2460         }
2461
2462         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2463         dontcare[prof->cfg.scen->pid_idx] = 0;
2464
2465         /* Format the buffer for direction flags */
2466         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2467
2468         if (prof->dir == ICE_FLOW_RX)
2469                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2470
2471         if (range) {
2472                 buf[prof->cfg.scen->rng_chk_idx] = range;
2473                 /* Mark any unused range checkers as don't care */
2474                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2475                 e->range_buf = range_buf;
2476         } else {
2477                 ice_free(hw, range_buf);
2478         }
2479
2480         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2481                              buf_sz);
2482         if (status)
2483                 goto out;
2484
2485         e->entry = key;
2486         e->entry_sz = buf_sz * 2;
2487
2488 out:
2489         if (buf)
2490                 ice_free(hw, buf);
2491
2492         if (dontcare)
2493                 ice_free(hw, dontcare);
2494
2495         if (status && key)
2496                 ice_free(hw, key);
2497
2498         if (status && range_buf) {
2499                 ice_free(hw, range_buf);
2500                 e->range_buf = NULL;
2501         }
2502
2503         if (status && e->acts) {
2504                 ice_free(hw, e->acts);
2505                 e->acts = NULL;
2506                 e->acts_cnt = 0;
2507         }
2508
2509         if (status && cnt_alloc)
2510                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2511
2512         return status;
2513 }
2514
/**
 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
 *                                     the compared data.
 * @prof: pointer to flow profile
 * @e: pointer to the comparing flow entry
 * @do_chg_action: decide if we want to change the ACL action
 * @do_add_entry: decide if we want to add the new ACL entry
 * @do_rem_entry: decide if we want to remove the current ACL entry
 *
 * Find an ACL scenario entry that matches the compared data. At the same
 * time, this function also figures out:
 * a/ If we want to change the ACL action
 * b/ If we want to add the new ACL entry
 * c/ If we want to remove the current ACL entry
 */
static struct ice_flow_entry *
ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
				  struct ice_flow_entry *e, bool *do_chg_action,
				  bool *do_add_entry, bool *do_rem_entry)
{
	struct ice_flow_entry *p, *return_entry = NULL;
	u8 i, j;

	/* Check if:
	 * a/ There exists an entry with same matching data, but different
	 *    priority, then we remove this existing ACL entry. Then, we
	 *    will add the new entry to the ACL scenario.
	 * b/ There exists an entry with same matching data, priority, and
	 *    result action, then we do nothing
	 * c/ There exists an entry with same matching data, priority, but
	 *    different action, then we only change the action's entry.
	 * d/ Else, we add this new entry to the ACL scenario.
	 */
	*do_chg_action = false;
	*do_add_entry = true;
	*do_rem_entry = false;
	LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
		/* NOTE(review): raw key comparison assumes entries of the
		 * same profile share entry_sz (scenario width is
		 * per-profile) - confirm this invariant.
		 */
		if (memcmp(p->entry, e->entry, p->entry_sz))
			continue;

		/* From this point, we have the same matching_data. */
		*do_add_entry = false;
		return_entry = p;

		if (p->priority != e->priority) {
			/* matching data && !priority */
			*do_add_entry = true;
			*do_rem_entry = true;
			break;
		}

		/* From this point, we will have matching_data && priority */
		if (p->acts_cnt != e->acts_cnt)
			*do_chg_action = true;
		for (i = 0; i < p->acts_cnt; i++) {
			bool found_not_match = false;

			/* NOTE(review): this loop flags a mismatch as soon
			 * as ANY e->acts[j] differs from p->acts[i], i.e. it
			 * requires every action in e to equal p->acts[i]
			 * rather than searching for a single match. The
			 * observable effect is only that do_chg_action may
			 * be set conservatively (actions reprogrammed when
			 * identical sets are ordered differently) - confirm
			 * intent. Also note memcmp compares any struct
			 * padding bytes of ice_flow_action.
			 */
			for (j = 0; j < e->acts_cnt; j++)
				if (memcmp(&p->acts[i], &e->acts[j],
					   sizeof(struct ice_flow_action))) {
					found_not_match = true;
					break;
				}

			if (found_not_match) {
				*do_chg_action = true;
				break;
			}
		}

		/* (do_chg_action = true) means :
		 *    matching_data && priority && !result_action
		 * (do_chg_action = false) means :
		 *    matching_data && priority && result_action
		 */
		break;
	}

	return return_entry;
}
2595
2596 /**
2597  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2598  * @p: flow priority
2599  */
2600 static enum ice_acl_entry_prior
2601 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2602 {
2603         enum ice_acl_entry_prior acl_prior;
2604
2605         switch (p) {
2606         case ICE_FLOW_PRIO_LOW:
2607                 acl_prior = ICE_LOW;
2608                 break;
2609         case ICE_FLOW_PRIO_NORMAL:
2610                 acl_prior = ICE_NORMAL;
2611                 break;
2612         case ICE_FLOW_PRIO_HIGH:
2613                 acl_prior = ICE_HIGH;
2614                 break;
2615         default:
2616                 acl_prior = ICE_NORMAL;
2617                 break;
2618         }
2619
2620         return acl_prior;
2621 }
2622
2623 /**
2624  * ice_flow_acl_union_rng_chk - Perform union operation between two
2625  *                              range-range checker buffers
2626  * @dst_buf: pointer to destination range checker buffer
2627  * @src_buf: pointer to source range checker buffer
2628  *
2629  * For this function, we do the union between dst_buf and src_buf
2630  * range checker buffer, and we will save the result back to dst_buf
2631  */
2632 static enum ice_status
2633 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2634                            struct ice_aqc_acl_profile_ranges *src_buf)
2635 {
2636         u8 i, j;
2637
2638         if (!dst_buf || !src_buf)
2639                 return ICE_ERR_BAD_PTR;
2640
2641         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2642                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2643                 bool will_populate = false;
2644
2645                 in_data = &src_buf->checker_cfg[i];
2646
2647                 if (!in_data->mask)
2648                         break;
2649
2650                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2651                         cfg_data = &dst_buf->checker_cfg[j];
2652
2653                         if (!cfg_data->mask ||
2654                             !memcmp(cfg_data, in_data,
2655                                     sizeof(struct ice_acl_rng_data))) {
2656                                 will_populate = true;
2657                                 break;
2658                         }
2659                 }
2660
2661                 if (will_populate) {
2662                         ice_memcpy(cfg_data, in_data,
2663                                    sizeof(struct ice_acl_rng_data),
2664                                    ICE_NONDMA_TO_NONDMA);
2665                 } else {
2666                         /* No available slot left to program range checker */
2667                         return ICE_ERR_MAX_LIMIT;
2668                 }
2669         }
2670
2671         return ICE_SUCCESS;
2672 }
2673
2674 /**
2675  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2676  * @hw: pointer to the hardware structure
2677  * @prof: pointer to flow profile
2678  * @entry: double pointer to the flow entry
2679  *
2680  * For this function, we will look at the current added entries in the
2681  * corresponding ACL scenario. Then, we will perform matching logic to
2682  * see if we want to add/modify/do nothing with this new entry.
2683  */
2684 static enum ice_status
2685 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2686                                  struct ice_flow_entry **entry)
2687 {
2688         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2689         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2690         struct ice_acl_act_entry *acts = NULL;
2691         struct ice_flow_entry *exist;
2692         enum ice_status status = ICE_SUCCESS;
2693         struct ice_flow_entry *e;
2694         u8 i;
2695
2696         if (!entry || !(*entry) || !prof)
2697                 return ICE_ERR_BAD_PTR;
2698
2699         e = *(entry);
2700
2701         do_chg_rng_chk = false;
2702         if (e->range_buf) {
2703                 u8 prof_id = 0;
2704
2705                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2706                                               &prof_id);
2707                 if (status)
2708                         return status;
2709
2710                 /* Query the current range-checker value in FW */
2711                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2712                                                    NULL);
2713                 if (status)
2714                         return status;
2715                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2716                            sizeof(struct ice_aqc_acl_profile_ranges),
2717                            ICE_NONDMA_TO_NONDMA);
2718
2719                 /* Generate the new range-checker value */
2720                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2721                 if (status)
2722                         return status;
2723
2724                 /* Reconfigure the range check if the buffer is changed. */
2725                 do_chg_rng_chk = false;
2726                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2727                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2728                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2729                                                           &cfg_rng_buf, NULL);
2730                         if (status)
2731                                 return status;
2732
2733                         do_chg_rng_chk = true;
2734                 }
2735         }
2736
2737         /* Figure out if we want to (change the ACL action) and/or
2738          * (Add the new ACL entry) and/or (Remove the current ACL entry)
2739          */
2740         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2741                                                   &do_add_entry, &do_rem_entry);
2742
2743         if (do_rem_entry) {
2744                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2745                 if (status)
2746                         return status;
2747         }
2748
2749         /* Prepare the result action buffer */
2750         acts = (struct ice_acl_act_entry *)ice_calloc
2751                 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2752         for (i = 0; i < e->acts_cnt; i++)
2753                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2754                            sizeof(struct ice_acl_act_entry),
2755                            ICE_NONDMA_TO_NONDMA);
2756
2757         if (do_add_entry) {
2758                 enum ice_acl_entry_prior prior;
2759                 u8 *keys, *inverts;
2760                 u16 entry_idx;
2761
2762                 keys = (u8 *)e->entry;
2763                 inverts = keys + (e->entry_sz / 2);
2764                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2765
2766                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2767                                            inverts, acts, e->acts_cnt,
2768                                            &entry_idx);
2769                 if (status)
2770                         goto out;
2771
2772                 e->scen_entry_idx = entry_idx;
2773                 LIST_ADD(&e->l_entry, &prof->entries);
2774         } else {
2775                 if (do_chg_action) {
2776                         /* For the action memory info, update the SW's copy of
2777                          * exist entry with e's action memory info
2778                          */
2779                         ice_free(hw, exist->acts);
2780                         exist->acts_cnt = e->acts_cnt;
2781                         exist->acts = (struct ice_flow_action *)
2782                                 ice_calloc(hw, exist->acts_cnt,
2783                                            sizeof(struct ice_flow_action));
2784
2785                         if (!exist->acts) {
2786                                 status = ICE_ERR_NO_MEMORY;
2787                                 goto out;
2788                         }
2789
2790                         ice_memcpy(exist->acts, e->acts,
2791                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2792                                    ICE_NONDMA_TO_NONDMA);
2793
2794                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2795                                                   e->acts_cnt,
2796                                                   exist->scen_entry_idx);
2797                         if (status)
2798                                 goto out;
2799                 }
2800
2801                 if (do_chg_rng_chk) {
2802                         /* In this case, we want to update the range checker
2803                          * information of the exist entry
2804                          */
2805                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2806                                                             e->range_buf);
2807                         if (status)
2808                                 goto out;
2809                 }
2810
2811                 /* As we don't add the new entry to our SW DB, deallocate its
2812                  * memories, and return the exist entry to the caller
2813                  */
2814                 ice_dealloc_flow_entry(hw, e);
2815                 *(entry) = exist;
2816         }
2817 out:
2818         if (acts)
2819                 ice_free(hw, acts);
2820
2821         return status;
2822 }
2823
2824 /**
2825  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2826  * @hw: pointer to the hardware structure
2827  * @prof: pointer to flow profile
2828  * @e: double pointer to the flow entry
2829  */
2830 static enum ice_status
2831 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2832                             struct ice_flow_entry **e)
2833 {
2834         enum ice_status status;
2835
2836         ice_acquire_lock(&prof->entries_lock);
2837         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2838         ice_release_lock(&prof->entries_lock);
2839
2840         return status;
2841 }
2842
/**
 * ice_flow_add_entry - Add a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: ID of the profile to add a new flow entry to
 * @entry_id: unique ID to identify this flow entry
 * @vsi_handle: software VSI handle for the flow entry
 * @prio: priority of the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @acts: arrays of actions to be performed on a match
 * @acts_cnt: number of actions
 * @entry_h: pointer to buffer that receives the new flow entry's handle
 *
 * Looks up flow profile @prof_id under the profile-list lock, associates
 * @vsi_handle with it, and links a newly allocated entry into the profile's
 * entry list (except for ACL, which manages entries itself). On success
 * *@entry_h receives a handle usable with ice_flow_rem_entry(); on failure
 * the partially constructed entry is freed before returning.
 */
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, struct ice_flow_action *acts, u8 acts_cnt,
		   u64 *entry_h)
{
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status = ICE_SUCCESS;

	/* ACL entries must indicate an action */
	if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
		return ICE_ERR_PARAM;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Profile lookup, entry allocation and VSI association are done
	 * under the per-block profile-list lock.
	 */
	ice_acquire_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
	} else {
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile
		 */
		e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
		if (!e)
			status = ICE_ERR_NO_MEMORY;
		else
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	}

	ice_release_lock(&hw->fl_profs_locks[blk]);
	if (status)
		goto out;

	e->id = entry_id;
	e->vsi_handle = vsi_handle;
	e->prof = prof;
	e->priority = prio;

	switch (blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		break;
	case ICE_BLK_ACL:
		/* ACL will handle the entry management */
		status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
						 acts_cnt);
		if (status)
			goto out;

		/* NOTE: this call may replace 'e' with a pre-existing entry
		 * when the scenario already holds a matching one.
		 */
		status = ice_flow_acl_add_scen_entry(hw, prof, &e);
		if (status)
			goto out;

		break;
	case ICE_BLK_SW:
	case ICE_BLK_PE:
	default:
		status = ICE_ERR_NOT_IMPL;
		goto out;
	}

	if (blk != ICE_BLK_ACL) {
		/* ACL will handle the entry management */
		ice_acquire_lock(&prof->entries_lock);
		LIST_ADD(&e->l_entry, &prof->entries);
		ice_release_lock(&prof->entries_lock);
	}

	*entry_h = ICE_FLOW_ENTRY_HNDL(e);

out:
	/* Undo the allocation on any failure after 'e' was created */
	if (status && e) {
		if (e->entry)
			ice_free(hw, e->entry);
		ice_free(hw, e);
	}

	return status;
}
2943
2944 /**
2945  * ice_flow_rem_entry - Remove a flow entry
2946  * @hw: pointer to the HW struct
2947  * @blk: classification stage
2948  * @entry_h: handle to the flow entry to be removed
2949  */
2950 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2951                                    u64 entry_h)
2952 {
2953         struct ice_flow_entry *entry;
2954         struct ice_flow_prof *prof;
2955         enum ice_status status = ICE_SUCCESS;
2956
2957         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2958                 return ICE_ERR_PARAM;
2959
2960         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2961
2962         /* Retain the pointer to the flow profile as the entry will be freed */
2963         prof = entry->prof;
2964
2965         if (prof) {
2966                 ice_acquire_lock(&prof->entries_lock);
2967                 status = ice_flow_rem_entry_sync(hw, blk, entry);
2968                 ice_release_lock(&prof->entries_lock);
2969         }
2970
2971         return status;
2972 }
2973
2974 /**
2975  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2976  * @seg: packet segment the field being set belongs to
2977  * @fld: field to be set
2978  * @field_type: type of the field
2979  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2980  *           entry's input buffer
2981  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2982  *            input buffer
2983  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2984  *            entry's input buffer
2985  *
2986  * This helper function stores information of a field being matched, including
2987  * the type of the field and the locations of the value to match, the mask, and
2988  * and the upper-bound value in the start of the input buffer for a flow entry.
2989  * This function should only be used for fixed-size data structures.
2990  *
2991  * This function also opportunistically determines the protocol headers to be
2992  * present based on the fields being set. Some fields cannot be used alone to
2993  * determine the protocol headers present. Sometimes, fields for particular
2994  * protocol headers are not matched. In those cases, the protocol headers
2995  * must be explicitly set.
2996  */
2997 static void
2998 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2999                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3000                      u16 mask_loc, u16 last_loc)
3001 {
3002         u64 bit = BIT_ULL(fld);
3003
3004         seg->match |= bit;
3005         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3006                 seg->range |= bit;
3007
3008         seg->fields[fld].type = field_type;
3009         seg->fields[fld].src.val = val_loc;
3010         seg->fields[fld].src.mask = mask_loc;
3011         seg->fields[fld].src.last = last_loc;
3012
3013         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3014 }
3015
3016 /**
3017  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3018  * @seg: packet segment the field being set belongs to
3019  * @fld: field to be set
3020  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3021  *           entry's input buffer
3022  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3023  *            input buffer
3024  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3025  *            entry's input buffer
3026  * @range: indicate if field being matched is to be in a range
3027  *
3028  * This function specifies the locations, in the form of byte offsets from the
3029  * start of the input buffer for a flow entry, from where the value to match,
3030  * the mask value, and upper value can be extracted. These locations are then
3031  * stored in the flow profile. When adding a flow entry associated with the
3032  * flow profile, these locations will be used to quickly extract the values and
3033  * create the content of a match entry. This function should only be used for
3034  * fixed-size data structures.
3035  */
3036 void
3037 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3038                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3039 {
3040         enum ice_flow_fld_match_type t = range ?
3041                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3042
3043         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3044 }
3045
3046 /**
3047  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3048  * @seg: packet segment the field being set belongs to
3049  * @fld: field to be set
3050  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3051  *           entry's input buffer
3052  * @pref_loc: location of prefix value from entry's input buffer
3053  * @pref_sz: size of the location holding the prefix value
3054  *
3055  * This function specifies the locations, in the form of byte offsets from the
3056  * start of the input buffer for a flow entry, from where the value to match
3057  * and the IPv4 prefix value can be extracted. These locations are then stored
3058  * in the flow profile. When adding flow entries to the associated flow profile,
3059  * these locations can be used to quickly extract the values to create the
3060  * content of a match entry. This function should only be used for fixed-size
3061  * data structures.
3062  */
3063 void
3064 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3065                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3066 {
3067         /* For this type of field, the "mask" location is for the prefix value's
3068          * location and the "last" location is for the size of the location of
3069          * the prefix value.
3070          */
3071         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3072                              pref_loc, (u16)pref_sz);
3073 }
3074
3075 /**
3076  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3077  * @seg: packet segment the field being set belongs to
3078  * @off: offset of the raw field from the beginning of the segment in bytes
3079  * @len: length of the raw pattern to be matched
3080  * @val_loc: location of the value to match from entry's input buffer
3081  * @mask_loc: location of mask value from entry's input buffer
3082  *
3083  * This function specifies the offset of the raw field to be match from the
3084  * beginning of the specified packet segment, and the locations, in the form of
3085  * byte offsets from the start of the input buffer for a flow entry, from where
3086  * the value to match and the mask value to be extracted. These locations are
3087  * then stored in the flow profile. When adding flow entries to the associated
3088  * flow profile, these locations can be used to quickly extract the values to
3089  * create the content of a match entry. This function should only be used for
3090  * fixed-size data structures.
3091  */
3092 void
3093 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3094                      u16 val_loc, u16 mask_loc)
3095 {
3096         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3097                 seg->raws[seg->raws_cnt].off = off;
3098                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3099                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3100                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3101                 /* The "last" field is used to store the length of the field */
3102                 seg->raws[seg->raws_cnt].info.src.last = len;
3103         }
3104
3105         /* Overflows of "raws" will be handled as an error condition later in
3106          * the flow when this information is processed.
3107          */
3108         seg->raws_cnt++;
3109 }
3110
3111 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3112 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3113
3114 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3115         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3116
3117 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3118         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3119          ICE_FLOW_SEG_HDR_SCTP)
3120
3121 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3122         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3123          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3124          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3125
3126 /**
3127  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3128  * @segs: pointer to the flow field segment(s)
3129  * @hash_fields: fields to be hashed on for the segment(s)
3130  * @flow_hdr: protocol header fields within a packet segment
3131  *
3132  * Helper function to extract fields from hash bitmap and use flow
3133  * header value to set flow field segment for further use in flow
3134  * profile entry or removal.
3135  */
3136 static enum ice_status
3137 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3138                           u32 flow_hdr)
3139 {
3140         u64 val = hash_fields;
3141         u8 i;
3142
3143         for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3144                 u64 bit = BIT_ULL(i);
3145
3146                 if (val & bit) {
3147                         ice_flow_set_fld(segs, (enum ice_flow_field)i,
3148                                          ICE_FLOW_FLD_OFF_INVAL,
3149                                          ICE_FLOW_FLD_OFF_INVAL,
3150                                          ICE_FLOW_FLD_OFF_INVAL, false);
3151                         val &= ~bit;
3152                 }
3153         }
3154         ICE_FLOW_SET_HDRS(segs, flow_hdr);
3155
3156         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3157             ~ICE_FLOW_RSS_HDRS_INNER_MASK)
3158                 return ICE_ERR_PARAM;
3159
3160         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3161         if (val && !ice_is_pow2(val))
3162                 return ICE_ERR_CFG;
3163
3164         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3165         if (val && !ice_is_pow2(val))
3166                 return ICE_ERR_CFG;
3167
3168         return ICE_SUCCESS;
3169 }
3170
3171 /**
3172  * ice_rem_vsi_rss_list - remove VSI from RSS list
3173  * @hw: pointer to the hardware structure
3174  * @vsi_handle: software VSI handle
3175  *
3176  * Remove the VSI from all RSS configurations in the list.
3177  */
3178 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3179 {
3180         struct ice_rss_cfg *r, *tmp;
3181
3182         if (LIST_EMPTY(&hw->rss_list_head))
3183                 return;
3184
3185         ice_acquire_lock(&hw->rss_locks);
3186         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3187                                  ice_rss_cfg, l_entry)
3188                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3189                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3190                                 LIST_DEL(&r->l_entry);
3191                                 ice_free(hw, r);
3192                         }
3193         ice_release_lock(&hw->rss_locks);
3194 }
3195
3196 /**
3197  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3198  * @hw: pointer to the hardware structure
3199  * @vsi_handle: software VSI handle
3200  *
3201  * This function will iterate through all flow profiles and disassociate
3202  * the VSI from that profile. If the flow profile has no VSIs it will
3203  * be removed.
3204  */
3205 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3206 {
3207         const enum ice_block blk = ICE_BLK_RSS;
3208         struct ice_flow_prof *p, *t;
3209         enum ice_status status = ICE_SUCCESS;
3210
3211         if (!ice_is_vsi_valid(hw, vsi_handle))
3212                 return ICE_ERR_PARAM;
3213
3214         if (LIST_EMPTY(&hw->fl_profs[blk]))
3215                 return ICE_SUCCESS;
3216
3217         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3218         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3219                                  l_entry)
3220                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3221                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3222                         if (status)
3223                                 break;
3224
3225                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3226                                 status = ice_flow_rem_prof_sync(hw, blk, p);
3227                                 if (status)
3228                                         break;
3229                         }
3230                 }
3231         ice_release_lock(&hw->fl_profs_locks[blk]);
3232
3233         return status;
3234 }
3235
3236 /**
3237  * ice_rem_rss_list - remove RSS configuration from list
3238  * @hw: pointer to the hardware structure
3239  * @vsi_handle: software VSI handle
3240  * @prof: pointer to flow profile
3241  *
3242  * Assumption: lock has already been acquired for RSS list
3243  */
3244 static void
3245 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3246 {
3247         struct ice_rss_cfg *r, *tmp;
3248
3249         /* Search for RSS hash fields associated to the VSI that match the
3250          * hash configurations associated to the flow profile. If found
3251          * remove from the RSS entry list of the VSI context and delete entry.
3252          */
3253         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3254                                  ice_rss_cfg, l_entry)
3255                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3256                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3257                         ice_clear_bit(vsi_handle, r->vsis);
3258                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3259                                 LIST_DEL(&r->l_entry);
3260                                 ice_free(hw, r);
3261                         }
3262                         return;
3263                 }
3264 }
3265
3266 /**
3267  * ice_add_rss_list - add RSS configuration to list
3268  * @hw: pointer to the hardware structure
3269  * @vsi_handle: software VSI handle
3270  * @prof: pointer to flow profile
3271  *
3272  * Assumption: lock has already been acquired for RSS list
3273  */
3274 static enum ice_status
3275 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3276 {
3277         struct ice_rss_cfg *r, *rss_cfg;
3278
3279         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3280                             ice_rss_cfg, l_entry)
3281                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3282                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3283                         ice_set_bit(vsi_handle, r->vsis);
3284                         return ICE_SUCCESS;
3285                 }
3286
3287         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3288         if (!rss_cfg)
3289                 return ICE_ERR_NO_MEMORY;
3290
3291         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3292         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3293         rss_cfg->symm = prof->cfg.symm;
3294         ice_set_bit(vsi_handle, rss_cfg->vsis);
3295
3296         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3297
3298         return ICE_SUCCESS;
3299 }
3300
3301 #define ICE_FLOW_PROF_HASH_S    0
3302 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3303 #define ICE_FLOW_PROF_HDR_S     32
3304 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3305 #define ICE_FLOW_PROF_ENCAP_S   63
3306 #define ICE_FLOW_PROF_ENCAP_M   (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
3307
3308 #define ICE_RSS_OUTER_HEADERS   1
3309 #define ICE_RSS_INNER_HEADERS   2
3310
3311 /* Flow profile ID format:
3312  * [0:31] - Packet match fields
3313  * [32:62] - Protocol header
3314  * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
3315  */
3316 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3317         (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3318               (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3319               ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3320
3321 static void
3322 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3323 {
3324         u32 s = ((src % 4) << 3); /* byte shift */
3325         u32 v = dst | 0x80; /* value to program */
3326         u8 i = src / 4; /* register index */
3327         u32 reg;
3328
3329         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3330         reg = (reg & ~(0xff << s)) | (v << s);
3331         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3332 }
3333
3334 static void
3335 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3336 {
3337         int fv_last_word =
3338                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3339         int i;
3340
3341         for (i = 0; i < len; i++) {
3342                 ice_rss_config_xor_word(hw, prof_id,
3343                                         /* Yes, field vector in GLQF_HSYMM and
3344                                          * GLQF_HINSET is inversed!
3345                                          */
3346                                         fv_last_word - (src + i),
3347                                         fv_last_word - (dst + i));
3348                 ice_rss_config_xor_word(hw, prof_id,
3349                                         fv_last_word - (dst + i),
3350                                         fv_last_word - (src + i));
3351         }
3352 }
3353
3354 static void
3355 ice_rss_update_symm(struct ice_hw *hw,
3356                     struct ice_flow_prof *prof)
3357 {
3358         struct ice_prof_map *map;
3359         u8 prof_id, m;
3360
3361         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3362         prof_id = map->prof_id;
3363
3364         /* clear to default */
3365         for (m = 0; m < 6; m++)
3366                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3367         if (prof->cfg.symm) {
3368                 struct ice_flow_seg_info *seg =
3369                         &prof->segs[prof->segs_cnt - 1];
3370
3371                 struct ice_flow_seg_xtrct *ipv4_src =
3372                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3373                 struct ice_flow_seg_xtrct *ipv4_dst =
3374                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3375                 struct ice_flow_seg_xtrct *ipv6_src =
3376                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3377                 struct ice_flow_seg_xtrct *ipv6_dst =
3378                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3379
3380                 struct ice_flow_seg_xtrct *tcp_src =
3381                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3382                 struct ice_flow_seg_xtrct *tcp_dst =
3383                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3384
3385                 struct ice_flow_seg_xtrct *udp_src =
3386                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3387                 struct ice_flow_seg_xtrct *udp_dst =
3388                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3389
3390                 struct ice_flow_seg_xtrct *sctp_src =
3391                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3392                 struct ice_flow_seg_xtrct *sctp_dst =
3393                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3394
3395                 /* xor IPv4 */
3396                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3397                         ice_rss_config_xor(hw, prof_id,
3398                                            ipv4_src->idx, ipv4_dst->idx, 2);
3399
3400                 /* xor IPv6 */
3401                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3402                         ice_rss_config_xor(hw, prof_id,
3403                                            ipv6_src->idx, ipv6_dst->idx, 8);
3404
3405                 /* xor TCP */
3406                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3407                         ice_rss_config_xor(hw, prof_id,
3408                                            tcp_src->idx, tcp_dst->idx, 1);
3409
3410                 /* xor UDP */
3411                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3412                         ice_rss_config_xor(hw, prof_id,
3413                                            udp_src->idx, udp_dst->idx, 1);
3414
3415                 /* xor SCTP */
3416                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3417                         ice_rss_config_xor(hw, prof_id,
3418                                            sctp_src->idx, sctp_dst->idx, 1);
3419         }
3420 }
3421
3422 /**
3423  * ice_add_rss_cfg_sync - add an RSS configuration
3424  * @hw: pointer to the hardware structure
3425  * @vsi_handle: software VSI handle
3426  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3427  * @addl_hdrs: protocol header fields
3428  * @segs_cnt: packet segment count
3429  * @symm: symmetric hash enable/disable
3430  *
3431  * Assumption: lock has already been acquired for RSS list
3432  */
3433 static enum ice_status
3434 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3435                      u32 addl_hdrs, u8 segs_cnt, bool symm)
3436 {
3437         const enum ice_block blk = ICE_BLK_RSS;
3438         struct ice_flow_prof *prof = NULL;
3439         struct ice_flow_seg_info *segs;
3440         enum ice_status status;
3441
3442         if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3443                 return ICE_ERR_PARAM;
3444
3445         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3446                                                       sizeof(*segs));
3447         if (!segs)
3448                 return ICE_ERR_NO_MEMORY;
3449
3450         /* Construct the packet segment info from the hashed fields */
3451         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3452                                            addl_hdrs);
3453         if (status)
3454                 goto exit;
3455
3456         /* Search for a flow profile that has matching headers, hash fields
3457          * and has the input VSI associated to it. If found, no further
3458          * operations required and exit.
3459          */
3460         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3461                                         vsi_handle,
3462                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
3463                                         ICE_FLOW_FIND_PROF_CHK_VSI);
3464         if (prof) {
3465                 if (prof->cfg.symm == symm)
3466                         goto exit;
3467                 prof->cfg.symm = symm;
3468                 goto update_symm;
3469         }
3470
3471         /* Check if a flow profile exists with the same protocol headers and
3472          * associated with the input VSI. If so disassociate the VSI from
3473          * this profile. The VSI will be added to a new profile created with
3474          * the protocol header and new hash field configuration.
3475          */
3476         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3477                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3478         if (prof) {
3479                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3480                 if (!status)
3481                         ice_rem_rss_list(hw, vsi_handle, prof);
3482                 else
3483                         goto exit;
3484
3485                 /* Remove profile if it has no VSIs associated */
3486                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3487                         status = ice_flow_rem_prof(hw, blk, prof->id);
3488                         if (status)
3489                                 goto exit;
3490                 }
3491         }
3492
3493         /* Search for a profile that has same match fields only. If this
3494          * exists then associate the VSI to this profile.
3495          */
3496         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3497                                         vsi_handle,
3498                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3499         if (prof) {
3500                 if (prof->cfg.symm == symm) {
3501                         status = ice_flow_assoc_prof(hw, blk, prof,
3502                                                      vsi_handle);
3503                         if (!status)
3504                                 status = ice_add_rss_list(hw, vsi_handle,
3505                                                           prof);
3506                 } else {
3507                         /* if a profile exist but with different symmetric
3508                          * requirement, just return error.
3509                          */
3510                         status = ICE_ERR_NOT_SUPPORTED;
3511                 }
3512                 goto exit;
3513         }
3514
3515         /* Create a new flow profile with generated profile and packet
3516          * segment information.
3517          */
3518         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3519                                    ICE_FLOW_GEN_PROFID(hashed_flds,
3520                                                        segs[segs_cnt - 1].hdrs,
3521                                                        segs_cnt),
3522                                    segs, segs_cnt, NULL, 0, &prof);
3523         if (status)
3524                 goto exit;
3525
3526         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3527         /* If association to a new flow profile failed then this profile can
3528          * be removed.
3529          */
3530         if (status) {
3531                 ice_flow_rem_prof(hw, blk, prof->id);
3532                 goto exit;
3533         }
3534
3535         status = ice_add_rss_list(hw, vsi_handle, prof);
3536
3537         prof->cfg.symm = symm;
3538
3539 update_symm:
3540         ice_rss_update_symm(hw, prof);
3541
3542 exit:
3543         ice_free(hw, segs);
3544         return status;
3545 }
3546
3547 /**
3548  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3549  * @hw: pointer to the hardware structure
3550  * @vsi_handle: software VSI handle
3551  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3552  * @addl_hdrs: protocol header fields
3553  * @symm: symmetric hash enable/disable
3554  *
3555  * This function will generate a flow profile based on fields associated with
3556  * the input fields to hash on, the flow type and use the VSI number to add
3557  * a flow entry to the profile.
3558  */
3559 enum ice_status
3560 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3561                 u32 addl_hdrs, bool symm)
3562 {
3563         enum ice_status status;
3564
3565         if (hashed_flds == ICE_HASH_INVALID ||
3566             !ice_is_vsi_valid(hw, vsi_handle))
3567                 return ICE_ERR_PARAM;
3568
3569         ice_acquire_lock(&hw->rss_locks);
3570         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3571                                       ICE_RSS_OUTER_HEADERS, symm);
3572         if (!status)
3573                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3574                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3575                                               symm);
3576         ice_release_lock(&hw->rss_locks);
3577
3578         return status;
3579 }
3580
3581 /**
3582  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3583  * @hw: pointer to the hardware structure
3584  * @vsi_handle: software VSI handle
3585  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3586  * @addl_hdrs: Protocol header fields within a packet segment
3587  * @segs_cnt: packet segment count
3588  *
3589  * Assumption: lock has already been acquired for RSS list
3590  */
3591 static enum ice_status
3592 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3593                      u32 addl_hdrs, u8 segs_cnt)
3594 {
3595         const enum ice_block blk = ICE_BLK_RSS;
3596         struct ice_flow_seg_info *segs;
3597         struct ice_flow_prof *prof;
3598         enum ice_status status;
3599
3600         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3601                                                       sizeof(*segs));
3602         if (!segs)
3603                 return ICE_ERR_NO_MEMORY;
3604
3605         /* Construct the packet segment info from the hashed fields */
3606         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3607                                            addl_hdrs);
3608         if (status)
3609                 goto out;
3610
3611         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3612                                         vsi_handle,
3613                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3614         if (!prof) {
3615                 status = ICE_ERR_DOES_NOT_EXIST;
3616                 goto out;
3617         }
3618
3619         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3620         if (status)
3621                 goto out;
3622
3623         /* Remove RSS configuration from VSI context before deleting
3624          * the flow profile.
3625          */
3626         ice_rem_rss_list(hw, vsi_handle, prof);
3627
3628         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3629                 status = ice_flow_rem_prof(hw, blk, prof->id);
3630
3631 out:
3632         ice_free(hw, segs);
3633         return status;
3634 }
3635
3636 /**
3637  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3638  * @hw: pointer to the hardware structure
3639  * @vsi_handle: software VSI handle
3640  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3641  * @addl_hdrs: Protocol header fields within a packet segment
3642  *
3643  * This function will lookup the flow profile based on the input
3644  * hash field bitmap, iterate through the profile entry list of
3645  * that profile and find entry associated with input VSI to be
3646  * removed. Calls are made to underlying flow apis which will in
3647  * turn build or update buffers for RSS XLT1 section.
3648  */
3649 enum ice_status
3650 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3651                 u32 addl_hdrs)
3652 {
3653         enum ice_status status;
3654
3655         if (hashed_flds == ICE_HASH_INVALID ||
3656             !ice_is_vsi_valid(hw, vsi_handle))
3657                 return ICE_ERR_PARAM;
3658
3659         ice_acquire_lock(&hw->rss_locks);
3660         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3661                                       ICE_RSS_OUTER_HEADERS);
3662         if (!status)
3663                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3664                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3665         ice_release_lock(&hw->rss_locks);
3666
3667         return status;
3668 }
3669
3670 /**
3671  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3672  * @hw: pointer to the hardware structure
3673  * @vsi_handle: software VSI handle
3674  */
3675 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3676 {
3677         enum ice_status status = ICE_SUCCESS;
3678         struct ice_rss_cfg *r;
3679
3680         if (!ice_is_vsi_valid(hw, vsi_handle))
3681                 return ICE_ERR_PARAM;
3682
3683         ice_acquire_lock(&hw->rss_locks);
3684         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3685                             ice_rss_cfg, l_entry) {
3686                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3687                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3688                                                       r->hashed_flds,
3689                                                       r->packet_hdr,
3690                                                       ICE_RSS_OUTER_HEADERS,
3691                                                       r->symm);
3692                         if (status)
3693                                 break;
3694                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3695                                                       r->hashed_flds,
3696                                                       r->packet_hdr,
3697                                                       ICE_RSS_INNER_HEADERS,
3698                                                       r->symm);
3699                         if (status)
3700                                 break;
3701                 }
3702         }
3703         ice_release_lock(&hw->rss_locks);
3704
3705         return status;
3706 }
3707
3708 /**
3709  * ice_get_rss_cfg - returns hashed fields for the given header types
3710  * @hw: pointer to the hardware structure
3711  * @vsi_handle: software VSI handle
3712  * @hdrs: protocol header type
3713  *
3714  * This function will return the match fields of the first instance of flow
3715  * profile having the given header types and containing input VSI
3716  */
3717 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3718 {
3719         struct ice_rss_cfg *r, *rss_cfg = NULL;
3720
3721         /* verify if the protocol header is non zero and VSI is valid */
3722         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3723                 return ICE_HASH_INVALID;
3724
3725         ice_acquire_lock(&hw->rss_locks);
3726         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3727                             ice_rss_cfg, l_entry)
3728                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3729                     r->packet_hdr == hdrs) {
3730                         rss_cfg = r;
3731                         break;
3732                 }
3733         ice_release_lock(&hw->rss_locks);
3734
3735         return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
3736 }