1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
14 #define ICE_FLOW_FLD_SZ_IP_TTL          1
15 #define ICE_FLOW_FLD_SZ_IP_PROT         1
16 #define ICE_FLOW_FLD_SZ_PORT            2
17 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
18 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
19 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
20 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
21 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
22 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
23 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
24 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
25 #define ICE_FLOW_FLD_SZ_PFCP_SEID       8
26 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
27 #define ICE_FLOW_FLD_SZ_ESP_SPI         4
28 #define ICE_FLOW_FLD_SZ_AH_SPI          4
29 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
30
31 /* Describe properties of a protocol header field */
32 struct ice_flow_field_info {
33         enum ice_flow_seg_hdr hdr;
34         s16 off;        /* Offset from start of a protocol header, in bits */
35         u16 size;       /* Size of the field, in bits */
36         u16 mask;       /* 16-bit mask for field */
37 };
38
39 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
40         .hdr = _hdr, \
41         .off = (_offset_bytes) * BITS_PER_BYTE, \
42         .size = (_size_bytes) * BITS_PER_BYTE, \
43         .mask = 0, \
44 }
45
46 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
47         .hdr = _hdr, \
48         .off = (_offset_bytes) * BITS_PER_BYTE, \
49         .size = (_size_bytes) * BITS_PER_BYTE, \
50         .mask = _mask, \
51 }
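/* Illustrative example: the macros above convert byte-based offsets and sizes
 * into bits, so a table entry such as
 *
 *     ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT)
 *
 * expands to { .hdr = ICE_FLOW_SEG_HDR_TCP, .off = 0, .size = 16, .mask = 0 },
 * i.e. a 16-bit field starting at bit offset 0 of the TCP header with no
 * per-field mask applied.
 */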
52
53 /* Table containing properties of supported protocol header fields */
54 static const
55 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
56         /* Ether */
57         /* ICE_FLOW_FIELD_IDX_ETH_DA */
58         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
59         /* ICE_FLOW_FIELD_IDX_ETH_SA */
60         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
61         /* ICE_FLOW_FIELD_IDX_S_VLAN */
62         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
63         /* ICE_FLOW_FIELD_IDX_C_VLAN */
64         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
65         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
66         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE),
67         /* IPv4 / IPv6 */
68         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
69         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
70                               0x00fc),
71         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
72         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
73                               0x0ff0),
74         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
75         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
76                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
77         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
78         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
80         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
81         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
82                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
83         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
84         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
86         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
87         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
88         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
89         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
90         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
91         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
92         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
93         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
94         /* Transport */
95         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
96         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
97         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
99         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
100         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
101         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
102         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
103         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
104         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
105         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
106         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
107         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
108         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
109         /* ARP */
110         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
111         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
112         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
113         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
114         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
115         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
116         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
117         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
118         /* ICE_FLOW_FIELD_IDX_ARP_OP */
119         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
120         /* ICMP */
121         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
122         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
123         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
124         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
125         /* GRE */
126         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
127         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
128         /* GTP */
129         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
130         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
131                           ICE_FLOW_FLD_SZ_GTP_TEID),
132         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
133         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
134                           ICE_FLOW_FLD_SZ_GTP_TEID),
135         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
136         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
137                           ICE_FLOW_FLD_SZ_GTP_TEID),
138         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
139         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
140                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
141         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
142         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
143                           ICE_FLOW_FLD_SZ_GTP_TEID),
144         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
145         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
146                           ICE_FLOW_FLD_SZ_GTP_TEID),
147         /* PPPOE */
148         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
149         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
150                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
151         /* PFCP */
152         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
153         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
154                           ICE_FLOW_FLD_SZ_PFCP_SEID),
155         /* L2TPV3 */
156         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
157         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
158                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
159         /* ESP */
160         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
161         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
162                           ICE_FLOW_FLD_SZ_ESP_SPI),
163         /* AH */
164         /* ICE_FLOW_FIELD_IDX_AH_SPI */
165         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
166                           ICE_FLOW_FLD_SZ_AH_SPI),
167         /* NAT_T_ESP */
168         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
169         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
170                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
171 };
172
173 /* Bitmaps indicating relevant packet types for a particular protocol header
174  *
175  * Packet types for packets with an Outer/First/Single MAC header
176  */
177 static const u32 ice_ptypes_mac_ofos[] = {
178         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
179         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
180         0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
181         0x00000000, 0x00000000, 0x00000000, 0x00000000,
182         0x00000000, 0x00000000, 0x00000000, 0x00000000,
183         0x00000000, 0x00000000, 0x00000000, 0x00000000,
184         0x00000000, 0x00000000, 0x00000000, 0x00000000,
185         0x00000000, 0x00000000, 0x00000000, 0x00000000,
186 };
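/* Note (illustrative): each of these tables is interpreted as a packet type
 * bitmap of ICE_FLOW_PTYPE_MAX bits stored as 32 u32 words, where bit N marks
 * packet type N as relevant for the header in question. Conceptually, on a
 * little-endian host, a ptype is relevant when
 *
 *     table[ptype / 32] & BIT(ptype % 32)
 *
 * is non-zero. The tables are cast to ice_bitmap_t and ANDed together in
 * ice_flow_proc_seg_hdrs() to narrow the set of ptypes matching a profile.
 */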
187
188 /* Packet types for packets with an Innermost/Last MAC VLAN header */
189 static const u32 ice_ptypes_macvlan_il[] = {
190         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
191         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
192         0x00000000, 0x00000000, 0x00000000, 0x00000000,
193         0x00000000, 0x00000000, 0x00000000, 0x00000000,
194         0x00000000, 0x00000000, 0x00000000, 0x00000000,
195         0x00000000, 0x00000000, 0x00000000, 0x00000000,
196         0x00000000, 0x00000000, 0x00000000, 0x00000000,
197         0x00000000, 0x00000000, 0x00000000, 0x00000000,
198 };
199
200 /* Packet types for packets with an Outer/First/Single IPv4 header */
201 static const u32 ice_ptypes_ipv4_ofos[] = {
202         0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
203         0x00000000, 0x00000155, 0x00000000, 0x00000000,
204         0x0003000F, 0x000FC000, 0x83E0F800, 0x00000101,
205         0x00000000, 0x00000000, 0x00000000, 0x00000000,
206         0x00000000, 0x00000000, 0x00000000, 0x00000000,
207         0x00000000, 0x00000000, 0x00000000, 0x00000000,
208         0x00000000, 0x00000000, 0x00000000, 0x00000000,
209         0x00000000, 0x00000000, 0x00000000, 0x00000000,
210 };
211
212 /* Packet types for packets with an Innermost/Last IPv4 header */
213 static const u32 ice_ptypes_ipv4_il[] = {
214         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
215         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
216         0x00000000, 0x00000000, 0x001FF800, 0x00000000,
217         0x00000000, 0x00000000, 0x00000000, 0x00000000,
218         0x00000000, 0x00000000, 0x00000000, 0x00000000,
219         0x00000000, 0x00000000, 0x00000000, 0x00000000,
220         0x00000000, 0x00000000, 0x00000000, 0x00000000,
221         0x00000000, 0x00000000, 0x00000000, 0x00000000,
222 };
223
224 /* Packet types for packets with an Outer/First/Single IPv6 header */
225 static const u32 ice_ptypes_ipv6_ofos[] = {
226         0x00000000, 0x00000000, 0x77000000, 0x10002000,
227         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
228         0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
229         0x00000000, 0x00000000, 0x00000000, 0x00000000,
230         0x00000000, 0x00000000, 0x00000000, 0x00000000,
231         0x00000000, 0x00000000, 0x00000000, 0x00000000,
232         0x00000000, 0x00000000, 0x00000000, 0x00000000,
233         0x00000000, 0x00000000, 0x00000000, 0x00000000,
234 };
235
236 /* Packet types for packets with an Innermost/Last IPv6 header */
237 static const u32 ice_ptypes_ipv6_il[] = {
238         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
239         0x00000770, 0x00000000, 0x00000000, 0x00000000,
240         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
241         0x00000000, 0x00000000, 0x00000000, 0x00000000,
242         0x00000000, 0x00000000, 0x00000000, 0x00000000,
243         0x00000000, 0x00000000, 0x00000000, 0x00000000,
244         0x00000000, 0x00000000, 0x00000000, 0x00000000,
245         0x00000000, 0x00000000, 0x00000000, 0x00000000,
246 };
247
248 /* Packet types for packets with an Outermost/First ARP header */
249 static const u32 ice_ptypes_arp_of[] = {
250         0x00000800, 0x00000000, 0x00000000, 0x00000000,
251         0x00000000, 0x00000000, 0x00000000, 0x00000000,
252         0x00000000, 0x00000000, 0x00000000, 0x00000000,
253         0x00000000, 0x00000000, 0x00000000, 0x00000000,
254         0x00000000, 0x00000000, 0x00000000, 0x00000000,
255         0x00000000, 0x00000000, 0x00000000, 0x00000000,
256         0x00000000, 0x00000000, 0x00000000, 0x00000000,
257         0x00000000, 0x00000000, 0x00000000, 0x00000000,
258 };
259
260 /* UDP Packet types for non-tunneled packets or tunneled
261  * packets with inner UDP.
262  */
263 static const u32 ice_ptypes_udp_il[] = {
264         0x81000000, 0x20204040, 0x04000010, 0x80810102,
265         0x00000040, 0x00000000, 0x00000000, 0x00000000,
266         0x00000000, 0x00410000, 0x90842000, 0x00000007,
267         0x00000000, 0x00000000, 0x00000000, 0x00000000,
268         0x00000000, 0x00000000, 0x00000000, 0x00000000,
269         0x00000000, 0x00000000, 0x00000000, 0x00000000,
270         0x00000000, 0x00000000, 0x00000000, 0x00000000,
271         0x00000000, 0x00000000, 0x00000000, 0x00000000,
272 };
273
274 /* Packet types for packets with an Innermost/Last TCP header */
275 static const u32 ice_ptypes_tcp_il[] = {
276         0x04000000, 0x80810102, 0x10000040, 0x02040408,
277         0x00000102, 0x00000000, 0x00000000, 0x00000000,
278         0x00000000, 0x00820000, 0x21084000, 0x00000000,
279         0x00000000, 0x00000000, 0x00000000, 0x00000000,
280         0x00000000, 0x00000000, 0x00000000, 0x00000000,
281         0x00000000, 0x00000000, 0x00000000, 0x00000000,
282         0x00000000, 0x00000000, 0x00000000, 0x00000000,
283         0x00000000, 0x00000000, 0x00000000, 0x00000000,
284 };
285
286 /* Packet types for packets with an Innermost/Last SCTP header */
287 static const u32 ice_ptypes_sctp_il[] = {
288         0x08000000, 0x01020204, 0x20000081, 0x04080810,
289         0x00000204, 0x00000000, 0x00000000, 0x00000000,
290         0x00000000, 0x01040000, 0x00000000, 0x00000000,
291         0x00000000, 0x00000000, 0x00000000, 0x00000000,
292         0x00000000, 0x00000000, 0x00000000, 0x00000000,
293         0x00000000, 0x00000000, 0x00000000, 0x00000000,
294         0x00000000, 0x00000000, 0x00000000, 0x00000000,
295         0x00000000, 0x00000000, 0x00000000, 0x00000000,
296 };
297
298 /* Packet types for packets with an Outermost/First ICMP header */
299 static const u32 ice_ptypes_icmp_of[] = {
300         0x10000000, 0x00000000, 0x00000000, 0x00000000,
301         0x00000000, 0x00000000, 0x00000000, 0x00000000,
302         0x00000000, 0x00000000, 0x00000000, 0x00000000,
303         0x00000000, 0x00000000, 0x00000000, 0x00000000,
304         0x00000000, 0x00000000, 0x00000000, 0x00000000,
305         0x00000000, 0x00000000, 0x00000000, 0x00000000,
306         0x00000000, 0x00000000, 0x00000000, 0x00000000,
307         0x00000000, 0x00000000, 0x00000000, 0x00000000,
308 };
309
310 /* Packet types for packets with an Innermost/Last ICMP header */
311 static const u32 ice_ptypes_icmp_il[] = {
312         0x00000000, 0x02040408, 0x40000102, 0x08101020,
313         0x00000408, 0x00000000, 0x00000000, 0x00000000,
314         0x00000000, 0x00000000, 0x42108000, 0x00000000,
315         0x00000000, 0x00000000, 0x00000000, 0x00000000,
316         0x00000000, 0x00000000, 0x00000000, 0x00000000,
317         0x00000000, 0x00000000, 0x00000000, 0x00000000,
318         0x00000000, 0x00000000, 0x00000000, 0x00000000,
319         0x00000000, 0x00000000, 0x00000000, 0x00000000,
320 };
321
322 /* Packet types for packets with an Outermost/First GRE header */
323 static const u32 ice_ptypes_gre_of[] = {
324         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
325         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
326         0x00000000, 0x00000000, 0x00000000, 0x00000000,
327         0x00000000, 0x00000000, 0x00000000, 0x00000000,
328         0x00000000, 0x00000000, 0x00000000, 0x00000000,
329         0x00000000, 0x00000000, 0x00000000, 0x00000000,
330         0x00000000, 0x00000000, 0x00000000, 0x00000000,
331         0x00000000, 0x00000000, 0x00000000, 0x00000000,
332 };
333
334 /* Packet types for packets with an Innermost/Last MAC header */
335 static const u32 ice_ptypes_mac_il[] = {
336         0x00000000, 0x00000000, 0x00000000, 0x00000000,
337         0x00000000, 0x00000000, 0x00000000, 0x00000000,
338         0x00000000, 0x00000000, 0x00000000, 0x00000000,
339         0x00000000, 0x00000000, 0x00000000, 0x00000000,
340         0x00000000, 0x00000000, 0x00000000, 0x00000000,
341         0x00000000, 0x00000000, 0x00000000, 0x00000000,
342         0x00000000, 0x00000000, 0x00000000, 0x00000000,
343         0x00000000, 0x00000000, 0x00000000, 0x00000000,
344 };
345
346 /* Packet types for GTPC */
347 static const u32 ice_ptypes_gtpc[] = {
348         0x00000000, 0x00000000, 0x00000000, 0x00000000,
349         0x00000000, 0x00000000, 0x00000000, 0x00000000,
350         0x00000000, 0x00000000, 0x00000180, 0x00000000,
351         0x00000000, 0x00000000, 0x00000000, 0x00000000,
352         0x00000000, 0x00000000, 0x00000000, 0x00000000,
353         0x00000000, 0x00000000, 0x00000000, 0x00000000,
354         0x00000000, 0x00000000, 0x00000000, 0x00000000,
355         0x00000000, 0x00000000, 0x00000000, 0x00000000,
356 };
357
358 /* Packet types for GTPC with TEID */
359 static const u32 ice_ptypes_gtpc_tid[] = {
360         0x00000000, 0x00000000, 0x00000000, 0x00000000,
361         0x00000000, 0x00000000, 0x00000000, 0x00000000,
362         0x00000000, 0x00000000, 0x00000060, 0x00000000,
363         0x00000000, 0x00000000, 0x00000000, 0x00000000,
364         0x00000000, 0x00000000, 0x00000000, 0x00000000,
365         0x00000000, 0x00000000, 0x00000000, 0x00000000,
366         0x00000000, 0x00000000, 0x00000000, 0x00000000,
367         0x00000000, 0x00000000, 0x00000000, 0x00000000,
368 };
369
370 /* Packet types for GTPU */
371 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
372         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
373         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
374         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
375         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
376         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
377         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
378         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
379         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
380         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
381         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
382         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
383         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
384         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
385         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
386         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
387         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
388         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
389         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
390         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
391         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
392 };
393
394 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
395         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
396         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
397         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
398         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
399         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
400         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
401         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
402         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
403         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
404         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
405         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
406         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
407         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
408         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
409         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
410         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
411         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
412         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
413         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
414         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
415 };
416
417 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
418         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
419         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
420         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
421         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
422         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
423         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
424         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
425         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
426         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
427         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
428         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
429         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
430         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
431         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
432         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
433         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
434         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
435         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
436         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
437         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
438 };
439
440 static const u32 ice_ptypes_gtpu[] = {
441         0x00000000, 0x00000000, 0x00000000, 0x00000000,
442         0x00000000, 0x00000000, 0x00000000, 0x00000000,
443         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
444         0x00000000, 0x00000000, 0x00000000, 0x00000000,
445         0x00000000, 0x00000000, 0x00000000, 0x00000000,
446         0x00000000, 0x00000000, 0x00000000, 0x00000000,
447         0x00000000, 0x00000000, 0x00000000, 0x00000000,
448         0x00000000, 0x00000000, 0x00000000, 0x00000000,
449 };
450
451 /* Packet types for pppoe */
452 static const u32 ice_ptypes_pppoe[] = {
453         0x00000000, 0x00000000, 0x00000000, 0x00000000,
454         0x00000000, 0x00000000, 0x00000000, 0x00000000,
455         0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
456         0x00000000, 0x00000000, 0x00000000, 0x00000000,
457         0x00000000, 0x00000000, 0x00000000, 0x00000000,
458         0x00000000, 0x00000000, 0x00000000, 0x00000000,
459         0x00000000, 0x00000000, 0x00000000, 0x00000000,
460         0x00000000, 0x00000000, 0x00000000, 0x00000000,
461 };
462
463 /* Packet types for packets with PFCP NODE header */
464 static const u32 ice_ptypes_pfcp_node[] = {
465         0x00000000, 0x00000000, 0x00000000, 0x00000000,
466         0x00000000, 0x00000000, 0x00000000, 0x00000000,
467         0x00000000, 0x00000000, 0x80000000, 0x00000002,
468         0x00000000, 0x00000000, 0x00000000, 0x00000000,
469         0x00000000, 0x00000000, 0x00000000, 0x00000000,
470         0x00000000, 0x00000000, 0x00000000, 0x00000000,
471         0x00000000, 0x00000000, 0x00000000, 0x00000000,
472         0x00000000, 0x00000000, 0x00000000, 0x00000000,
473 };
474
475 /* Packet types for packets with PFCP SESSION header */
476 static const u32 ice_ptypes_pfcp_session[] = {
477         0x00000000, 0x00000000, 0x00000000, 0x00000000,
478         0x00000000, 0x00000000, 0x00000000, 0x00000000,
479         0x00000000, 0x00000000, 0x00000000, 0x00000005,
480         0x00000000, 0x00000000, 0x00000000, 0x00000000,
481         0x00000000, 0x00000000, 0x00000000, 0x00000000,
482         0x00000000, 0x00000000, 0x00000000, 0x00000000,
483         0x00000000, 0x00000000, 0x00000000, 0x00000000,
484         0x00000000, 0x00000000, 0x00000000, 0x00000000,
485 };
486
487 /* Packet types for l2tpv3 */
488 static const u32 ice_ptypes_l2tpv3[] = {
489         0x00000000, 0x00000000, 0x00000000, 0x00000000,
490         0x00000000, 0x00000000, 0x00000000, 0x00000000,
491         0x00000000, 0x00000000, 0x00000000, 0x00000300,
492         0x00000000, 0x00000000, 0x00000000, 0x00000000,
493         0x00000000, 0x00000000, 0x00000000, 0x00000000,
494         0x00000000, 0x00000000, 0x00000000, 0x00000000,
495         0x00000000, 0x00000000, 0x00000000, 0x00000000,
496         0x00000000, 0x00000000, 0x00000000, 0x00000000,
497 };
498
499 /* Packet types for esp */
500 static const u32 ice_ptypes_esp[] = {
501         0x00000000, 0x00000000, 0x00000000, 0x00000000,
502         0x00000000, 0x00000003, 0x00000000, 0x00000000,
503         0x00000000, 0x00000000, 0x00000000, 0x00000000,
504         0x00000000, 0x00000000, 0x00000000, 0x00000000,
505         0x00000000, 0x00000000, 0x00000000, 0x00000000,
506         0x00000000, 0x00000000, 0x00000000, 0x00000000,
507         0x00000000, 0x00000000, 0x00000000, 0x00000000,
508         0x00000000, 0x00000000, 0x00000000, 0x00000000,
509 };
510
511 /* Packet types for ah */
512 static const u32 ice_ptypes_ah[] = {
513         0x00000000, 0x00000000, 0x00000000, 0x00000000,
514         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
515         0x00000000, 0x00000000, 0x00000000, 0x00000000,
516         0x00000000, 0x00000000, 0x00000000, 0x00000000,
517         0x00000000, 0x00000000, 0x00000000, 0x00000000,
518         0x00000000, 0x00000000, 0x00000000, 0x00000000,
519         0x00000000, 0x00000000, 0x00000000, 0x00000000,
520         0x00000000, 0x00000000, 0x00000000, 0x00000000,
521 };
522
523 /* Packet types for packets with NAT_T ESP header */
524 static const u32 ice_ptypes_nat_t_esp[] = {
525         0x00000000, 0x00000000, 0x00000000, 0x00000000,
526         0x00000000, 0x00000030, 0x00000000, 0x00000000,
527         0x00000000, 0x00000000, 0x00000000, 0x00000000,
528         0x00000000, 0x00000000, 0x00000000, 0x00000000,
529         0x00000000, 0x00000000, 0x00000000, 0x00000000,
530         0x00000000, 0x00000000, 0x00000000, 0x00000000,
531         0x00000000, 0x00000000, 0x00000000, 0x00000000,
532         0x00000000, 0x00000000, 0x00000000, 0x00000000,
533 };
534
535 /* Manage parameters and info used during the creation of a flow profile */
536 struct ice_flow_prof_params {
537         enum ice_block blk;
538         u16 entry_length; /* # of bytes the formatted entry will require */
539         u8 es_cnt;
540         struct ice_flow_prof *prof;
541
542         /* For ACL, es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0.
543          * This will give us the direction flags.
544          */
545         struct ice_fv_word es[ICE_MAX_FV_WORDS];
546         /* attributes can be used to add attributes to a particular PTYPE */
547         const struct ice_ptype_attributes *attr;
548         u16 attr_cnt;
549
550         u16 mask[ICE_MAX_FV_WORDS];
551         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
552 };
553
554 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
555         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
556         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
557         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
558         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
559         ICE_FLOW_SEG_HDR_NAT_T_ESP)
560
561 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
562         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
563 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
564         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
565          ICE_FLOW_SEG_HDR_ARP)
566 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
567         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
568          ICE_FLOW_SEG_HDR_SCTP)
569
570 /**
571  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
572  * @segs: array of one or more packet segments that describe the flow
573  * @segs_cnt: number of packet segments provided
574  */
575 static enum ice_status
576 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
577 {
578         u8 i;
579
580         for (i = 0; i < segs_cnt; i++) {
581                 /* Multiple L3 headers */
582                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
583                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
584                         return ICE_ERR_PARAM;
585
586                 /* Multiple L4 headers */
587                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
588                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
589                         return ICE_ERR_PARAM;
590         }
591
592         return ICE_SUCCESS;
593 }
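/* Example (illustrative): a segment whose hdrs field has both
 * ICE_FLOW_SEG_HDR_IPV4 and ICE_FLOW_SEG_HDR_IPV6 set leaves two bits in
 * ICE_FLOW_SEG_HDRS_L3_MASK, so the ice_is_pow2() check above fails and
 * ice_flow_val_hdrs() returns ICE_ERR_PARAM. At most one L3 and at most one
 * L4 header may be specified per segment.
 */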
594
595 /* Sizes of fixed known protocol headers without header options */
596 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
597 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
598 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
599 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
600 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
601 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
602 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
603 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
604 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
605
606 /**
607  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
608  * @params: information about the flow to be processed
609  * @seg: index of packet segment whose header size is to be determined
610  */
611 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
612 {
613         u16 sz;
614
615         /* L2 headers */
616         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
617                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
618
619         /* L3 headers */
620         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
621                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
622         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
623                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
624         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
625                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
626         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
627                 /* An L3 header is required if an L4 header is specified */
628                 return 0;
629
630         /* L4 headers */
631         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
632                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
633         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
634                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
635         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
636                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
637         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
638                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
639
640         return sz;
641 }
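/* Worked example (illustrative): for a segment with
 * ICE_FLOW_SEG_HDR_VLAN | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP,
 * ice_flow_calc_seg_sz() returns ICE_FLOW_PROT_HDR_SZ_MAC_VLAN +
 * ICE_FLOW_PROT_HDR_SZ_IPV4 + ICE_FLOW_PROT_HDR_SZ_UDP = 16 + 20 + 8 = 44
 * bytes. A segment with an L4 header but no L3 header yields 0, which callers
 * such as ice_flow_xtract_raws() treat as invalid.
 */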
642
643 /**
644  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
645  * @params: information about the flow to be processed
646  *
647  * This function identifies the packet types associated with the protocol
648  * headers being present in packet segments of the specified flow profile.
649  */
650 static enum ice_status
651 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
652 {
653         struct ice_flow_prof *prof;
654         u8 i;
655
656         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
657                    ICE_NONDMA_MEM);
658
659         prof = params->prof;
660
661         for (i = 0; i < params->prof->segs_cnt; i++) {
662                 const ice_bitmap_t *src;
663                 u32 hdrs;
664
665                 hdrs = prof->segs[i].hdrs;
666
667                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
668                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
669                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
670                         ice_and_bitmap(params->ptypes, params->ptypes, src,
671                                        ICE_FLOW_PTYPE_MAX);
672                 }
673
674                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
675                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
676                         ice_and_bitmap(params->ptypes, params->ptypes, src,
677                                        ICE_FLOW_PTYPE_MAX);
678                 }
679
680                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
681                         ice_and_bitmap(params->ptypes, params->ptypes,
682                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
683                                        ICE_FLOW_PTYPE_MAX);
684                 }
685
686                 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
687                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
688                         ice_and_bitmap(params->ptypes, params->ptypes, src,
689                                        ICE_FLOW_PTYPE_MAX);
690                 }
691
692                 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
693                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
694                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
695                         ice_and_bitmap(params->ptypes, params->ptypes, src,
696                                        ICE_FLOW_PTYPE_MAX);
697                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
698                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
699                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
700                         ice_and_bitmap(params->ptypes, params->ptypes, src,
701                                        ICE_FLOW_PTYPE_MAX);
702                 }
703
704                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
705                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
706                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
707                         ice_and_bitmap(params->ptypes, params->ptypes, src,
708                                        ICE_FLOW_PTYPE_MAX);
709                 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
710                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
711                         ice_and_bitmap(params->ptypes, params->ptypes, src,
712                                        ICE_FLOW_PTYPE_MAX);
713                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
714                         ice_and_bitmap(params->ptypes, params->ptypes,
715                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
716                                        ICE_FLOW_PTYPE_MAX);
717                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
718                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
719                         ice_and_bitmap(params->ptypes, params->ptypes, src,
720                                        ICE_FLOW_PTYPE_MAX);
721                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
722                         if (!i) {
723                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
724                                 ice_and_bitmap(params->ptypes, params->ptypes,
725                                                src, ICE_FLOW_PTYPE_MAX);
726                         }
727                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
728                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
729                         ice_and_bitmap(params->ptypes, params->ptypes,
730                                        src, ICE_FLOW_PTYPE_MAX);
731                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
732                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
733                         ice_and_bitmap(params->ptypes, params->ptypes,
734                                        src, ICE_FLOW_PTYPE_MAX);
735                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
736                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
737                         ice_and_bitmap(params->ptypes, params->ptypes,
738                                        src, ICE_FLOW_PTYPE_MAX);
739
740                         /* Attributes for GTP packet with downlink */
741                         params->attr = ice_attr_gtpu_down;
742                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
743                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
744                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
745                         ice_and_bitmap(params->ptypes, params->ptypes,
746                                        src, ICE_FLOW_PTYPE_MAX);
747
748                         /* Attributes for GTP packet with uplink */
749                         params->attr = ice_attr_gtpu_up;
750                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
751                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
752                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
753                         ice_and_bitmap(params->ptypes, params->ptypes,
754                                        src, ICE_FLOW_PTYPE_MAX);
755
756                         /* Attributes for GTP packet with Extension Header */
757                         params->attr = ice_attr_gtpu_eh;
758                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
759                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
760                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
761                         ice_and_bitmap(params->ptypes, params->ptypes,
762                                        src, ICE_FLOW_PTYPE_MAX);
763                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
764                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
765                         ice_and_bitmap(params->ptypes, params->ptypes,
766                                        src, ICE_FLOW_PTYPE_MAX);
767                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
768                         src = (const ice_bitmap_t *)ice_ptypes_esp;
769                         ice_and_bitmap(params->ptypes, params->ptypes,
770                                        src, ICE_FLOW_PTYPE_MAX);
771                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
772                         src = (const ice_bitmap_t *)ice_ptypes_ah;
773                         ice_and_bitmap(params->ptypes, params->ptypes,
774                                        src, ICE_FLOW_PTYPE_MAX);
775                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
776                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
777                         ice_and_bitmap(params->ptypes, params->ptypes,
778                                        src, ICE_FLOW_PTYPE_MAX);
779                 }
780
781                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
782                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
783                                 src =
784                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
785                         else
786                                 src =
787                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
788
789                         ice_and_bitmap(params->ptypes, params->ptypes,
790                                        src, ICE_FLOW_PTYPE_MAX);
791                 } else {
792                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
793                         ice_andnot_bitmap(params->ptypes, params->ptypes,
794                                           src, ICE_FLOW_PTYPE_MAX);
795
796                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
797                         ice_andnot_bitmap(params->ptypes, params->ptypes,
798                                           src, ICE_FLOW_PTYPE_MAX);
799                 }
800         }
801
802         return ICE_SUCCESS;
803 }
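/* Note (illustrative): the loop above starts from an all-ones ptype bitmap
 * (ice_memset(..., 0xff, ...)) and successively ANDs it with the table for
 * every protocol header present in each segment. For example, a profile with
 * an outer ETH + IPv4 + UDP segment ends up with
 * ice_ptypes_mac_ofos & ice_ptypes_ipv4_ofos & ice_ptypes_udp_il, so only the
 * packet types common to all three headers survive.
 */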
804
805 /**
806  * ice_flow_xtract_pkt_flags - Create an extraction sequence entry for packet flags
807  * @hw: pointer to the HW struct
808  * @params: information about the flow to be processed
809  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
810  *
811  * This function will allocate an extraction sequence entry for a DWORD-sized
812  * chunk of the packet flags.
813  */
814 static enum ice_status
815 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
816                           struct ice_flow_prof_params *params,
817                           enum ice_flex_mdid_pkt_flags flags)
818 {
819         u8 fv_words = hw->blk[params->blk].es.fvw;
820         u8 idx;
821
822         /* Make sure the number of extraction sequence entries required does not
823          * exceed the block's capacity.
824          */
825         if (params->es_cnt >= fv_words)
826                 return ICE_ERR_MAX_LIMIT;
827
828         /* some blocks require a reversed field vector layout */
829         if (hw->blk[params->blk].es.reverse)
830                 idx = fv_words - params->es_cnt - 1;
831         else
832                 idx = params->es_cnt;
833
834         params->es[idx].prot_id = ICE_PROT_META_ID;
835         params->es[idx].off = flags;
836         params->es_cnt++;
837
838         return ICE_SUCCESS;
839 }
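/* Example (illustrative): if the block's field vector has fvw = 48 words and
 * uses the reversed layout, the first metadata entry allocated here
 * (es_cnt == 0) lands at index 47, the next at 46, and so on; with the normal
 * layout the indices simply count up from 0.
 */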
840
841 /**
842  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
843  * @hw: pointer to the HW struct
844  * @params: information about the flow to be processed
845  * @seg: packet segment index of the field to be extracted
846  * @fld: ID of field to be extracted
847  * @match: bitfield of all fields
848  *
849  * This function determines the protocol ID, offset, and size of the given
850  * field. It then allocates one or more extraction sequence entries for the
851  * given field, and fills the entries with protocol ID and offset information.
852  */
853 static enum ice_status
854 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
855                     u8 seg, enum ice_flow_field fld, u64 match)
856 {
857         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
858         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
859         u8 fv_words = hw->blk[params->blk].es.fvw;
860         struct ice_flow_fld_info *flds;
861         u16 cnt, ese_bits, i;
862         u16 sib_mask = 0;
863         s16 adj = 0;
864         u16 mask;
865         u16 off;
866
867         flds = params->prof->segs[seg].fields;
868
869         switch (fld) {
870         case ICE_FLOW_FIELD_IDX_ETH_DA:
871         case ICE_FLOW_FIELD_IDX_ETH_SA:
872         case ICE_FLOW_FIELD_IDX_S_VLAN:
873         case ICE_FLOW_FIELD_IDX_C_VLAN:
874                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
875                 break;
876         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
877                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
878                 break;
879         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
880                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
881                 break;
882         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
883                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
884                 break;
885         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
886         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
887                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
888
889                 /* TTL and PROT share the same extraction seq. entry.
890                  * Each is considered a sibling to the other in terms of sharing
891                  * the same extraction sequence entry.
892                  */
893                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
894                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
895                 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
896                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
897
898                 /* If the sibling field is also included, that field's
899                  * mask needs to be included.
900                  */
901                 if (match & BIT(sib))
902                         sib_mask = ice_flds_info[sib].mask;
903                 break;
904         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
905         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
906                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
907
908                 /* TTL and PROT share the same extraction seq. entry.
909                  * Each is considered a sibling to the other in terms of sharing
910                  * the same extraction sequence entry.
911                  */
912                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
913                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
914                 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
915                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
916
917                 /* If the sibling field is also included, that field's
918                  * mask needs to be included.
919                  */
920                 if (match & BIT(sib))
921                         sib_mask = ice_flds_info[sib].mask;
922                 break;
923         case ICE_FLOW_FIELD_IDX_IPV4_SA:
924         case ICE_FLOW_FIELD_IDX_IPV4_DA:
925                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
926                 break;
927         case ICE_FLOW_FIELD_IDX_IPV6_SA:
928         case ICE_FLOW_FIELD_IDX_IPV6_DA:
929                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
930                 break;
931         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
932         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
933         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
934                 prot_id = ICE_PROT_TCP_IL;
935                 break;
936         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
937         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
938                 prot_id = ICE_PROT_UDP_IL_OR_S;
939                 break;
940         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
941         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
942                 prot_id = ICE_PROT_SCTP_IL;
943                 break;
944         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
945         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
946         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
947         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
948         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
949         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
950                 /* GTP is accessed through UDP OF protocol */
951                 prot_id = ICE_PROT_UDP_OF;
952                 break;
953         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
954                 prot_id = ICE_PROT_PPPOE;
955                 break;
956         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
957                 prot_id = ICE_PROT_UDP_IL_OR_S;
958                 break;
959         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
960                 prot_id = ICE_PROT_L2TPV3;
961                 break;
962         case ICE_FLOW_FIELD_IDX_ESP_SPI:
963                 prot_id = ICE_PROT_ESP_F;
964                 break;
965         case ICE_FLOW_FIELD_IDX_AH_SPI:
966                 prot_id = ICE_PROT_ESP_2;
967                 break;
968         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
969                 prot_id = ICE_PROT_UDP_IL_OR_S;
970                 break;
971         case ICE_FLOW_FIELD_IDX_ARP_SIP:
972         case ICE_FLOW_FIELD_IDX_ARP_DIP:
973         case ICE_FLOW_FIELD_IDX_ARP_SHA:
974         case ICE_FLOW_FIELD_IDX_ARP_DHA:
975         case ICE_FLOW_FIELD_IDX_ARP_OP:
976                 prot_id = ICE_PROT_ARP_OF;
977                 break;
978         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
979         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
980                 /* ICMP type and code share the same extraction seq. entry */
981                 prot_id = (params->prof->segs[seg].hdrs &
982                            ICE_FLOW_SEG_HDR_IPV4) ?
983                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
984                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
985                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
986                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
987                 break;
988         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
989                 prot_id = ICE_PROT_GRE_OF;
990                 break;
991         default:
992                 return ICE_ERR_NOT_IMPL;
993         }
994
995         /* Each extraction sequence entry is a word in size, and extracts a
996          * word-aligned offset from a protocol header.
997          */
998         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
999
1000         flds[fld].xtrct.prot_id = prot_id;
1001         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1002                 ICE_FLOW_FV_EXTRACT_SZ;
1003         flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
1004         flds[fld].xtrct.idx = params->es_cnt;
1005         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1006
1007         /* Adjust the next field-entry index after accommodating the number of
1008          * entries this field consumes
1009          */
1010         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1011                                   ice_flds_info[fld].size, ese_bits);
1012
1013         /* Fill in the extraction sequence entries needed for this field */
1014         off = flds[fld].xtrct.off;
1015         mask = flds[fld].xtrct.mask;
1016         for (i = 0; i < cnt; i++) {
1017                 /* Only consume an extraction sequence entry if there is no
1018                  * sibling field associated with this field or the sibling entry
1019                  * already extracts the word shared with this field.
1020                  */
1021                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1022                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1023                     flds[sib].xtrct.off != off) {
1024                         u8 idx;
1025
1026                         /* Make sure the number of extraction sequence entries
1027                          * required does not exceed the block's capacity
1028                          */
1029                         if (params->es_cnt >= fv_words)
1030                                 return ICE_ERR_MAX_LIMIT;
1031
1032                         /* some blocks require a reversed field vector layout */
1033                         if (hw->blk[params->blk].es.reverse)
1034                                 idx = fv_words - params->es_cnt - 1;
1035                         else
1036                                 idx = params->es_cnt;
1037
1038                         params->es[idx].prot_id = prot_id;
1039                         params->es[idx].off = off;
1040                         params->mask[idx] = mask | sib_mask;
1041                         params->es_cnt++;
1042                 }
1043
1044                 off += ICE_FLOW_FV_EXTRACT_SZ;
1045         }
1046
1047         return ICE_SUCCESS;
1048 }
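/* Worked example (illustrative, assuming ICE_FLOW_FV_EXTRACT_SZ is 2 bytes):
 * ICE_FLOW_FIELD_IDX_TCP_FLAGS sits at byte 13 of the TCP header, i.e. bit
 * offset 104, and is 8 bits wide. With ese_bits = 16 the code above computes
 *
 *     xtrct.off  = (104 / 16) * 2 = 12  (word-aligned byte offset)
 *     xtrct.disp = 104 % 16       = 8   (bit displacement within the word)
 *     cnt        = DIVIDE_AND_ROUND_UP(8 + 8, 16) = 1
 *
 * so a single 16-bit extraction at offset 12 covers the field.
 */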
1049
1050 /**
1051  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1052  * @hw: pointer to the HW struct
1053  * @params: information about the flow to be processed
1054  * @seg: index of packet segment whose raw fields are to be extracted
1055  */
1056 static enum ice_status
1057 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1058                      u8 seg)
1059 {
1060         u16 fv_words;
1061         u16 hdrs_sz;
1062         u8 i;
1063
1064         if (!params->prof->segs[seg].raws_cnt)
1065                 return ICE_SUCCESS;
1066
1067         if (params->prof->segs[seg].raws_cnt >
1068             ARRAY_SIZE(params->prof->segs[seg].raws))
1069                 return ICE_ERR_MAX_LIMIT;
1070
1071         /* Offsets within the segment headers are not supported */
1072         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1073         if (!hdrs_sz)
1074                 return ICE_ERR_PARAM;
1075
1076         fv_words = hw->blk[params->blk].es.fvw;
1077
1078         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1079                 struct ice_flow_seg_fld_raw *raw;
1080                 u16 off, cnt, j;
1081
1082                 raw = &params->prof->segs[seg].raws[i];
1083
1084                 /* Storing extraction information */
1085                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1086                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1087                         ICE_FLOW_FV_EXTRACT_SZ;
1088                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1089                         BITS_PER_BYTE;
1090                 raw->info.xtrct.idx = params->es_cnt;
1091
1092                 /* Determine the number of field vector entries this raw field
1093                  * consumes.
1094                  */
1095                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1096                                           (raw->info.src.last * BITS_PER_BYTE),
1097                                           (ICE_FLOW_FV_EXTRACT_SZ *
1098                                            BITS_PER_BYTE));
1099                 off = raw->info.xtrct.off;
1100                 for (j = 0; j < cnt; j++) {
1101                         u16 idx;
1102
1103                         /* Make sure the number of extraction sequence entries
1104                          * required does not exceed the block's capacity
1105                          */
1106                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1107                             params->es_cnt >= ICE_MAX_FV_WORDS)
1108                                 return ICE_ERR_MAX_LIMIT;
1109
1110                         /* some blocks require a reversed field vector layout */
1111                         if (hw->blk[params->blk].es.reverse)
1112                                 idx = fv_words - params->es_cnt - 1;
1113                         else
1114                                 idx = params->es_cnt;
1115
1116                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1117                         params->es[idx].off = off;
1118                         params->es_cnt++;
1119                         off += ICE_FLOW_FV_EXTRACT_SZ;
1120                 }
1121         }
1122
1123         return ICE_SUCCESS;
1124 }
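/* Worked example (illustrative, assuming ICE_FLOW_FV_EXTRACT_SZ is 2 bytes):
 * a raw field of 3 bytes (info.src.last == 3) starting at byte offset 1 of
 * the segment gives xtrct.off = 0, xtrct.disp = 8 bits, and
 * cnt = DIVIDE_AND_ROUND_UP(8 + 3 * 8, 16) = 2, i.e. two consecutive 16-bit
 * field vector entries (offsets 0 and 2) are consumed for this raw field.
 */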
1125
1126 /**
1127  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1128  * @hw: pointer to the HW struct
1129  * @params: information about the flow to be processed
1130  *
1131  * This function iterates through all matched fields in the given segments, and
1132  * creates an extraction sequence for the fields.
1133  */
1134 static enum ice_status
1135 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1136                           struct ice_flow_prof_params *params)
1137 {
1138         enum ice_status status = ICE_SUCCESS;
1139         u8 i;
1140
1141         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1142          * packet flags
1143          */
1144         if (params->blk == ICE_BLK_ACL) {
1145                 status = ice_flow_xtract_pkt_flags(hw, params,
1146                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1147                 if (status)
1148                         return status;
1149         }
1150
1151         for (i = 0; i < params->prof->segs_cnt; i++) {
1152                 u64 match = params->prof->segs[i].match;
1153                 enum ice_flow_field j;
1154
1155                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1156                         const u64 bit = BIT_ULL(j);
1157
1158                         if (match & bit) {
1159                                 status = ice_flow_xtract_fld(hw, params, i, j,
1160                                                              match);
1161                                 if (status)
1162                                         return status;
1163                                 match &= ~bit;
1164                         }
1165                 }
1166
1167                 /* Process raw matching bytes */
1168                 status = ice_flow_xtract_raws(hw, params, i);
1169                 if (status)
1170                         return status;
1171         }
1172
1173         return status;
1174 }
1175
1176 /**
1177  * ice_flow_sel_acl_scen - select an ACL scenario for a flow profile
1178  * @hw: pointer to the hardware structure
1179  * @params: information about the flow to be processed
1180  *
1181  * This function selects the best-fit ACL scenario for the entry length
1182  * computed from the params passed to it
1183  */
1184 static enum ice_status
1185 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1186 {
1187         /* Find the best-fit scenario for the provided match width */
1188         struct ice_acl_scen *cand_scen = NULL, *scen;
1189
1190         if (!hw->acl_tbl)
1191                 return ICE_ERR_DOES_NOT_EXIST;
1192
1193         /* Loop through each scenario and select the narrowest one whose
1194          * effective width can still hold the entry
1195          */
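        /* For example, given scenarios of effective widths 32, 64, and 128
         * bytes and an entry_length of 48, the 64-byte scenario is the
         * best fit and is selected.
         */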
1196         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1197                 if (scen->eff_width >= params->entry_length &&
1198                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1199                         cand_scen = scen;
1200         if (!cand_scen)
1201                 return ICE_ERR_DOES_NOT_EXIST;
1202
1203         params->prof->cfg.scen = cand_scen;
1204
1205         return ICE_SUCCESS;
1206 }
1207
1208 /**
1209  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1210  * @params: information about the flow to be processed
1211  */
1212 static enum ice_status
1213 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1214 {
1215         u16 index, i, range_idx = 0;
1216
1217         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1218
1219         for (i = 0; i < params->prof->segs_cnt; i++) {
1220                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1221                 u64 match = seg->match;
1222                 u8 j;
1223
1224                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1225                         struct ice_flow_fld_info *fld;
1226                         const u64 bit = BIT_ULL(j);
1227
1228                         if (!(match & bit))
1229                                 continue;
1230
1231                         fld = &seg->fields[j];
1232                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1233
1234                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1235                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1236
1237                                 /* Range checking is only supported for
1238                                  * single words
1239                                  */
1240                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1241                                                         fld->xtrct.disp,
1242                                                         BITS_PER_BYTE * 2) > 1)
1243                                         return ICE_ERR_PARAM;
1244
1245                                 /* Ranges must define low and high values */
1246                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1247                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1248                                         return ICE_ERR_PARAM;
1249
1250                                 fld->entry.val = range_idx++;
1251                         } else {
1252                                 /* Store adjusted byte-length of field for later
1253                                  * use, taking into account potential
1254                                  * non-byte-aligned displacement
1255                                  */
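                                /* Worked example (illustrative): a 16-bit
                                 * field displaced by 4 bits occupies
                                 * DIVIDE_AND_ROUND_UP(16 + 4, 8) = 3 bytes
                                 * of the entry.
                                 */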
1256                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1257                                         (ice_flds_info[j].size +
1258                                          (fld->xtrct.disp % BITS_PER_BYTE),
1259                                          BITS_PER_BYTE);
1260                                 fld->entry.val = index;
1261                                 index += fld->entry.last;
1262                         }
1263
1264                         match &= ~bit;
1265                 }
1266
1267                 for (j = 0; j < seg->raws_cnt; j++) {
1268                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1269
1270                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1271                         raw->info.entry.val = index;
1272                         raw->info.entry.last = raw->info.src.last;
1273                         index += raw->info.entry.last;
1274                 }
1275         }
1276
1277         /* Currently we only support using the byte selection base, which only
1278          * allows for an effective entry size of 30 bytes. Reject anything
1279          * larger.
1280          */
1281         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1282                 return ICE_ERR_PARAM;
1283
1284         /* Only 8 range checkers are available per profile; reject anything
1285          * that tries to use more
1286          */
1287         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1288                 return ICE_ERR_PARAM;
1289
1290         /* Store # bytes required for entry for later use */
1291         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1292
1293         return ICE_SUCCESS;
1294 }
1295
1296 /**
1297  * ice_flow_proc_segs - process all packet segments associated with a profile
1298  * @hw: pointer to the HW struct
1299  * @params: information about the flow to be processed
1300  */
1301 static enum ice_status
1302 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1303 {
1304         enum ice_status status;
1305
1306         status = ice_flow_proc_seg_hdrs(params);
1307         if (status)
1308                 return status;
1309
1310         status = ice_flow_create_xtrct_seq(hw, params);
1311         if (status)
1312                 return status;
1313
1314         switch (params->blk) {
1315         case ICE_BLK_FD:
1316         case ICE_BLK_RSS:
1317                 status = ICE_SUCCESS;
1318                 break;
1319         case ICE_BLK_ACL:
1320                 status = ice_flow_acl_def_entry_frmt(params);
1321                 if (status)
1322                         return status;
1323                 status = ice_flow_sel_acl_scen(hw, params);
1324                 if (status)
1325                         return status;
1326                 break;
1327         case ICE_BLK_SW:
1328         default:
1329                 return ICE_ERR_NOT_IMPL;
1330         }
1331
1332         return status;
1333 }
1334
1335 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1336 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1337 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1338
1339 /**
1340  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1341  * @hw: pointer to the HW struct
1342  * @blk: classification stage
1343  * @dir: flow direction
1344  * @segs: array of one or more packet segments that describe the flow
1345  * @segs_cnt: number of packet segments provided
1346  * @vsi_handle: software VSI handle to check against (ICE_FLOW_FIND_PROF_CHK_VSI)
1347  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1348  */
1349 static struct ice_flow_prof *
1350 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1351                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1352                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1353 {
1354         struct ice_flow_prof *p, *prof = NULL;
1355
1356         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1357         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1358                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1359                     segs_cnt && segs_cnt == p->segs_cnt) {
1360                         u8 i;
1361
1362                         /* Check for profile-VSI association if specified */
1363                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1364                             ice_is_vsi_valid(hw, vsi_handle) &&
1365                             !ice_is_bit_set(p->vsis, vsi_handle))
1366                                 continue;
1367
1368                         /* Protocol headers must be checked. Matched fields are
1369                          * checked if specified.
1370                          */
1371                         for (i = 0; i < segs_cnt; i++)
1372                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1373                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1374                                      segs[i].match != p->segs[i].match))
1375                                         break;
1376
1377                         /* A match is found if all segments are matched */
1378                         if (i == segs_cnt) {
1379                                 prof = p;
1380                                 break;
1381                         }
1382                 }
1383         }
1384         ice_release_lock(&hw->fl_profs_locks[blk]);
1385
1386         return prof;
1387 }
1388
1389 /**
1390  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1391  * @hw: pointer to the HW struct
1392  * @blk: classification stage
1393  * @dir: flow direction
1394  * @segs: array of one or more packet segments that describe the flow
1395  * @segs_cnt: number of packet segments provided
1396  */
1397 u64
1398 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1399                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1400 {
1401         struct ice_flow_prof *p;
1402
1403         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1404                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1405
1406         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1407 }
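
/* Illustrative usage sketch (not part of the driver): a caller that has filled
 * out a segment array, e.g. with ICE_FLOW_SET_HDRS() and ice_flow_set_fld()
 * from ice_flow.h, could check for an existing RSS profile like this:
 *
 *	u64 id = ice_flow_find_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, segs, 1);
 *
 * A return value other than ICE_FLOW_PROF_ID_INVAL means a profile with
 * identical headers and matched fields already exists.
 */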
1408
1409 /**
1410  * ice_flow_find_prof_id - Look up a profile with given profile ID
1411  * @hw: pointer to the HW struct
1412  * @blk: classification stage
1413  * @prof_id: unique ID to identify this flow profile
1414  */
1415 static struct ice_flow_prof *
1416 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1417 {
1418         struct ice_flow_prof *p;
1419
1420         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1421                 if (p->id == prof_id)
1422                         return p;
1423         }
1424
1425         return NULL;
1426 }
1427
1428 /**
1429  * ice_dealloc_flow_entry - Deallocate flow entry memory
1430  * @hw: pointer to the HW struct
1431  * @entry: flow entry to be removed
1432  */
1433 static void
1434 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1435 {
1436         if (!entry)
1437                 return;
1438
1439         if (entry->entry)
1440                 ice_free(hw, entry->entry);
1441
1442         if (entry->range_buf) {
1443                 ice_free(hw, entry->range_buf);
1444                 entry->range_buf = NULL;
1445         }
1446
1447         if (entry->acts) {
1448                 ice_free(hw, entry->acts);
1449                 entry->acts = NULL;
1450                 entry->acts_cnt = 0;
1451         }
1452
1453         ice_free(hw, entry);
1454 }
1455
1456 #define ICE_ACL_INVALID_SCEN    0x3f
1457
1458 /**
1459  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
1460  * @hw: pointer to the hardware structure
1461  * @prof: pointer to flow profile
1462  * @buf: destination buffer the function writes the partial xtrct sequence to
1463  *
1464  * Returns ICE_SUCCESS if no PF is associated with the given profile,
1465  * ICE_ERR_IN_USE if at least one PF is associated with it, or another
1466  * error code for a real error.
1467  */
1468 static enum ice_status
1469 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1470                             struct ice_aqc_acl_prof_generic_frmt *buf)
1471 {
1472         enum ice_status status;
1473         u8 prof_id = 0;
1474
1475         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1476         if (status)
1477                 return status;
1478
1479         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1480         if (status)
1481                 return status;
1482
1483         /* If all PFs' associated scenarios for the given profile are 0 or all
1484          * are ICE_ACL_INVALID_SCEN (63), then the profile has not been
1485          * configured yet.
1486          */
1487         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1488             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1489             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1490             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1491                 return ICE_SUCCESS;
1492
1493         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1494             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1495             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1496             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1497             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1498             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1499             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1500             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1501                 return ICE_SUCCESS;
1502         else
1503                 return ICE_ERR_IN_USE;
1504 }
1505
1506 /**
1507  * ice_flow_acl_free_act_cntr - Deallocate ACL counters used by the rule's actions
1508  * @hw: pointer to the hardware structure
1509  * @acts: array of actions to be performed on a match
1510  * @acts_cnt: number of actions
1511  */
1512 static enum ice_status
1513 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1514                            u8 acts_cnt)
1515 {
1516         int i;
1517
1518         for (i = 0; i < acts_cnt; i++) {
1519                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1520                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1521                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1522                         struct ice_acl_cntrs cntrs;
1523                         enum ice_status status;
1524
1525                         cntrs.bank = 0; /* Only bank0 for the moment */
1526                         cntrs.first_cntr =
1527                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1528                         cntrs.last_cntr =
1529                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1530
1531                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1532                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1533                         else
1534                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1535
1536                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1537                         if (status)
1538                                 return status;
1539                 }
1540         }
1541         return ICE_SUCCESS;
1542 }
1543
1544 /**
1545  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1546  * @hw: pointer to the hardware structure
1547  * @prof: pointer to flow profile
1548  *
1549  * Disassociate the scenario from the profile for the current PF.
1550  */
1551 static enum ice_status
1552 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1553 {
1554         struct ice_aqc_acl_prof_generic_frmt buf;
1555         enum ice_status status = ICE_SUCCESS;
1556         u8 prof_id = 0;
1557
1558         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1559
1560         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1561         if (status)
1562                 return status;
1563
1564         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1565         if (status)
1566                 return status;
1567
1568         /* Clear scenario for this pf */
1569         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1570         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1571
1572         return status;
1573 }
1574
1575 /**
1576  * ice_flow_rem_entry_sync - Remove a flow entry
1577  * @hw: pointer to the HW struct
1578  * @blk: classification stage
1579  * @entry: flow entry to be removed
1580  */
1581 static enum ice_status
1582 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1583                         struct ice_flow_entry *entry)
1584 {
1585         if (!entry)
1586                 return ICE_ERR_BAD_PTR;
1587
1588         if (blk == ICE_BLK_ACL) {
1589                 enum ice_status status;
1590
1591                 if (!entry->prof)
1592                         return ICE_ERR_BAD_PTR;
1593
1594                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1595                                            entry->scen_entry_idx);
1596                 if (status)
1597                         return status;
1598
1599                 /* Checks if we need to release an ACL counter. */
1600                 if (entry->acts_cnt && entry->acts)
1601                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1602                                                    entry->acts_cnt);
1603         }
1604
1605         LIST_DEL(&entry->l_entry);
1606
1607         ice_dealloc_flow_entry(hw, entry);
1608
1609         return ICE_SUCCESS;
1610 }
1611
1612 /**
1613  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1614  * @hw: pointer to the HW struct
1615  * @blk: classification stage
1616  * @dir: flow direction
1617  * @prof_id: unique ID to identify this flow profile
1618  * @segs: array of one or more packet segments that describe the flow
1619  * @segs_cnt: number of packet segments provided
1620  * @acts: array of default actions
1621  * @acts_cnt: number of default actions
1622  * @prof: stores the returned flow profile added
1623  *
1624  * Assumption: the caller has acquired the lock to the profile list
1625  */
1626 static enum ice_status
1627 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1628                        enum ice_flow_dir dir, u64 prof_id,
1629                        struct ice_flow_seg_info *segs, u8 segs_cnt,
1630                        struct ice_flow_action *acts, u8 acts_cnt,
1631                        struct ice_flow_prof **prof)
1632 {
1633         struct ice_flow_prof_params params;
1634         enum ice_status status;
1635         u8 i;
1636
1637         if (!prof || (acts_cnt && !acts))
1638                 return ICE_ERR_BAD_PTR;
1639
1640         ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
1641         params.prof = (struct ice_flow_prof *)
1642                 ice_malloc(hw, sizeof(*params.prof));
1643         if (!params.prof)
1644                 return ICE_ERR_NO_MEMORY;
1645
1646         /* initialize extraction sequence to all invalid (0xff) */
1647         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1648                 params.es[i].prot_id = ICE_PROT_INVALID;
1649                 params.es[i].off = ICE_FV_OFFSET_INVAL;
1650         }
1651
1652         params.blk = blk;
1653         params.prof->id = prof_id;
1654         params.prof->dir = dir;
1655         params.prof->segs_cnt = segs_cnt;
1656
1657         /* Make a copy of the segments that need to be persistent in the flow
1658          * profile instance
1659          */
1660         for (i = 0; i < segs_cnt; i++)
1661                 ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
1662                            ICE_NONDMA_TO_NONDMA);
1663
1664         /* Make a copy of the actions that need to be persistent in the flow
1665          * profile instance.
1666          */
1667         if (acts_cnt) {
1668                 params.prof->acts = (struct ice_flow_action *)
1669                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1670                                    ICE_NONDMA_TO_NONDMA);
1671
1672                 if (!params.prof->acts) {
1673                         status = ICE_ERR_NO_MEMORY;
1674                         goto out;
1675                 }
1676         }
1677
1678         status = ice_flow_proc_segs(hw, &params);
1679         if (status) {
1680                 ice_debug(hw, ICE_DBG_FLOW,
1681                           "Error processing a flow's packet segments\n");
1682                 goto out;
1683         }
1684
1685         /* Add a HW profile for this flow profile */
1686         status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1687                               params.attr, params.attr_cnt, params.es,
1688                               params.mask);
1689         if (status) {
1690                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1691                 goto out;
1692         }
1693
1694         INIT_LIST_HEAD(&params.prof->entries);
1695         ice_init_lock(&params.prof->entries_lock);
1696         *prof = params.prof;
1697
1698 out:
1699         if (status) {
1700                 if (params.prof->acts)
1701                         ice_free(hw, params.prof->acts);
1702                 ice_free(hw, params.prof);
1703         }
1704
1705         return status;
1706 }
1707
1708 /**
1709  * ice_flow_rem_prof_sync - remove a flow profile
1710  * @hw: pointer to the hardware structure
1711  * @blk: classification stage
1712  * @prof: pointer to flow profile to remove
1713  *
1714  * Assumption: the caller has acquired the lock to the profile list
1715  */
1716 static enum ice_status
1717 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1718                        struct ice_flow_prof *prof)
1719 {
1720         enum ice_status status;
1721
1722         /* Remove all remaining flow entries before removing the flow profile */
1723         if (!LIST_EMPTY(&prof->entries)) {
1724                 struct ice_flow_entry *e, *t;
1725
1726                 ice_acquire_lock(&prof->entries_lock);
1727
1728                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1729                                          l_entry) {
1730                         status = ice_flow_rem_entry_sync(hw, blk, e);
1731                         if (status)
1732                                 break;
1733                 }
1734
1735                 ice_release_lock(&prof->entries_lock);
1736         }
1737
1738         if (blk == ICE_BLK_ACL) {
1739                 struct ice_aqc_acl_profile_ranges query_rng_buf;
1740                 struct ice_aqc_acl_prof_generic_frmt buf;
1741                 u8 prof_id = 0;
1742
1743                 /* Disassociate the scenario from the profile for this PF */
1744                 status = ice_flow_acl_disassoc_scen(hw, prof);
1745                 if (status)
1746                         return status;
1747
1748                 /* Clear the range-checker if the profile ID is no longer
1749                  * used by any PF
1750                  */
1751                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1752                 if (status && status != ICE_ERR_IN_USE) {
1753                         return status;
1754                 } else if (!status) {
1755                         /* Clear the range-checker value for profile ID */
1756                         ice_memset(&query_rng_buf, 0,
1757                                    sizeof(struct ice_aqc_acl_profile_ranges),
1758                                    ICE_NONDMA_MEM);
1759
1760                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
1761                                                       &prof_id);
1762                         if (status)
1763                                 return status;
1764
1765                         status = ice_prog_acl_prof_ranges(hw, prof_id,
1766                                                           &query_rng_buf, NULL);
1767                         if (status)
1768                                 return status;
1769                 }
1770         }
1771
1772         /* Remove all hardware profiles associated with this flow profile */
1773         status = ice_rem_prof(hw, blk, prof->id);
1774         if (!status) {
1775                 LIST_DEL(&prof->l_entry);
1776                 ice_destroy_lock(&prof->entries_lock);
1777                 if (prof->acts)
1778                         ice_free(hw, prof->acts);
1779                 ice_free(hw, prof);
1780         }
1781
1782         return status;
1783 }
1784
1785 /**
1786  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1787  * @buf: destination buffer the function writes the partial xtrct sequence to
1788  * @info: info about the field
1789  */
1790 static void
1791 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1792                                struct ice_flow_fld_info *info)
1793 {
1794         u16 dst, i;
1795         u8 src;
1796
1797         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1798                 info->xtrct.disp / BITS_PER_BYTE;
1799         dst = info->entry.val;
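        /* Worked example (illustrative, assuming ICE_FLOW_FV_EXTRACT_SZ is
         * 2 bytes): a field in field vector word 3 with an 8-bit displacement
         * starts at source byte 3 * 2 + 1 = 7; the XOR with 1 below swaps the
         * two bytes within each field vector word.
         */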
1800         for (i = 0; i < info->entry.last; i++)
1801                 /* HW stores field vector words in LE, convert words back to BE
1802                  * so constructed entries will end up in network order
1803                  */
1804                 buf->byte_selection[dst++] = src++ ^ 1;
1805 }
1806
1807 /**
1808  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1809  * @hw: pointer to the hardware structure
1810  * @prof: pointer to flow profile
1811  */
1812 static enum ice_status
1813 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
1814 {
1815         struct ice_aqc_acl_prof_generic_frmt buf;
1816         struct ice_flow_fld_info *info;
1817         enum ice_status status;
1818         u8 prof_id = 0;
1819         u16 i;
1820
1821         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1822
1823         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1824         if (status)
1825                 return status;
1826
1827         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1828         if (status && status != ICE_ERR_IN_USE)
1829                 return status;
1830
1831         if (!status) {
1832                 /* Program the profile dependent configuration. This is done
1833                  * only once regardless of the number of PFs using that profile
1834                  */
1835                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1836
1837                 for (i = 0; i < prof->segs_cnt; i++) {
1838                         struct ice_flow_seg_info *seg = &prof->segs[i];
1839                         u64 match = seg->match;
1840                         u16 j;
1841
1842                         for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1843                                 const u64 bit = BIT_ULL(j);
1844
1845                                 if (!(match & bit))
1846                                         continue;
1847
1848                                 info = &seg->fields[j];
1849
1850                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
1851                                         buf.word_selection[info->entry.val] =
1852                                                                 info->xtrct.idx;
1853                                 else
1854                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
1855                                                                        info);
1856
1857                                 match &= ~bit;
1858                         }
1859
1860                         for (j = 0; j < seg->raws_cnt; j++) {
1861                                 info = &seg->raws[j].info;
1862                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
1863                         }
1864                 }
1865
1866                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
1867                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
1868                            ICE_NONDMA_MEM);
1869         }
1870
1871         /* Update the current PF */
1872         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
1873         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1874
1875         return status;
1876 }
1877
1878 /**
1879  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1880  * @hw: pointer to the hardware structure
1881  * @blk: classification stage
1882  * @vsi_handle: software VSI handle
1883  * @vsig: target VSI group
1884  *
1885  * Assumption: the caller has already verified that the VSI to
1886  * be added has the same characteristics as the VSIG and will
1887  * thereby have access to all resources added to that VSIG.
1888  */
1889 enum ice_status
1890 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1891                         u16 vsig)
1892 {
1893         enum ice_status status;
1894
1895         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1896                 return ICE_ERR_PARAM;
1897
1898         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1899         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1900                                   vsig);
1901         ice_release_lock(&hw->fl_profs_locks[blk]);
1902
1903         return status;
1904 }
1905
1906 /**
1907  * ice_flow_assoc_prof - associate a VSI with a flow profile
1908  * @hw: pointer to the hardware structure
1909  * @blk: classification stage
1910  * @prof: pointer to flow profile
1911  * @vsi_handle: software VSI handle
1912  *
1913  * Assumption: the caller has acquired the lock to the profile list
1914  * and the software VSI handle has been validated
1915  */
1916 static enum ice_status
1917 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1918                     struct ice_flow_prof *prof, u16 vsi_handle)
1919 {
1920         enum ice_status status = ICE_SUCCESS;
1921
1922         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1923                 if (blk == ICE_BLK_ACL) {
1924                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
1925                         if (status)
1926                                 return status;
1927                 }
1928                 status = ice_add_prof_id_flow(hw, blk,
1929                                               ice_get_hw_vsi_num(hw,
1930                                                                  vsi_handle),
1931                                               prof->id);
1932                 if (!status)
1933                         ice_set_bit(vsi_handle, prof->vsis);
1934                 else
1935                         ice_debug(hw, ICE_DBG_FLOW,
1936                                   "HW profile add failed, %d\n",
1937                                   status);
1938         }
1939
1940         return status;
1941 }
1942
1943 /**
1944  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1945  * @hw: pointer to the hardware structure
1946  * @blk: classification stage
1947  * @prof: pointer to flow profile
1948  * @vsi_handle: software VSI handle
1949  *
1950  * Assumption: the caller has acquired the lock to the profile list
1951  * and the software VSI handle has been validated
1952  */
1953 static enum ice_status
1954 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1955                        struct ice_flow_prof *prof, u16 vsi_handle)
1956 {
1957         enum ice_status status = ICE_SUCCESS;
1958
1959         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
1960                 status = ice_rem_prof_id_flow(hw, blk,
1961                                               ice_get_hw_vsi_num(hw,
1962                                                                  vsi_handle),
1963                                               prof->id);
1964                 if (!status)
1965                         ice_clear_bit(vsi_handle, prof->vsis);
1966                 else
1967                         ice_debug(hw, ICE_DBG_FLOW,
1968                                   "HW profile remove failed, %d\n",
1969                                   status);
1970         }
1971
1972         return status;
1973 }
1974
1975 /**
1976  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
1977  * @hw: pointer to the HW struct
1978  * @blk: classification stage
1979  * @dir: flow direction
1980  * @prof_id: unique ID to identify this flow profile
1981  * @segs: array of one or more packet segments that describe the flow
1982  * @segs_cnt: number of packet segments provided
1983  * @acts: array of default actions
1984  * @acts_cnt: number of default actions
1985  * @prof: stores the returned flow profile added
1986  */
1987 enum ice_status
1988 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1989                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
1990                   struct ice_flow_action *acts, u8 acts_cnt,
1991                   struct ice_flow_prof **prof)
1992 {
1993         enum ice_status status;
1994
1995         if (segs_cnt > ICE_FLOW_SEG_MAX)
1996                 return ICE_ERR_MAX_LIMIT;
1997
1998         if (!segs_cnt)
1999                 return ICE_ERR_PARAM;
2000
2001         if (!segs)
2002                 return ICE_ERR_BAD_PTR;
2003
2004         status = ice_flow_val_hdrs(segs, segs_cnt);
2005         if (status)
2006                 return status;
2007
2008         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2009
2010         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2011                                         acts, acts_cnt, prof);
2012         if (!status)
2013                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2014
2015         ice_release_lock(&hw->fl_profs_locks[blk]);
2016
2017         return status;
2018 }
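
/* Illustrative usage sketch (not part of the driver): adding and later removing
 * a single-segment IPv4 RSS profile. The helpers ICE_FLOW_SET_HDRS() and
 * ice_flow_set_fld() and the header/field identifiers are assumed from
 * ice_flow.h; hw and prof_id are assumed to be provided by the caller, and
 * error handling is omitted.
 *
 *	struct ice_flow_seg_info segs[1] = { 0 };
 *	struct ice_flow_prof *prof;
 *	enum ice_status status;
 *
 *	ICE_FLOW_SET_HDRS(&segs[0], ICE_FLOW_SEG_HDR_IPV4);
 *	ice_flow_set_fld(&segs[0], ICE_FLOW_FIELD_IDX_IPV4_SA,
 *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 *
 *	status = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, prof_id,
 *				   segs, 1, NULL, 0, &prof);
 *
 *	... use the profile ...
 *
 *	status = ice_flow_rem_prof(hw, ICE_BLK_RSS, prof_id);
 */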
2019
2020 /**
2021  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2022  * @hw: pointer to the HW struct
2023  * @blk: the block for which the flow profile is to be removed
2024  * @prof_id: unique ID of the flow profile to be removed
2025  */
2026 enum ice_status
2027 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2028 {
2029         struct ice_flow_prof *prof;
2030         enum ice_status status;
2031
2032         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2033
2034         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2035         if (!prof) {
2036                 status = ICE_ERR_DOES_NOT_EXIST;
2037                 goto out;
2038         }
2039
2040         /* prof becomes invalid after the call */
2041         status = ice_flow_rem_prof_sync(hw, blk, prof);
2042
2043 out:
2044         ice_release_lock(&hw->fl_profs_locks[blk]);
2045
2046         return status;
2047 }
2048
2049 /**
2050  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2051  * @hw: pointer to the HW struct
2052  * @blk: classification stage
2053  * @prof_id: the profile ID handle
2054  * @hw_prof_id: pointer to variable to receive the HW profile ID
2055  */
2056 enum ice_status
2057 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2058                      u8 *hw_prof_id)
2059 {
2060         struct ice_prof_map *map;
2061
2062         map = ice_search_prof_id(hw, blk, prof_id);
2063         if (map) {
2064                 *hw_prof_id = map->prof_id;
2065                 return ICE_SUCCESS;
2066         }
2067
2068         return ICE_ERR_DOES_NOT_EXIST;
2069 }
2070
2071 /**
2072  * ice_flow_find_entry - look for a flow entry using its unique ID
2073  * @hw: pointer to the HW struct
2074  * @blk: classification stage
2075  * @entry_id: unique ID to identify this flow entry
2076  *
2077  * This function looks for the flow entry with the specified unique ID in all
2078  * flow profiles of the specified classification stage. If the entry is found,
2079  * it returns the handle to the flow entry. Otherwise, it returns
2080  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2081  */
2082 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2083 {
2084         struct ice_flow_entry *found = NULL;
2085         struct ice_flow_prof *p;
2086
2087         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2088
2089         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2090                 struct ice_flow_entry *e;
2091
2092                 ice_acquire_lock(&p->entries_lock);
2093                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2094                         if (e->id == entry_id) {
2095                                 found = e;
2096                                 break;
2097                         }
2098                 ice_release_lock(&p->entries_lock);
2099
2100                 if (found)
2101                         break;
2102         }
2103
2104         ice_release_lock(&hw->fl_profs_locks[blk]);
2105
2106         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2107 }
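
/* Illustrative usage sketch (not part of the driver): looking up an entry by
 * its caller-assigned ID and acting on the returned handle.
 *
 *	u64 hndl = ice_flow_find_entry(hw, ICE_BLK_ACL, entry_id);
 *
 * A value other than ICE_FLOW_ENTRY_HANDLE_INVAL is a handle that can be
 * passed to the entry removal API (ice_flow_rem_entry(), declared in
 * ice_flow.h).
 */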
2108
2109 /**
2110  * ice_flow_acl_check_actions - Check the ACL rule's actions
2111  * @hw: pointer to the hardware structure
2112  * @acts: array of actions to be performed on a match
2113  * @acts_cnt: number of actions
2114  * @cnt_alloc: indicates if an ACL counter has been allocated.
2115  */
2116 static enum ice_status
2117 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2118                            u8 acts_cnt, bool *cnt_alloc)
2119 {
2120         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2121         int i;
2122
2123         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2124         *cnt_alloc = false;
2125
2126         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2127                 return ICE_ERR_OUT_OF_RANGE;
2128
2129         for (i = 0; i < acts_cnt; i++) {
2130                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2131                     acts[i].type != ICE_FLOW_ACT_DROP &&
2132                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2133                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2134                         return ICE_ERR_CFG;
2135
2136                 /* If the caller wants to add two actions of the same type, it
2137                  * is considered an invalid configuration.
2138                  */
2139                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2140                         return ICE_ERR_PARAM;
2141         }
2142
2143         /* Checks if ACL counters are needed. */
2144         for (i = 0; i < acts_cnt; i++) {
2145                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2146                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2147                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2148                         struct ice_acl_cntrs cntrs;
2149                         enum ice_status status;
2150
2151                         cntrs.amount = 1;
2152                         cntrs.bank = 0; /* Only bank0 for the moment */
2153
2154                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2155                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2156                         else
2157                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2158
2159                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2160                         if (status)
2161                                 return status;
2162                         /* Counter index within the bank */
2163                         acts[i].data.acl_act.value =
2164                                                 CPU_TO_LE16(cntrs.first_cntr);
2165                         *cnt_alloc = true;
2166                 }
2167         }
2168
2169         return ICE_SUCCESS;
2170 }
2171
2172 /**
2173  * ice_flow_acl_frmt_entry_range - Format an acl range checker for a given field
2174  * @fld: number of the given field
2175  * @info: info about field
2176  * @range_buf: range checker configuration buffer
2177  * @data: pointer to a data buffer containing flow entry's match values/masks
2178  * @range: Input/output param indicating which range checkers are being used
2179  */
2180 static void
2181 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2182                               struct ice_aqc_acl_profile_ranges *range_buf,
2183                               u8 *data, u8 *range)
2184 {
2185         u16 new_mask;
2186
2187         /* If not specified, default mask is all bits in field */
2188         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2189                     BIT(ice_flds_info[fld].size) - 1 :
2190                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2191
2192         /* If the mask is 0, then we don't need to worry about this input
2193          * range checker value.
2194          */
2195         if (new_mask) {
2196                 u16 new_high =
2197                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2198                 u16 new_low =
2199                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2200                 u8 range_idx = info->entry.val;
2201
2202                 range_buf->checker_cfg[range_idx].low_boundary =
2203                         CPU_TO_BE16(new_low);
2204                 range_buf->checker_cfg[range_idx].high_boundary =
2205                         CPU_TO_BE16(new_high);
2206                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2207
2208                 /* Indicate which range checker is being used */
2209                 *range |= BIT(range_idx);
2210         }
2211 }
2212
2213 /**
2214  * ice_flow_acl_frmt_entry_fld - Partially format acl entry for a given field
2215  * @fld: number of the given field
2216  * @info: info about the field
2217  * @buf: buffer containing the entry
2218  * @dontcare: buffer containing don't care mask for entry
2219  * @data: pointer to a data buffer containing flow entry's match values/masks
2220  */
2221 static void
2222 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2223                             u8 *dontcare, u8 *data)
2224 {
2225         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2226         bool use_mask = false;
2227         u8 disp;
2228
2229         src = info->src.val;
2230         mask = info->src.mask;
2231         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2232         disp = info->xtrct.disp % BITS_PER_BYTE;
2233
2234         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2235                 use_mask = true;
2236
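        /* Walk the destination bytes of the entry: each output byte combines
         * the overflow bits left over from the previous shifted source byte
         * with the current source byte shifted left by the displacement.
         */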
2237         for (k = 0; k < info->entry.last; k++, dst++) {
2238                 /* Add overflow bits from previous byte */
2239                 buf[dst] = (tmp_s & 0xff00) >> 8;
2240
2241                 /* If the mask is not valid, tmp_m is always zero, so this just
2242                  * sets dontcare to 0 (no masked bits). If the mask is valid, it
2243                  * pulls in the overflow bits of the mask from the previous byte
2244                  */
2245                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2246
2247                 /* If there is displacement, the last byte will only contain
2248                  * displaced data and there is no more data to read from the
2249                  * user buffer, so skip it to avoid reading beyond the end of
2250                  * the user buffer
2251                  */
2252                 if (!disp || k < info->entry.last - 1) {
2253                         /* Store shifted data to use in next byte */
2254                         tmp_s = data[src++] << disp;
2255
2256                         /* Add current (shifted) byte */
2257                         buf[dst] |= tmp_s & 0xff;
2258
2259                         /* Handle mask if valid */
2260                         if (use_mask) {
2261                                 tmp_m = (~data[mask++] & 0xff) << disp;
2262                                 dontcare[dst] |= tmp_m & 0xff;
2263                         }
2264                 }
2265         }
2266
2267         /* Fill in don't care bits at beginning of field */
2268         if (disp) {
2269                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2270                 for (k = 0; k < disp; k++)
2271                         dontcare[dst] |= BIT(k);
2272         }
2273
2274         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2275
2276         /* Fill in don't care bits at end of field */
2277         if (end_disp) {
2278                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2279                       info->entry.last - 1;
2280                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2281                         dontcare[dst] |= BIT(k);
2282         }
2283 }
2284
2285 /**
2286  * ice_flow_acl_frmt_entry - Format acl entry
2287  * @hw: pointer to the hardware structure
2288  * @prof: pointer to flow profile
2289  * @e: pointer to the flow entry
2290  * @data: pointer to a data buffer containing flow entry's match values/masks
2291  * @acts: array of actions to be performed on a match
2292  * @acts_cnt: number of actions
2293  *
2294  * Formats the key (and key_inverse) to be matched from the data passed in,
2295  * along with data from the flow profile. This key/key_inverse pair makes up
2296  * the 'entry' for an acl flow entry.
2297  */
2298 static enum ice_status
2299 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2300                         struct ice_flow_entry *e, u8 *data,
2301                         struct ice_flow_action *acts, u8 acts_cnt)
2302 {
2303         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2304         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2305         enum ice_status status;
2306         bool cnt_alloc;
2307         u8 prof_id = 0;
2308         u16 i, buf_sz;
2309
2310         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2311         if (status)
2312                 return status;
2313
2314         /* Format the result action */
2315
2316         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2317         if (status)
2318                 return status;
2319
2320         status = ICE_ERR_NO_MEMORY;
2321
2322         e->acts = (struct ice_flow_action *)
2323                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2324                            ICE_NONDMA_TO_NONDMA);
2325
2326         if (!e->acts)
2327                 goto out;
2328
2329         e->acts_cnt = acts_cnt;
2330
2331         /* Format the matching data */
2332         buf_sz = prof->cfg.scen->width;
2333         buf = (u8 *)ice_malloc(hw, buf_sz);
2334         if (!buf)
2335                 goto out;
2336
2337         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2338         if (!dontcare)
2339                 goto out;
2340
2341         /* The 'key' buffer will store both key and key_inverse, so it must be
2342          * twice the size of buf
2343          */
2344         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2345         if (!key)
2346                 goto out;
2347
2348         range_buf = (struct ice_aqc_acl_profile_ranges *)
2349                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2350         if (!range_buf)
2351                 goto out;
2352
2353         /* Set don't care mask to all 1's to start, will zero out used bytes */
2354         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2355
2356         for (i = 0; i < prof->segs_cnt; i++) {
2357                 struct ice_flow_seg_info *seg = &prof->segs[i];
2358                 u64 match = seg->match;
2359                 u16 j;
2360
2361                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2362                         struct ice_flow_fld_info *info;
2363                         const u64 bit = BIT_ULL(j);
2364
2365                         if (!(match & bit))
2366                                 continue;
2367
2368                         info = &seg->fields[j];
2369
2370                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2371                                 ice_flow_acl_frmt_entry_range(j, info,
2372                                                               range_buf, data,
2373                                                               &range);
2374                         else
2375                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2376                                                             dontcare, data);
2377
2378                         match &= ~bit;
2379                 }
2380
2381                 for (j = 0; j < seg->raws_cnt; j++) {
2382                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2383                         u16 dst, src, mask, k;
2384                         bool use_mask = false;
2385
2386                         src = info->src.val;
2387                         dst = info->entry.val -
2388                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2389                         mask = info->src.mask;
2390
2391                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2392                                 use_mask = true;
2393
2394                         for (k = 0; k < info->entry.last; k++, dst++) {
2395                                 buf[dst] = data[src++];
2396                                 if (use_mask)
2397                                         dontcare[dst] = ~data[mask++];
2398                                 else
2399                                         dontcare[dst] = 0;
2400                         }
2401                 }
2402         }
2403
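        /* Store the HW profile ID at the scenario's profile ID byte and mark
         * it as a "care" byte so the entry only matches within this profile.
         */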
2404         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2405         dontcare[prof->cfg.scen->pid_idx] = 0;
2406
2407         /* Format the buffer for direction flags */
2408         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2409
2410         if (prof->dir == ICE_FLOW_RX)
2411                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2412
2413         if (range) {
2414                 buf[prof->cfg.scen->rng_chk_idx] = range;
2415                 /* Mark any unused range checkers as don't care */
2416                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2417                 e->range_buf = range_buf;
2418         } else {
2419                 ice_free(hw, range_buf);
2420         }
2421
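        /* Generate the key and its inverse from the match data and the
         * don't-care mask; both halves are written into 'key'.
         */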
2422         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2423                              buf_sz);
2424         if (status)
2425                 goto out;
2426
2427         e->entry = key;
2428         e->entry_sz = buf_sz * 2;
2429
2430 out:
2431         if (buf)
2432                 ice_free(hw, buf);
2433
2434         if (dontcare)
2435                 ice_free(hw, dontcare);
2436
2437         if (status && key)
2438                 ice_free(hw, key);
2439
2440         if (status && range_buf) {
2441                 ice_free(hw, range_buf);
2442                 e->range_buf = NULL;
2443         }
2444
2445         if (status && e->acts) {
2446                 ice_free(hw, e->acts);
2447                 e->acts = NULL;
2448                 e->acts_cnt = 0;
2449         }
2450
2451         if (status && cnt_alloc)
2452                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2453
2454         return status;
2455 }
2456
2457 /**
2458  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2459  *                                     the compared data.
2460  * @prof: pointer to flow profile
2461  * @e: pointer to the comparing flow entry
2462  * @do_chg_action: decide if we want to change the ACL action
2463  * @do_add_entry: decide if we want to add the new ACL entry
2464  * @do_rem_entry: decide if we want to remove the current ACL entry
2465  *
2466  * Find an ACL scenario entry that matches the compared data. At the same time,
2467  * this function also figures out:
2468  * a/ If we want to change the ACL action
2469  * b/ If we want to add the new ACL entry
2470  * c/ If we want to remove the current ACL entry
2471  */
2472 static struct ice_flow_entry *
2473 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2474                                   struct ice_flow_entry *e, bool *do_chg_action,
2475                                   bool *do_add_entry, bool *do_rem_entry)
2476 {
2477         struct ice_flow_entry *p, *return_entry = NULL;
2478         u8 i, j;
2479
2480         /* Check the following cases:
2481          * a/ An entry exists with the same matching data but a different
2482          *    priority: remove the existing ACL entry, then add the new
2483          *    entry to the ACL scenario.
2484          * b/ An entry exists with the same matching data, priority, and
2485          *    result action: do nothing.
2486          * c/ An entry exists with the same matching data and priority but
2487          *    a different action: only change the existing entry's action.
2488          * d/ Otherwise, add this new entry to the ACL scenario.
2489          */
2490         *do_chg_action = false;
2491         *do_add_entry = true;
2492         *do_rem_entry = false;
2493         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2494                 if (memcmp(p->entry, e->entry, p->entry_sz))
2495                         continue;
2496
2497                 /* From this point, we have the same matching_data. */
2498                 *do_add_entry = false;
2499                 return_entry = p;
2500
2501                 if (p->priority != e->priority) {
2502                         /* matching data && !priority */
2503                         *do_add_entry = true;
2504                         *do_rem_entry = true;
2505                         break;
2506                 }
2507
2508                 /* From this point, we will have matching_data && priority */
2509                 if (p->acts_cnt != e->acts_cnt)
2510                         *do_chg_action = true;
2511                 for (i = 0; i < p->acts_cnt; i++) {
2512                         bool found_not_match = false;
2513
2514                         for (j = 0; j < e->acts_cnt; j++)
2515                                 if (memcmp(&p->acts[i], &e->acts[j],
2516                                            sizeof(struct ice_flow_action))) {
2517                                         found_not_match = true;
2518                                         break;
2519                                 }
2520
2521                         if (found_not_match) {
2522                                 *do_chg_action = true;
2523                                 break;
2524                         }
2525                 }
2526
2527                 /* (do_chg_action = true) means :
2528                  *    matching_data && priority && !result_action
2529                  * (do_chg_action = false) means :
2530                  *    matching_data && priority && result_action
2531                  */
2532                 break;
2533         }
2534
2535         return return_entry;
2536 }
2537
2538 /**
2539  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2540  * @p: flow priority
2541  */
2542 static enum ice_acl_entry_prior
2543 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2544 {
2545         enum ice_acl_entry_prior acl_prior;
2546
2547         switch (p) {
2548         case ICE_FLOW_PRIO_LOW:
2549                 acl_prior = ICE_LOW;
2550                 break;
2551         case ICE_FLOW_PRIO_NORMAL:
2552                 acl_prior = ICE_NORMAL;
2553                 break;
2554         case ICE_FLOW_PRIO_HIGH:
2555                 acl_prior = ICE_HIGH;
2556                 break;
2557         default:
2558                 acl_prior = ICE_NORMAL;
2559                 break;
2560         }
2561
2562         return acl_prior;
2563 }
2564
2565 /**
2566  * ice_flow_acl_union_rng_chk - Perform union operation between two
2567  *                              range checker buffers
2568  * @dst_buf: pointer to destination range checker buffer
2569  * @src_buf: pointer to source range checker buffer
2570  *
2571  * Perform the union of the dst_buf and src_buf range checker buffers and
2572  * save the result back to dst_buf
2573  */
2574 static enum ice_status
2575 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2576                            struct ice_aqc_acl_profile_ranges *src_buf)
2577 {
2578         u8 i, j;
2579
2580         if (!dst_buf || !src_buf)
2581                 return ICE_ERR_BAD_PTR;
2582
2583         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2584                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2585                 bool will_populate = false;
2586
2587                 in_data = &src_buf->checker_cfg[i];
2588
2589                 if (!in_data->mask)
2590                         break;
2591
2592                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2593                         cfg_data = &dst_buf->checker_cfg[j];
2594
2595                         if (!cfg_data->mask ||
2596                             !memcmp(cfg_data, in_data,
2597                                     sizeof(struct ice_acl_rng_data))) {
2598                                 will_populate = true;
2599                                 break;
2600                         }
2601                 }
2602
2603                 if (will_populate) {
2604                         ice_memcpy(cfg_data, in_data,
2605                                    sizeof(struct ice_acl_rng_data),
2606                                    ICE_NONDMA_TO_NONDMA);
2607                 } else {
2608                         /* No available slot left to program range checker */
2609                         return ICE_ERR_MAX_LIMIT;
2610                 }
2611         }
2612
2613         return ICE_SUCCESS;
2614 }
2615
2616 /**
2617  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2618  * @hw: pointer to the hardware structure
2619  * @prof: pointer to flow profile
2620  * @entry: double pointer to the flow entry
2621  *
2622  * Look at the entries already added to the corresponding ACL scenario,
2623  * then apply the matching logic to decide whether to add this new entry,
2624  * modify an existing one, or do nothing.
2625  */
2626 static enum ice_status
2627 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2628                                  struct ice_flow_entry **entry)
2629 {
2630         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2631         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2632         struct ice_acl_act_entry *acts = NULL;
2633         struct ice_flow_entry *exist;
2634         enum ice_status status = ICE_SUCCESS;
2635         struct ice_flow_entry *e;
2636         u8 i;
2637
2638         if (!entry || !(*entry) || !prof)
2639                 return ICE_ERR_BAD_PTR;
2640
2641         e = *(entry);
2642
2643         do_chg_rng_chk = false;
2644         if (e->range_buf) {
2645                 u8 prof_id = 0;
2646
2647                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2648                                               &prof_id);
2649                 if (status)
2650                         return status;
2651
2652                 /* Query the current range-checker value in FW */
2653                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2654                                                    NULL);
2655                 if (status)
2656                         return status;
2657                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2658                            sizeof(struct ice_aqc_acl_profile_ranges),
2659                            ICE_NONDMA_TO_NONDMA);
2660
2661                 /* Generate the new range-checker value */
2662                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2663                 if (status)
2664                         return status;
2665
2666                 /* Reconfigure the range check if the buffer is changed. */
2667                 do_chg_rng_chk = false;
2668                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2669                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2670                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2671                                                           &cfg_rng_buf, NULL);
2672                         if (status)
2673                                 return status;
2674
2675                         do_chg_rng_chk = true;
2676                 }
2677         }
2678
2679         /* Determine whether to change the ACL action, add the new ACL
2680          * entry, and/or remove the current ACL entry
2681          */
2682         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2683                                                   &do_add_entry, &do_rem_entry);
2684
2685         if (do_rem_entry) {
2686                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2687                 if (status)
2688                         return status;
2689         }
2690
2691         /* Prepare the result action buffer */
2692         acts = (struct ice_acl_act_entry *)ice_calloc
2693                 (hw, e->acts_cnt, sizeof(struct ice_acl_act_entry));
2694         for (i = 0; i < e->acts_cnt; i++)
2695                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2696                            sizeof(struct ice_acl_act_entry),
2697                            ICE_NONDMA_TO_NONDMA);
2698
2699         if (do_add_entry) {
2700                 enum ice_acl_entry_prior prior;
2701                 u8 *keys, *inverts;
2702                 u16 entry_idx;
2703
2704                 keys = (u8 *)e->entry;
2705                 inverts = keys + (e->entry_sz / 2);
2706                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2707
2708                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2709                                            inverts, acts, e->acts_cnt,
2710                                            &entry_idx);
2711                 if (status)
2712                         goto out;
2713
2714                 e->scen_entry_idx = entry_idx;
2715                 LIST_ADD(&e->l_entry, &prof->entries);
2716         } else {
2717                 if (do_chg_action) {
2718                         /* Update the SW copy of the existing entry with
2719                          * e's action memory info
2720                          */
2721                         ice_free(hw, exist->acts);
2722                         exist->acts_cnt = e->acts_cnt;
2723                         exist->acts = (struct ice_flow_action *)
2724                                 ice_calloc(hw, exist->acts_cnt,
2725                                            sizeof(struct ice_flow_action));
2726
2727                         if (!exist->acts) {
2728                                 status = ICE_ERR_NO_MEMORY;
2729                                 goto out;
2730                         }
2731
2732                         ice_memcpy(exist->acts, e->acts,
2733                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2734                                    ICE_NONDMA_TO_NONDMA);
2735
2736                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2737                                                   e->acts_cnt,
2738                                                   exist->scen_entry_idx);
2739                         if (status)
2740                                 goto out;
2741                 }
2742
2743                 if (do_chg_rng_chk) {
2744                         /* In this case, we want to update the range checker
2745                          * information of the existing entry
2746                          */
2747                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2748                                                             e->range_buf);
2749                         if (status)
2750                                 goto out;
2751                 }
2752
2753                 /* As we don't add the new entry to our SW DB, deallocate
2754                  * its memory and return the existing entry to the caller
2755                  */
2756                 ice_dealloc_flow_entry(hw, e);
2757                 *(entry) = exist;
2758         }
2759 out:
2760         if (acts)
2761                 ice_free(hw, acts);
2762
2763         return status;
2764 }
2765
2766 /**
2767  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2768  * @hw: pointer to the hardware structure
2769  * @prof: pointer to flow profile
2770  * @e: double pointer to the flow entry
2771  */
2772 static enum ice_status
2773 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2774                             struct ice_flow_entry **e)
2775 {
2776         enum ice_status status;
2777
2778         ice_acquire_lock(&prof->entries_lock);
2779         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2780         ice_release_lock(&prof->entries_lock);
2781
2782         return status;
2783 }
2784
2785 /**
2786  * ice_flow_add_entry - Add a flow entry
2787  * @hw: pointer to the HW struct
2788  * @blk: classification stage
2789  * @prof_id: ID of the profile to add a new flow entry to
2790  * @entry_id: unique ID to identify this flow entry
2791  * @vsi_handle: software VSI handle for the flow entry
2792  * @prio: priority of the flow entry
2793  * @data: pointer to a data buffer containing flow entry's match values/masks
2794  * @acts: array of actions to be performed on a match
2795  * @acts_cnt: number of actions
2796  * @entry_h: pointer to buffer that receives the new flow entry's handle
2797  */
2798 enum ice_status
2799 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2800                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2801                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
2802                    u64 *entry_h)
2803 {
2804         struct ice_flow_entry *e = NULL;
2805         struct ice_flow_prof *prof;
2806         enum ice_status status = ICE_SUCCESS;
2807
2808         /* ACL entries must indicate an action */
2809         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2810                 return ICE_ERR_PARAM;
2811
2812         /* No flow entry data is expected for RSS */
2813         if (!entry_h || (!data && blk != ICE_BLK_RSS))
2814                 return ICE_ERR_BAD_PTR;
2815
2816         if (!ice_is_vsi_valid(hw, vsi_handle))
2817                 return ICE_ERR_PARAM;
2818
2819         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2820
2821         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2822         if (!prof) {
2823                 status = ICE_ERR_DOES_NOT_EXIST;
2824         } else {
2825                 /* Allocate memory for the entry being added and associate
2826                  * the VSI to the found flow profile
2827                  */
2828                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2829                 if (!e)
2830                         status = ICE_ERR_NO_MEMORY;
2831                 else
2832                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2833         }
2834
2835         ice_release_lock(&hw->fl_profs_locks[blk]);
2836         if (status)
2837                 goto out;
2838
2839         e->id = entry_id;
2840         e->vsi_handle = vsi_handle;
2841         e->prof = prof;
2842         e->priority = prio;
2843
2844         switch (blk) {
2845         case ICE_BLK_FD:
2846         case ICE_BLK_RSS:
2847                 break;
2848         case ICE_BLK_ACL:
2849                 /* ACL will handle the entry management */
2850                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
2851                                                  acts_cnt);
2852                 if (status)
2853                         goto out;
2854
2855                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
2856                 if (status)
2857                         goto out;
2858
2859                 break;
2860         case ICE_BLK_SW:
2861         case ICE_BLK_PE:
2862         default:
2863                 status = ICE_ERR_NOT_IMPL;
2864                 goto out;
2865         }
2866
2867         if (blk != ICE_BLK_ACL) {
2868                 /* ACL will handle the entry management */
2869                 ice_acquire_lock(&prof->entries_lock);
2870                 LIST_ADD(&e->l_entry, &prof->entries);
2871                 ice_release_lock(&prof->entries_lock);
2872         }
2873
2874         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
2875
2876 out:
2877         if (status && e) {
2878                 if (e->entry)
2879                         ice_free(hw, e->entry);
2880                 ice_free(hw, e);
2881         }
2882
2883         return status;
2884 }
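
/*
 * Illustrative usage sketch (editorial, not part of the driver): adding and
 * then removing a Flow Director entry through ice_flow_add_entry() and
 * ice_flow_rem_entry(). The profile ID, entry ID, VSI handle and the layout
 * of match_buf below are assumptions for the example; match_buf must follow
 * the value/mask locations that were programmed into the profile's segments
 * with ice_flow_set_fld().
 *
 *	u8 match_buf[64] = { 0 };	// filled per the profile's field locations
 *	u64 entry_h = ICE_FLOW_ENTRY_HANDLE_INVAL;
 *	enum ice_status status;
 *
 *	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, entry_id,
 *				    vsi_handle, ICE_FLOW_PRIO_NORMAL,
 *				    match_buf, NULL, 0, &entry_h);
 *	if (!status)
 *		status = ice_flow_rem_entry(hw, ICE_BLK_FD, entry_h);
 */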
2885
2886 /**
2887  * ice_flow_rem_entry - Remove a flow entry
2888  * @hw: pointer to the HW struct
2889  * @blk: classification stage
2890  * @entry_h: handle to the flow entry to be removed
2891  */
2892 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2893                                    u64 entry_h)
2894 {
2895         struct ice_flow_entry *entry;
2896         struct ice_flow_prof *prof;
2897         enum ice_status status = ICE_SUCCESS;
2898
2899         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2900                 return ICE_ERR_PARAM;
2901
2902         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2903
2904         /* Retain the pointer to the flow profile as the entry will be freed */
2905         prof = entry->prof;
2906
2907         if (prof) {
2908                 ice_acquire_lock(&prof->entries_lock);
2909                 status = ice_flow_rem_entry_sync(hw, blk, entry);
2910                 ice_release_lock(&prof->entries_lock);
2911         }
2912
2913         return status;
2914 }
2915
2916 /**
2917  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2918  * @seg: packet segment the field being set belongs to
2919  * @fld: field to be set
2920  * @field_type: type of the field
2921  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2922  *           entry's input buffer
2923  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2924  *            input buffer
2925  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2926  *            entry's input buffer
2927  *
2928  * This helper function stores information of a field being matched, including
2929  * the type of the field and the locations of the value to match, the mask,
2930  * and the upper-bound value at the start of the input buffer for a flow entry.
2931  * This function should only be used for fixed-size data structures.
2932  *
2933  * This function also opportunistically determines the protocol headers to be
2934  * present based on the fields being set. Some fields cannot be used alone to
2935  * determine the protocol headers present. Sometimes, fields for particular
2936  * protocol headers are not matched. In those cases, the protocol headers
2937  * must be explicitly set.
2938  */
2939 static void
2940 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2941                      enum ice_flow_fld_match_type field_type, u16 val_loc,
2942                      u16 mask_loc, u16 last_loc)
2943 {
2944         u64 bit = BIT_ULL(fld);
2945
2946         seg->match |= bit;
2947         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
2948                 seg->range |= bit;
2949
2950         seg->fields[fld].type = field_type;
2951         seg->fields[fld].src.val = val_loc;
2952         seg->fields[fld].src.mask = mask_loc;
2953         seg->fields[fld].src.last = last_loc;
2954
2955         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
2956 }
2957
2958 /**
2959  * ice_flow_set_fld - specifies locations of field from entry's input buffer
2960  * @seg: packet segment the field being set belongs to
2961  * @fld: field to be set
2962  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2963  *           entry's input buffer
2964  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2965  *            input buffer
2966  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2967  *            entry's input buffer
2968  * @range: indicate if field being matched is to be in a range
2969  *
2970  * This function specifies the locations, in the form of byte offsets from the
2971  * start of the input buffer for a flow entry, from where the value to match,
2972  * the mask value, and upper value can be extracted. These locations are then
2973  * stored in the flow profile. When adding a flow entry associated with the
2974  * flow profile, these locations will be used to quickly extract the values and
2975  * create the content of a match entry. This function should only be used for
2976  * fixed-size data structures.
2977  */
2978 void
2979 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2980                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
2981 {
2982         enum ice_flow_fld_match_type t = range ?
2983                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
2984
2985         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
2986 }
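
/*
 * Illustrative sketch (editorial, not part of the driver): a caller describing
 * where its match value and mask live in the flow entry input buffer. The
 * struct below is a hypothetical caller-owned layout; the byte offsets passed
 * here are stored in the profile and later used to extract the values when an
 * entry is added.
 *
 *	struct my_ipv4_fltr {			// hypothetical layout
 *		u32 src_ip;			// value to match
 *		u32 src_ip_mask;		// mask for the value
 *	};
 *
 *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *			 (u16)offsetof(struct my_ipv4_fltr, src_ip),
 *			 (u16)offsetof(struct my_ipv4_fltr, src_ip_mask),
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 */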
2987
2988 /**
2989  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
2990  * @seg: packet segment the field being set belongs to
2991  * @fld: field to be set
2992  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2993  *           entry's input buffer
2994  * @pref_loc: location of prefix value from entry's input buffer
2995  * @pref_sz: size of the location holding the prefix value
2996  *
2997  * This function specifies the locations, in the form of byte offsets from the
2998  * start of the input buffer for a flow entry, from where the value to match
2999  * and the IPv4 prefix value can be extracted. These locations are then stored
3000  * in the flow profile. When adding flow entries to the associated flow profile,
3001  * these locations can be used to quickly extract the values to create the
3002  * content of a match entry. This function should only be used for fixed-size
3003  * data structures.
3004  */
3005 void
3006 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3007                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3008 {
3009         /* For this type of field, the "mask" location is for the prefix value's
3010          * location and the "last" location is for the size of the location of
3011          * the prefix value.
3012          */
3013         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3014                              pref_loc, (u16)pref_sz);
3015 }
3016
3017 /**
3018  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3019  * @seg: packet segment the field being set belongs to
3020  * @off: offset of the raw field from the beginning of the segment in bytes
3021  * @len: length of the raw pattern to be matched
3022  * @val_loc: location of the value to match from entry's input buffer
3023  * @mask_loc: location of mask value from entry's input buffer
3024  *
3025  * This function specifies the offset of the raw field to be matched from the
3026  * beginning of the specified packet segment, and the locations, in the form of
3027  * byte offsets from the start of the input buffer for a flow entry, from where
3028  * the value to match and the mask value can be extracted. These locations are
3029  * then stored in the flow profile. When adding flow entries to the associated
3030  * flow profile, these locations can be used to quickly extract the values to
3031  * create the content of a match entry. This function should only be used for
3032  * fixed-size data structures.
3033  */
3034 void
3035 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3036                      u16 val_loc, u16 mask_loc)
3037 {
3038         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3039                 seg->raws[seg->raws_cnt].off = off;
3040                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3041                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3042                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3043                 /* The "last" field is used to store the length of the field */
3044                 seg->raws[seg->raws_cnt].info.src.last = len;
3045         }
3046
3047         /* Overflows of "raws" will be handled as an error condition later in
3048          * the flow when this information is processed.
3049          */
3050         seg->raws_cnt++;
3051 }
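
/*
 * Illustrative sketch (editorial, not part of the driver): matching a 4-byte
 * raw pattern located 8 bytes into the packet segment. raw_val_off and
 * raw_mask_off are assumed byte offsets into the caller's entry input buffer
 * where the pattern and its mask are stored; at most ICE_FLOW_SEG_RAW_FLD_MAX
 * raw fields can be added per segment.
 *
 *	u16 raw_val_off = 0, raw_mask_off = 4;	// hypothetical buffer layout
 *
 *	ice_flow_add_fld_raw(seg, 8, 4, raw_val_off, raw_mask_off);
 */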
3052
3053 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3054         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3055
3056 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3057         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3058
3059 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3060         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3061          ICE_FLOW_SEG_HDR_SCTP)
3062
3063 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3064         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3065          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3066          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3067
3068 /**
3069  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3070  * @segs: pointer to the flow field segment(s)
3071  * @hash_fields: fields to be hashed on for the segment(s)
3072  * @flow_hdr: protocol header fields within a packet segment
3073  *
3074  * Helper function to extract fields from the hash bitmap and use the flow
3075  * header value to set the flow field segment, for later use when adding or
3076  * removing a flow profile entry.
3077  */
3078 static enum ice_status
3079 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3080                           u32 flow_hdr)
3081 {
3082         u64 val = hash_fields;
3083         u8 i;
3084
3085         for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3086                 u64 bit = BIT_ULL(i);
3087
3088                 if (val & bit) {
3089                         ice_flow_set_fld(segs, (enum ice_flow_field)i,
3090                                          ICE_FLOW_FLD_OFF_INVAL,
3091                                          ICE_FLOW_FLD_OFF_INVAL,
3092                                          ICE_FLOW_FLD_OFF_INVAL, false);
3093                         val &= ~bit;
3094                 }
3095         }
3096         ICE_FLOW_SET_HDRS(segs, flow_hdr);
3097
3098         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3099             ~ICE_FLOW_RSS_HDRS_INNER_MASK)
3100                 return ICE_ERR_PARAM;
3101
3102         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3103         if (val && !ice_is_pow2(val))
3104                 return ICE_ERR_CFG;
3105
3106         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3107         if (val && !ice_is_pow2(val))
3108                 return ICE_ERR_CFG;
3109
3110         return ICE_SUCCESS;
3111 }
3112
3113 /**
3114  * ice_rem_vsi_rss_list - remove VSI from RSS list
3115  * @hw: pointer to the hardware structure
3116  * @vsi_handle: software VSI handle
3117  *
3118  * Remove the VSI from all RSS configurations in the list.
3119  */
3120 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3121 {
3122         struct ice_rss_cfg *r, *tmp;
3123
3124         if (LIST_EMPTY(&hw->rss_list_head))
3125                 return;
3126
3127         ice_acquire_lock(&hw->rss_locks);
3128         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3129                                  ice_rss_cfg, l_entry) {
3130                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3131                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3132                                 LIST_DEL(&r->l_entry);
3133                                 ice_free(hw, r);
3134                         }
3135         }
3136         ice_release_lock(&hw->rss_locks);
3137 }
3138
3139 /**
3140  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3141  * @hw: pointer to the hardware structure
3142  * @vsi_handle: software VSI handle
3143  *
3144  * This function will iterate through all flow profiles and disassociate
3145  * the VSI from each of them. If a flow profile then has no VSIs associated
3146  * with it, the profile will be removed.
3147  */
3148 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3149 {
3150         const enum ice_block blk = ICE_BLK_RSS;
3151         struct ice_flow_prof *p, *t;
3152         enum ice_status status = ICE_SUCCESS;
3153
3154         if (!ice_is_vsi_valid(hw, vsi_handle))
3155                 return ICE_ERR_PARAM;
3156
3157         if (LIST_EMPTY(&hw->fl_profs[blk]))
3158                 return ICE_SUCCESS;
3159
3160         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3161         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3162                                  l_entry) {
3163                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3164                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3165                         if (status)
3166                                 break;
3167
3168                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3169                                 status = ice_flow_rem_prof_sync(hw, blk, p);
3170                                 if (status)
3171                                         break;
3172                         }
3173                 }
3174         }
3175         ice_release_lock(&hw->fl_profs_locks[blk]);
3176
3177         return status;
3178 }
3179
3180 /**
3181  * ice_rem_rss_list - remove RSS configuration from list
3182  * @hw: pointer to the hardware structure
3183  * @vsi_handle: software VSI handle
3184  * @prof: pointer to flow profile
3185  *
3186  * Assumption: lock has already been acquired for RSS list
3187  */
3188 static void
3189 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3190 {
3191         struct ice_rss_cfg *r, *tmp;
3192
3193         /* Search for RSS hash fields associated with the VSI that match the
3194          * hash configurations associated with the flow profile. If found,
3195          * remove it from the RSS entry list of the VSI context and delete the entry.
3196          */
3197         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3198                                  ice_rss_cfg, l_entry) {
3199                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3200                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3201                         ice_clear_bit(vsi_handle, r->vsis);
3202                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3203                                 LIST_DEL(&r->l_entry);
3204                                 ice_free(hw, r);
3205                         }
3206                         return;
3207                 }
3208         }
3209 }
3210
3211 /**
3212  * ice_add_rss_list - add RSS configuration to list
3213  * @hw: pointer to the hardware structure
3214  * @vsi_handle: software VSI handle
3215  * @prof: pointer to flow profile
3216  *
3217  * Assumption: lock has already been acquired for RSS list
3218  */
3219 static enum ice_status
3220 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3221 {
3222         struct ice_rss_cfg *r, *rss_cfg;
3223
3224         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3225                             ice_rss_cfg, l_entry)
3226                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3227                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3228                         ice_set_bit(vsi_handle, r->vsis);
3229                         return ICE_SUCCESS;
3230                 }
3231
3232         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3233         if (!rss_cfg)
3234                 return ICE_ERR_NO_MEMORY;
3235
3236         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3237         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3238         rss_cfg->symm = prof->cfg.symm;
3239         ice_set_bit(vsi_handle, rss_cfg->vsis);
3240
3241         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3242
3243         return ICE_SUCCESS;
3244 }
3245
3246 #define ICE_FLOW_PROF_HASH_S    0
3247 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3248 #define ICE_FLOW_PROF_HDR_S     32
3249 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3250 #define ICE_FLOW_PROF_ENCAP_S   63
3251 #define ICE_FLOW_PROF_ENCAP_M   (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
3252
3253 #define ICE_RSS_OUTER_HEADERS   1
3254 #define ICE_RSS_INNER_HEADERS   2
3255
3256 /* Flow profile ID format:
3257  * [0:31] - Packet match fields
3258  * [32:61] - Protocol header (bit 62 is currently unused)
3259  * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
3260  */
3261 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3262         (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3263               (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3264               ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
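
/*
 * Editorial example of the profile ID encoding above (symbolic, not part of
 * the driver): hashing IPv4 source/destination addresses over a single outer
 * segment keeps the encapsulation bit (63) clear, places ICE_FLOW_SEG_HDR_IPV4
 * in the protocol-header bits and the match-field bitmap in bits [31:0]. A
 * two-segment (tunneled) request would set ICE_FLOW_PROF_ENCAP_M instead.
 *
 *	u64 prof_id = ICE_FLOW_GEN_PROFID(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *					  BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA),
 *					  ICE_FLOW_SEG_HDR_IPV4,
 *					  ICE_RSS_OUTER_HEADERS);
 */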
3265
3266 static void
3267 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3268 {
3269         u32 s = ((src % 4) << 3); /* byte shift */
3270         u32 v = dst | 0x80; /* value to program */
3271         u8 i = src / 4; /* register index */
3272         u32 reg;
3273
3274         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3275         reg = (reg & ~(0xff << s)) | (v << s);
3276         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3277 }
3278
3279 static void
3280 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3281 {
3282         int fv_last_word =
3283                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3284         int i;
3285
3286         for (i = 0; i < len; i++) {
3287                 ice_rss_config_xor_word(hw, prof_id,
3288                                         /* Note: the field vector ordering in
3289                                          * GLQF_HSYMM and GLQF_HINSET is reversed
3290                                          */
3291                                         fv_last_word - (src + i),
3292                                         fv_last_word - (dst + i));
3293                 ice_rss_config_xor_word(hw, prof_id,
3294                                         fv_last_word - (dst + i),
3295                                         fv_last_word - (src + i));
3296         }
3297 }
3298
3299 static void
3300 ice_rss_update_symm(struct ice_hw *hw,
3301                     struct ice_flow_prof *prof)
3302 {
3303         struct ice_prof_map *map;
3304         u8 prof_id, m;
3305
3306         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3307         prof_id = map->prof_id;
3308
3309         /* clear to default */
3310         for (m = 0; m < 6; m++)
3311                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3312         if (prof->cfg.symm) {
3313                 struct ice_flow_seg_info *seg =
3314                         &prof->segs[prof->segs_cnt - 1];
3315
3316                 struct ice_flow_seg_xtrct *ipv4_src =
3317                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3318                 struct ice_flow_seg_xtrct *ipv4_dst =
3319                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3320                 struct ice_flow_seg_xtrct *ipv6_src =
3321                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3322                 struct ice_flow_seg_xtrct *ipv6_dst =
3323                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3324
3325                 struct ice_flow_seg_xtrct *tcp_src =
3326                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3327                 struct ice_flow_seg_xtrct *tcp_dst =
3328                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3329
3330                 struct ice_flow_seg_xtrct *udp_src =
3331                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3332                 struct ice_flow_seg_xtrct *udp_dst =
3333                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3334
3335                 struct ice_flow_seg_xtrct *sctp_src =
3336                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3337                 struct ice_flow_seg_xtrct *sctp_dst =
3338                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3339
3340                 /* xor IPv4 */
3341                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3342                         ice_rss_config_xor(hw, prof_id,
3343                                            ipv4_src->idx, ipv4_dst->idx, 2);
3344
3345                 /* xor IPv6 */
3346                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3347                         ice_rss_config_xor(hw, prof_id,
3348                                            ipv6_src->idx, ipv6_dst->idx, 8);
3349
3350                 /* xor TCP */
3351                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3352                         ice_rss_config_xor(hw, prof_id,
3353                                            tcp_src->idx, tcp_dst->idx, 1);
3354
3355                 /* xor UDP */
3356                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3357                         ice_rss_config_xor(hw, prof_id,
3358                                            udp_src->idx, udp_dst->idx, 1);
3359
3360                 /* xor SCTP */
3361                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3362                         ice_rss_config_xor(hw, prof_id,
3363                                            sctp_src->idx, sctp_dst->idx, 1);
3364         }
3365 }
3366
3367 /**
3368  * ice_add_rss_cfg_sync - add an RSS configuration
3369  * @hw: pointer to the hardware structure
3370  * @vsi_handle: software VSI handle
3371  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3372  * @addl_hdrs: protocol header fields
3373  * @segs_cnt: packet segment count
3374  * @symm: symmetric hash enable/disable
3375  *
3376  * Assumption: lock has already been acquired for RSS list
3377  */
3378 static enum ice_status
3379 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3380                      u32 addl_hdrs, u8 segs_cnt, bool symm)
3381 {
3382         const enum ice_block blk = ICE_BLK_RSS;
3383         struct ice_flow_prof *prof = NULL;
3384         struct ice_flow_seg_info *segs;
3385         enum ice_status status;
3386
3387         if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3388                 return ICE_ERR_PARAM;
3389
3390         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3391                                                       sizeof(*segs));
3392         if (!segs)
3393                 return ICE_ERR_NO_MEMORY;
3394
3395         /* Construct the packet segment info from the hashed fields */
3396         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3397                                            addl_hdrs);
3398         if (status)
3399                 goto exit;
3400
3401         /* Search for a flow profile that has matching headers, hash fields
3402          * and has the input VSI associated to it. If found, no further
3403          * operations required and exit.
3404          */
3405         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3406                                         vsi_handle,
3407                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
3408                                         ICE_FLOW_FIND_PROF_CHK_VSI);
3409         if (prof) {
3410                 if (prof->cfg.symm == symm)
3411                         goto exit;
3412                 prof->cfg.symm = symm;
3413                 goto update_symm;
3414         }
3415
3416         /* Check if a flow profile exists with the same protocol headers and
3417          * associated with the input VSI. If so, disassociate the VSI from
3418          * this profile. The VSI will be added to a new profile created with
3419          * the protocol header and new hash field configuration.
3420          */
3421         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3422                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3423         if (prof) {
3424                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3425                 if (!status)
3426                         ice_rem_rss_list(hw, vsi_handle, prof);
3427                 else
3428                         goto exit;
3429
3430                 /* Remove profile if it has no VSIs associated */
3431                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3432                         status = ice_flow_rem_prof(hw, blk, prof->id);
3433                         if (status)
3434                                 goto exit;
3435                 }
3436         }
3437
3438         /* Search for a profile that has the same match fields only. If one
3439          * exists, associate the VSI with this profile.
3440          */
3441         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3442                                         vsi_handle,
3443                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3444         if (prof) {
3445                 if (prof->cfg.symm == symm) {
3446                         status = ice_flow_assoc_prof(hw, blk, prof,
3447                                                      vsi_handle);
3448                         if (!status)
3449                                 status = ice_add_rss_list(hw, vsi_handle,
3450                                                           prof);
3451                 } else {
3452                         /* if a profile exists but with a different symmetric
3453                          * requirement, just return an error.
3454                          */
3455                         status = ICE_ERR_NOT_SUPPORTED;
3456                 }
3457                 goto exit;
3458         }
3459
3460         /* Create a new flow profile with the generated profile ID and packet
3461          * segment information.
3462          */
3463         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3464                                    ICE_FLOW_GEN_PROFID(hashed_flds,
3465                                                        segs[segs_cnt - 1].hdrs,
3466                                                        segs_cnt),
3467                                    segs, segs_cnt, NULL, 0, &prof);
3468         if (status)
3469                 goto exit;
3470
3471         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3472         /* If association to a new flow profile failed then this profile can
3473          * be removed.
3474          */
3475         if (status) {
3476                 ice_flow_rem_prof(hw, blk, prof->id);
3477                 goto exit;
3478         }
3479
3480         status = ice_add_rss_list(hw, vsi_handle, prof);
3481
3482         prof->cfg.symm = symm;
3483
3484 update_symm:
3485         ice_rss_update_symm(hw, prof);
3486
3487 exit:
3488         ice_free(hw, segs);
3489         return status;
3490 }
3491
3492 /**
3493  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3494  * @hw: pointer to the hardware structure
3495  * @vsi_handle: software VSI handle
3496  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3497  * @addl_hdrs: protocol header fields
3498  * @symm: symmetric hash enable/disable
3499  *
3500  * This function will generate a flow profile based on the input fields to
3501  * hash on and the flow type, and will use the VSI number to add a flow entry
3502  * to the profile.
3503  */
3504 enum ice_status
3505 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3506                 u32 addl_hdrs, bool symm)
3507 {
3508         enum ice_status status;
3509
3510         if (hashed_flds == ICE_HASH_INVALID ||
3511             !ice_is_vsi_valid(hw, vsi_handle))
3512                 return ICE_ERR_PARAM;
3513
3514         ice_acquire_lock(&hw->rss_locks);
3515         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3516                                       ICE_RSS_OUTER_HEADERS, symm);
3517         if (!status)
3518                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3519                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3520                                               symm);
3521         ice_release_lock(&hw->rss_locks);
3522
3523         return status;
3524 }
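
/*
 * Illustrative usage sketch (editorial, not part of the driver): enabling
 * symmetric RSS hashing on IPv4 source/destination addresses and TCP ports
 * for one VSI. The hash bitmap is built from the match-field indices used in
 * this file; hw and vsi_handle are assumed to be a valid HW struct pointer and
 * software VSI handle.
 *
 *	u64 hash = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *		   BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
 *		   BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) |
 *		   BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT);
 *	enum ice_status status;
 *
 *	status = ice_add_rss_cfg(hw, vsi_handle, hash,
 *				 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
 *				 true);
 */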
3525
3526 /**
3527  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3528  * @hw: pointer to the hardware structure
3529  * @vsi_handle: software VSI handle
3530  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3531  * @addl_hdrs: Protocol header fields within a packet segment
3532  * @segs_cnt: packet segment count
3533  *
3534  * Assumption: lock has already been acquired for RSS list
3535  */
3536 static enum ice_status
3537 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3538                      u32 addl_hdrs, u8 segs_cnt)
3539 {
3540         const enum ice_block blk = ICE_BLK_RSS;
3541         struct ice_flow_seg_info *segs;
3542         struct ice_flow_prof *prof;
3543         enum ice_status status;
3544
3545         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3546                                                       sizeof(*segs));
3547         if (!segs)
3548                 return ICE_ERR_NO_MEMORY;
3549
3550         /* Construct the packet segment info from the hashed fields */
3551         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3552                                            addl_hdrs);
3553         if (status)
3554                 goto out;
3555
3556         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3557                                         vsi_handle,
3558                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3559         if (!prof) {
3560                 status = ICE_ERR_DOES_NOT_EXIST;
3561                 goto out;
3562         }
3563
3564         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3565         if (status)
3566                 goto out;
3567
3568         /* Remove RSS configuration from VSI context before deleting
3569          * the flow profile.
3570          */
3571         ice_rem_rss_list(hw, vsi_handle, prof);
3572
3573         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3574                 status = ice_flow_rem_prof(hw, blk, prof->id);
3575
3576 out:
3577         ice_free(hw, segs);
3578         return status;
3579 }
3580
3581 /**
3582  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3583  * @hw: pointer to the hardware structure
3584  * @vsi_handle: software VSI handle
3585  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3586  * @addl_hdrs: Protocol header fields within a packet segment
3587  *
3588  * This function will look up the flow profile based on the input
3589  * hash field bitmap, iterate through the profile entry list of
3590  * that profile and find the entry associated with the input VSI to be
3591  * removed. Calls are made to the underlying flow APIs which will in
3592  * turn build or update buffers for the RSS XLT1 section.
3593  */
3594 enum ice_status
3595 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3596                 u32 addl_hdrs)
3597 {
3598         enum ice_status status;
3599
3600         if (hashed_flds == ICE_HASH_INVALID ||
3601             !ice_is_vsi_valid(hw, vsi_handle))
3602                 return ICE_ERR_PARAM;
3603
3604         ice_acquire_lock(&hw->rss_locks);
3605         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3606                                       ICE_RSS_OUTER_HEADERS);
3607         if (!status)
3608                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3609                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3610         ice_release_lock(&hw->rss_locks);
3611
3612         return status;
3613 }
3614
3615 /**
3616  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3617  * @hw: pointer to the hardware structure
3618  * @vsi_handle: software VSI handle
3619  */
3620 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3621 {
3622         enum ice_status status = ICE_SUCCESS;
3623         struct ice_rss_cfg *r;
3624
3625         if (!ice_is_vsi_valid(hw, vsi_handle))
3626                 return ICE_ERR_PARAM;
3627
3628         ice_acquire_lock(&hw->rss_locks);
3629         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3630                             ice_rss_cfg, l_entry) {
3631                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3632                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3633                                                       r->hashed_flds,
3634                                                       r->packet_hdr,
3635                                                       ICE_RSS_OUTER_HEADERS,
3636                                                       r->symm);
3637                         if (status)
3638                                 break;
3639                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3640                                                       r->hashed_flds,
3641                                                       r->packet_hdr,
3642                                                       ICE_RSS_INNER_HEADERS,
3643                                                       r->symm);
3644                         if (status)
3645                                 break;
3646                 }
3647         }
3648         ice_release_lock(&hw->rss_locks);
3649
3650         return status;
3651 }
3652
3653 /**
3654  * ice_get_rss_cfg - returns hashed fields for the given header types
3655  * @hw: pointer to the hardware structure
3656  * @vsi_handle: software VSI handle
3657  * @hdrs: protocol header type
3658  *
3659  * This function will return the match fields of the first instance of a flow
3660  * profile having the given header types and containing the input VSI
3661  */
3662 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3663 {
3664         struct ice_rss_cfg *r, *rss_cfg = NULL;
3665
3666         /* verify that the protocol header is non-zero and the VSI is valid */
3667         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3668                 return ICE_HASH_INVALID;
3669
3670         ice_acquire_lock(&hw->rss_locks);
3671         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3672                             ice_rss_cfg, l_entry)
3673                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3674                     r->packet_hdr == hdrs) {
3675                         rss_cfg = r;
3676                         break;
3677                 }
3678         ice_release_lock(&hw->rss_locks);
3679
3680         return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
3681 }
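
/*
 * Illustrative sketch (editorial, not part of the driver): querying and then
 * tearing down an RSS configuration such as the one added in the sketch after
 * ice_add_rss_cfg(). ice_get_rss_cfg() returns the hashed-field bitmap of the
 * first matching profile or ICE_HASH_INVALID; ice_rem_rss_cfg() removes the
 * outer- and inner-header configurations for the same hash/header pair.
 *
 *	u64 flds = ice_get_rss_cfg(hw, vsi_handle,
 *				   ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
 *	if (flds != ICE_HASH_INVALID)
 *		ice_rem_rss_cfg(hw, vsi_handle, flds,
 *				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
 */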