net/ice/base: remove unnecessary braces
drivers/net/ice/base/ice_flow.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
14 #define ICE_FLOW_FLD_SZ_IP_TTL          1
15 #define ICE_FLOW_FLD_SZ_IP_PROT         1
16 #define ICE_FLOW_FLD_SZ_PORT            2
17 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
18 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
19 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
20 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
21 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
22 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
23 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
24 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
25 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
26 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
27 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
28 #define ICE_FLOW_FLD_SZ_AH_SPI  4
29 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
30
31 /* Describe properties of a protocol header field */
32 struct ice_flow_field_info {
33         enum ice_flow_seg_hdr hdr;
34         s16 off;        /* Offset from start of a protocol header, in bits */
35         u16 size;       /* Size of the field in bits */
36         u16 mask;       /* 16-bit mask for field */
37 };
38
39 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
40         .hdr = _hdr, \
41         .off = (_offset_bytes) * BITS_PER_BYTE, \
42         .size = (_size_bytes) * BITS_PER_BYTE, \
43         .mask = 0, \
44 }
45
46 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
47         .hdr = _hdr, \
48         .off = (_offset_bytes) * BITS_PER_BYTE, \
49         .size = (_size_bytes) * BITS_PER_BYTE, \
50         .mask = _mask, \
51 }
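
/* Illustrative expansion (not part of the original file): with BITS_PER_BYTE
 * equal to 8, the table entry
 *
 *	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR)
 *
 * expands to
 *
 *	{ .hdr = ICE_FLOW_SEG_HDR_IPV4, .off = 96, .size = 32, .mask = 0 }
 *
 * i.e. the byte offsets and sizes passed to these macros are stored in bits.
 */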
52
53 /* Table containing properties of supported protocol header fields */
54 static const
55 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
56         /* Ether */
57         /* ICE_FLOW_FIELD_IDX_ETH_DA */
58         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
59         /* ICE_FLOW_FIELD_IDX_ETH_SA */
60         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
61         /* ICE_FLOW_FIELD_IDX_S_VLAN */
62         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
63         /* ICE_FLOW_FIELD_IDX_C_VLAN */
64         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
65         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
66         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
67         /* IPv4 / IPv6 */
68         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
69         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
70                               0x00fc),
71         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
72         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
73                               0x0ff0),
74         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
75         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
76                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
77         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
78         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
79                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
80         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
81         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
82                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
83         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
84         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
85                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
86         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
87         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
88         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
89         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
90         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
91         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
92         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
93         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
94         /* Transport */
95         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
96         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
97         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
99         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
100         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
101         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
102         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
103         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
104         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
105         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
106         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
107         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
108         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
109         /* ARP */
110         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
111         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
112         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
113         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
114         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
115         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
116         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
117         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
118         /* ICE_FLOW_FIELD_IDX_ARP_OP */
119         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
120         /* ICMP */
121         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
122         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
123         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
124         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
125         /* GRE */
126         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
127         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
128         /* GTP */
129         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
130         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
131                           ICE_FLOW_FLD_SZ_GTP_TEID),
132         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
133         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
134                           ICE_FLOW_FLD_SZ_GTP_TEID),
135         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
136         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
137                           ICE_FLOW_FLD_SZ_GTP_TEID),
138         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
139         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
140                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
141         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
142         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
143                           ICE_FLOW_FLD_SZ_GTP_TEID),
144         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
145         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
146                           ICE_FLOW_FLD_SZ_GTP_TEID),
147         /* PPPOE */
148         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
149         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
150                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
151         /* PFCP */
152         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
153         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
154                           ICE_FLOW_FLD_SZ_PFCP_SEID),
155         /* L2TPV3 */
156         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
157         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
158                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
159         /* ESP */
160         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
161         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
162                           ICE_FLOW_FLD_SZ_ESP_SPI),
163         /* AH */
164         /* ICE_FLOW_FIELD_IDX_AH_SPI */
165         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
166                           ICE_FLOW_FLD_SZ_AH_SPI),
167         /* NAT_T_ESP */
168         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
169         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
170                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
171 };
172
173 /* Bitmaps indicating relevant packet types for a particular protocol header
174  *
175  * Packet types for packets with an Outer/First/Single MAC header
176  */
177 static const u32 ice_ptypes_mac_ofos[] = {
178         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
179         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
180         0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
181         0x00000000, 0x00000000, 0x00000000, 0x00000000,
182         0x00000000, 0x00000000, 0x00000000, 0x00000000,
183         0x00000000, 0x00000000, 0x00000000, 0x00000000,
184         0x00000000, 0x00000000, 0x00000000, 0x00000000,
185         0x00000000, 0x00000000, 0x00000000, 0x00000000,
186 };
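
/* Sketch (hypothetical helper, for illustration only) of how these tables are
 * read: each table is 32 u32 words (1024 bits), one bit per packet type.
 * Bit N lives in word N / 32 at bit position N % 32.
 */
static inline u32 ice_ptype_tbl_test(const u32 *tbl, u16 ptype)
{
	return (tbl[ptype / 32] >> (ptype % 32)) & 0x1;
}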
187
188 /* Packet types for packets with an Innermost/Last MAC VLAN header */
189 static const u32 ice_ptypes_macvlan_il[] = {
190         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
191         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
192         0x00000000, 0x00000000, 0x00000000, 0x00000000,
193         0x00000000, 0x00000000, 0x00000000, 0x00000000,
194         0x00000000, 0x00000000, 0x00000000, 0x00000000,
195         0x00000000, 0x00000000, 0x00000000, 0x00000000,
196         0x00000000, 0x00000000, 0x00000000, 0x00000000,
197         0x00000000, 0x00000000, 0x00000000, 0x00000000,
198 };
199
200 /* Packet types for packets with an Outer/First/Single IPv4 header */
201 static const u32 ice_ptypes_ipv4_ofos[] = {
202         0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
203         0x00000000, 0x00000155, 0x00000000, 0x00000000,
204         0x0003000F, 0x000FC000, 0x83E0F800, 0x00000101,
205         0x00000000, 0x00000000, 0x00000000, 0x00000000,
206         0x00000000, 0x00000000, 0x00000000, 0x00000000,
207         0x00000000, 0x00000000, 0x00000000, 0x00000000,
208         0x00000000, 0x00000000, 0x00000000, 0x00000000,
209         0x00000000, 0x00000000, 0x00000000, 0x00000000,
210 };
211
212 /* Packet types for packets with an Innermost/Last IPv4 header */
213 static const u32 ice_ptypes_ipv4_il[] = {
214         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
215         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
216         0x00000000, 0x00000000, 0x001FF800, 0x00000000,
217         0x00000000, 0x00000000, 0x00000000, 0x00000000,
218         0x00000000, 0x00000000, 0x00000000, 0x00000000,
219         0x00000000, 0x00000000, 0x00000000, 0x00000000,
220         0x00000000, 0x00000000, 0x00000000, 0x00000000,
221         0x00000000, 0x00000000, 0x00000000, 0x00000000,
222 };
223
224 /* Packet types for packets with an Outer/First/Single IPv6 header */
225 static const u32 ice_ptypes_ipv6_ofos[] = {
226         0x00000000, 0x00000000, 0x77000000, 0x10002000,
227         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
228         0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
229         0x00000000, 0x00000000, 0x00000000, 0x00000000,
230         0x00000000, 0x00000000, 0x00000000, 0x00000000,
231         0x00000000, 0x00000000, 0x00000000, 0x00000000,
232         0x00000000, 0x00000000, 0x00000000, 0x00000000,
233         0x00000000, 0x00000000, 0x00000000, 0x00000000,
234 };
235
236 /* Packet types for packets with an Innermost/Last IPv6 header */
237 static const u32 ice_ptypes_ipv6_il[] = {
238         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
239         0x00000770, 0x00000000, 0x00000000, 0x00000000,
240         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
241         0x00000000, 0x00000000, 0x00000000, 0x00000000,
242         0x00000000, 0x00000000, 0x00000000, 0x00000000,
243         0x00000000, 0x00000000, 0x00000000, 0x00000000,
244         0x00000000, 0x00000000, 0x00000000, 0x00000000,
245         0x00000000, 0x00000000, 0x00000000, 0x00000000,
246 };
247
248 /* Packet types for packets with an Outermost/First ARP header */
249 static const u32 ice_ptypes_arp_of[] = {
250         0x00000800, 0x00000000, 0x00000000, 0x00000000,
251         0x00000000, 0x00000000, 0x00000000, 0x00000000,
252         0x00000000, 0x00000000, 0x00000000, 0x00000000,
253         0x00000000, 0x00000000, 0x00000000, 0x00000000,
254         0x00000000, 0x00000000, 0x00000000, 0x00000000,
255         0x00000000, 0x00000000, 0x00000000, 0x00000000,
256         0x00000000, 0x00000000, 0x00000000, 0x00000000,
257         0x00000000, 0x00000000, 0x00000000, 0x00000000,
258 };
259
260 /* UDP Packet types for non-tunneled packets or tunneled
261  * packets with inner UDP.
262  */
263 static const u32 ice_ptypes_udp_il[] = {
264         0x81000000, 0x20204040, 0x04000010, 0x80810102,
265         0x00000040, 0x00000000, 0x00000000, 0x00000000,
266         0x00000000, 0x00410000, 0x90842000, 0x00000007,
267         0x00000000, 0x00000000, 0x00000000, 0x00000000,
268         0x00000000, 0x00000000, 0x00000000, 0x00000000,
269         0x00000000, 0x00000000, 0x00000000, 0x00000000,
270         0x00000000, 0x00000000, 0x00000000, 0x00000000,
271         0x00000000, 0x00000000, 0x00000000, 0x00000000,
272 };
273
274 /* Packet types for packets with an Innermost/Last TCP header */
275 static const u32 ice_ptypes_tcp_il[] = {
276         0x04000000, 0x80810102, 0x10000040, 0x02040408,
277         0x00000102, 0x00000000, 0x00000000, 0x00000000,
278         0x00000000, 0x00820000, 0x21084000, 0x00000000,
279         0x00000000, 0x00000000, 0x00000000, 0x00000000,
280         0x00000000, 0x00000000, 0x00000000, 0x00000000,
281         0x00000000, 0x00000000, 0x00000000, 0x00000000,
282         0x00000000, 0x00000000, 0x00000000, 0x00000000,
283         0x00000000, 0x00000000, 0x00000000, 0x00000000,
284 };
285
286 /* Packet types for packets with an Innermost/Last SCTP header */
287 static const u32 ice_ptypes_sctp_il[] = {
288         0x08000000, 0x01020204, 0x20000081, 0x04080810,
289         0x00000204, 0x00000000, 0x00000000, 0x00000000,
290         0x00000000, 0x01040000, 0x00000000, 0x00000000,
291         0x00000000, 0x00000000, 0x00000000, 0x00000000,
292         0x00000000, 0x00000000, 0x00000000, 0x00000000,
293         0x00000000, 0x00000000, 0x00000000, 0x00000000,
294         0x00000000, 0x00000000, 0x00000000, 0x00000000,
295         0x00000000, 0x00000000, 0x00000000, 0x00000000,
296 };
297
298 /* Packet types for packets with an Outermost/First ICMP header */
299 static const u32 ice_ptypes_icmp_of[] = {
300         0x10000000, 0x00000000, 0x00000000, 0x00000000,
301         0x00000000, 0x00000000, 0x00000000, 0x00000000,
302         0x00000000, 0x00000000, 0x00000000, 0x00000000,
303         0x00000000, 0x00000000, 0x00000000, 0x00000000,
304         0x00000000, 0x00000000, 0x00000000, 0x00000000,
305         0x00000000, 0x00000000, 0x00000000, 0x00000000,
306         0x00000000, 0x00000000, 0x00000000, 0x00000000,
307         0x00000000, 0x00000000, 0x00000000, 0x00000000,
308 };
309
310 /* Packet types for packets with an Innermost/Last ICMP header */
311 static const u32 ice_ptypes_icmp_il[] = {
312         0x00000000, 0x02040408, 0x40000102, 0x08101020,
313         0x00000408, 0x00000000, 0x00000000, 0x00000000,
314         0x00000000, 0x00000000, 0x42108000, 0x00000000,
315         0x00000000, 0x00000000, 0x00000000, 0x00000000,
316         0x00000000, 0x00000000, 0x00000000, 0x00000000,
317         0x00000000, 0x00000000, 0x00000000, 0x00000000,
318         0x00000000, 0x00000000, 0x00000000, 0x00000000,
319         0x00000000, 0x00000000, 0x00000000, 0x00000000,
320 };
321
322 /* Packet types for packets with an Outermost/First GRE header */
323 static const u32 ice_ptypes_gre_of[] = {
324         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
325         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
326         0x00000000, 0x00000000, 0x00000000, 0x00000000,
327         0x00000000, 0x00000000, 0x00000000, 0x00000000,
328         0x00000000, 0x00000000, 0x00000000, 0x00000000,
329         0x00000000, 0x00000000, 0x00000000, 0x00000000,
330         0x00000000, 0x00000000, 0x00000000, 0x00000000,
331         0x00000000, 0x00000000, 0x00000000, 0x00000000,
332 };
333
334 /* Packet types for packets with an Innermost/Last MAC header */
335 static const u32 ice_ptypes_mac_il[] = {
336         0x00000000, 0x00000000, 0x00000000, 0x00000000,
337         0x00000000, 0x00000000, 0x00000000, 0x00000000,
338         0x00000000, 0x00000000, 0x00000000, 0x00000000,
339         0x00000000, 0x00000000, 0x00000000, 0x00000000,
340         0x00000000, 0x00000000, 0x00000000, 0x00000000,
341         0x00000000, 0x00000000, 0x00000000, 0x00000000,
342         0x00000000, 0x00000000, 0x00000000, 0x00000000,
343         0x00000000, 0x00000000, 0x00000000, 0x00000000,
344 };
345
346 /* Packet types for GTPC */
347 static const u32 ice_ptypes_gtpc[] = {
348         0x00000000, 0x00000000, 0x00000000, 0x00000000,
349         0x00000000, 0x00000000, 0x00000000, 0x00000000,
350         0x00000000, 0x00000000, 0x00000180, 0x00000000,
351         0x00000000, 0x00000000, 0x00000000, 0x00000000,
352         0x00000000, 0x00000000, 0x00000000, 0x00000000,
353         0x00000000, 0x00000000, 0x00000000, 0x00000000,
354         0x00000000, 0x00000000, 0x00000000, 0x00000000,
355         0x00000000, 0x00000000, 0x00000000, 0x00000000,
356 };
357
358 /* Packet types for GTPC with TEID */
359 static const u32 ice_ptypes_gtpc_tid[] = {
360         0x00000000, 0x00000000, 0x00000000, 0x00000000,
361         0x00000000, 0x00000000, 0x00000000, 0x00000000,
362         0x00000000, 0x00000000, 0x00000060, 0x00000000,
363         0x00000000, 0x00000000, 0x00000000, 0x00000000,
364         0x00000000, 0x00000000, 0x00000000, 0x00000000,
365         0x00000000, 0x00000000, 0x00000000, 0x00000000,
366         0x00000000, 0x00000000, 0x00000000, 0x00000000,
367         0x00000000, 0x00000000, 0x00000000, 0x00000000,
368 };
369
370 /* Packet types for GTPU */
371 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
372         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
373         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
374         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
375         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
376         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
377         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
378         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
379         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
380         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
381         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
382         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
383         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
384         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
385         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
386         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
387         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
388         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
389         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
390         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
391         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
392 };
393
394 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
395         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
396         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
397         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
398         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
399         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
400         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
401         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
402         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
403         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
404         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
405         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
406         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
407         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
408         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
409         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
410         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
411         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
412         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
413         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
414         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
415 };
416
417 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
418         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
419         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
420         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
421         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
422         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
423         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
424         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
425         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
426         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
427         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
428         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
429         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
430         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
431         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
432         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
433         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
434         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
435         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
436         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
437         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
438 };
439
440 static const u32 ice_ptypes_gtpu[] = {
441         0x00000000, 0x00000000, 0x00000000, 0x00000000,
442         0x00000000, 0x00000000, 0x00000000, 0x00000000,
443         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
444         0x00000000, 0x00000000, 0x00000000, 0x00000000,
445         0x00000000, 0x00000000, 0x00000000, 0x00000000,
446         0x00000000, 0x00000000, 0x00000000, 0x00000000,
447         0x00000000, 0x00000000, 0x00000000, 0x00000000,
448         0x00000000, 0x00000000, 0x00000000, 0x00000000,
449 };
450
451 /* Packet types for PPPoE */
452 static const u32 ice_ptypes_pppoe[] = {
453         0x00000000, 0x00000000, 0x00000000, 0x00000000,
454         0x00000000, 0x00000000, 0x00000000, 0x00000000,
455         0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
456         0x00000000, 0x00000000, 0x00000000, 0x00000000,
457         0x00000000, 0x00000000, 0x00000000, 0x00000000,
458         0x00000000, 0x00000000, 0x00000000, 0x00000000,
459         0x00000000, 0x00000000, 0x00000000, 0x00000000,
460         0x00000000, 0x00000000, 0x00000000, 0x00000000,
461 };
462
463 /* Packet types for packets with PFCP NODE header */
464 static const u32 ice_ptypes_pfcp_node[] = {
465         0x00000000, 0x00000000, 0x00000000, 0x00000000,
466         0x00000000, 0x00000000, 0x00000000, 0x00000000,
467         0x00000000, 0x00000000, 0x80000000, 0x00000002,
468         0x00000000, 0x00000000, 0x00000000, 0x00000000,
469         0x00000000, 0x00000000, 0x00000000, 0x00000000,
470         0x00000000, 0x00000000, 0x00000000, 0x00000000,
471         0x00000000, 0x00000000, 0x00000000, 0x00000000,
472         0x00000000, 0x00000000, 0x00000000, 0x00000000,
473 };
474
475 /* Packet types for packets with PFCP SESSION header */
476 static const u32 ice_ptypes_pfcp_session[] = {
477         0x00000000, 0x00000000, 0x00000000, 0x00000000,
478         0x00000000, 0x00000000, 0x00000000, 0x00000000,
479         0x00000000, 0x00000000, 0x00000000, 0x00000005,
480         0x00000000, 0x00000000, 0x00000000, 0x00000000,
481         0x00000000, 0x00000000, 0x00000000, 0x00000000,
482         0x00000000, 0x00000000, 0x00000000, 0x00000000,
483         0x00000000, 0x00000000, 0x00000000, 0x00000000,
484         0x00000000, 0x00000000, 0x00000000, 0x00000000,
485 };
486
487 /* Packet types for L2TPv3 */
488 static const u32 ice_ptypes_l2tpv3[] = {
489         0x00000000, 0x00000000, 0x00000000, 0x00000000,
490         0x00000000, 0x00000000, 0x00000000, 0x00000000,
491         0x00000000, 0x00000000, 0x00000000, 0x00000300,
492         0x00000000, 0x00000000, 0x00000000, 0x00000000,
493         0x00000000, 0x00000000, 0x00000000, 0x00000000,
494         0x00000000, 0x00000000, 0x00000000, 0x00000000,
495         0x00000000, 0x00000000, 0x00000000, 0x00000000,
496         0x00000000, 0x00000000, 0x00000000, 0x00000000,
497 };
498
499 /* Packet types for ESP */
500 static const u32 ice_ptypes_esp[] = {
501         0x00000000, 0x00000000, 0x00000000, 0x00000000,
502         0x00000000, 0x00000003, 0x00000000, 0x00000000,
503         0x00000000, 0x00000000, 0x00000000, 0x00000000,
504         0x00000000, 0x00000000, 0x00000000, 0x00000000,
505         0x00000000, 0x00000000, 0x00000000, 0x00000000,
506         0x00000000, 0x00000000, 0x00000000, 0x00000000,
507         0x00000000, 0x00000000, 0x00000000, 0x00000000,
508         0x00000000, 0x00000000, 0x00000000, 0x00000000,
509 };
510
511 /* Packet types for AH */
512 static const u32 ice_ptypes_ah[] = {
513         0x00000000, 0x00000000, 0x00000000, 0x00000000,
514         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
515         0x00000000, 0x00000000, 0x00000000, 0x00000000,
516         0x00000000, 0x00000000, 0x00000000, 0x00000000,
517         0x00000000, 0x00000000, 0x00000000, 0x00000000,
518         0x00000000, 0x00000000, 0x00000000, 0x00000000,
519         0x00000000, 0x00000000, 0x00000000, 0x00000000,
520         0x00000000, 0x00000000, 0x00000000, 0x00000000,
521 };
522
523 /* Packet types for packets with NAT_T ESP header */
524 static const u32 ice_ptypes_nat_t_esp[] = {
525         0x00000000, 0x00000000, 0x00000000, 0x00000000,
526         0x00000000, 0x00000030, 0x00000000, 0x00000000,
527         0x00000000, 0x00000000, 0x00000000, 0x00000000,
528         0x00000000, 0x00000000, 0x00000000, 0x00000000,
529         0x00000000, 0x00000000, 0x00000000, 0x00000000,
530         0x00000000, 0x00000000, 0x00000000, 0x00000000,
531         0x00000000, 0x00000000, 0x00000000, 0x00000000,
532         0x00000000, 0x00000000, 0x00000000, 0x00000000,
533 };
534
535 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
536         0x00000846, 0x00000000, 0x00000000, 0x00000000,
537         0x00000000, 0x00000000, 0x00000000, 0x00000000,
538         0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
539         0x00000000, 0x00000000, 0x00000000, 0x00000000,
540         0x00000000, 0x00000000, 0x00000000, 0x00000000,
541         0x00000000, 0x00000000, 0x00000000, 0x00000000,
542         0x00000000, 0x00000000, 0x00000000, 0x00000000,
543         0x00000000, 0x00000000, 0x00000000, 0x00000000,
544 };
545
546 /* Manage parameters and info used during the creation of a flow profile */
547 struct ice_flow_prof_params {
548         enum ice_block blk;
549         u16 entry_length; /* # of bytes formatted entry will require */
550         u8 es_cnt;
551         struct ice_flow_prof *prof;
552
553         /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0,
554          * which will give us the direction flags.
555          */
556         struct ice_fv_word es[ICE_MAX_FV_WORDS];
557         /* attributes can be used to add attributes to a particular PTYPE */
558         const struct ice_ptype_attributes *attr;
559         u16 attr_cnt;
560
561         u16 mask[ICE_MAX_FV_WORDS];
562         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
563 };
564
565 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
566         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
567         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
568         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
569         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
570         ICE_FLOW_SEG_HDR_NAT_T_ESP)
571
572 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
573         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
574 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
575         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
576          ICE_FLOW_SEG_HDR_ARP)
577 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
578         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
579          ICE_FLOW_SEG_HDR_SCTP)
580
581 /**
582  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
583  * @segs: array of one or more packet segments that describe the flow
584  * @segs_cnt: number of packet segments provided
585  */
586 static enum ice_status
587 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
588 {
589         u8 i;
590
591         for (i = 0; i < segs_cnt; i++) {
592                 /* Multiple L3 headers */
593                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
594                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
595                         return ICE_ERR_PARAM;
596
597                 /* Multiple L4 headers */
598                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
599                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
600                         return ICE_ERR_PARAM;
601         }
602
603         return ICE_SUCCESS;
604 }
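
/* Example (illustrative, not from the original file): a segment asking for two
 * L3 headers fails the power-of-two check above, while one L3 plus one L4
 * header passes:
 *
 *	struct ice_flow_seg_info seg = { 0 };
 *
 *	seg.hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6;
 *	ice_flow_val_hdrs(&seg, 1);	// ICE_ERR_PARAM
 *
 *	seg.hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP;
 *	ice_flow_val_hdrs(&seg, 1);	// ICE_SUCCESS
 */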
605
606 /* Sizes of fixed known protocol headers without header options */
607 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
608 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
609 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
610 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
611 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
612 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
613 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
614 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
615 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
616
617 /**
618  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
619  * @params: information about the flow to be processed
620  * @seg: index of packet segment whose header size is to be determined
621  */
622 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
623 {
624         u16 sz;
625
626         /* L2 headers */
627         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
628                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
629
630         /* L3 headers */
631         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
632                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
633         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
634                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
635         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
636                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
637         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
638                 /* An L3 header is required if an L4 header is specified */
639                 return 0;
640
641         /* L4 headers */
642         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
643                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
644         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
645                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
646         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
647                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
648         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
649                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
650
651         return sz;
652 }
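
/* Worked example: a segment with hdrs = VLAN | IPV4 | TCP sizes to
 * ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (16) + ICE_FLOW_PROT_HDR_SZ_IPV4 (20) +
 * ICE_FLOW_PROT_HDR_SZ_TCP (20) = 56 bytes, while an L4 header with no L3
 * header returns 0 to flag the invalid combination.
 */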
653
654 /**
655  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
656  * @params: information about the flow to be processed
657  *
658  * This function identifies the packet types associated with the protocol
659  * headers present in the packet segments of the specified flow profile.
660  */
661 static enum ice_status
662 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
663 {
664         struct ice_flow_prof *prof;
665         u8 i;
666
667         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
668                    ICE_NONDMA_MEM);
669
670         prof = params->prof;
671
672         for (i = 0; i < params->prof->segs_cnt; i++) {
673                 const ice_bitmap_t *src;
674                 u32 hdrs;
675
676                 hdrs = prof->segs[i].hdrs;
677
678                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
679                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
680                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
681                         ice_and_bitmap(params->ptypes, params->ptypes, src,
682                                        ICE_FLOW_PTYPE_MAX);
683                 }
684
685                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
686                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
687                         ice_and_bitmap(params->ptypes, params->ptypes, src,
688                                        ICE_FLOW_PTYPE_MAX);
689                 }
690
691                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
692                         ice_and_bitmap(params->ptypes, params->ptypes,
693                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
694                                        ICE_FLOW_PTYPE_MAX);
695                 }
696
697                 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
698                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
699                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
700                         ice_and_bitmap(params->ptypes, params->ptypes, src,
701                                        ICE_FLOW_PTYPE_MAX);
702                         if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
703                                 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
704                                 ice_and_bitmap(params->ptypes,
705                                                 params->ptypes, src,
706                                                ICE_FLOW_PTYPE_MAX);
707                         } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
708                                 ice_and_bitmap(params->ptypes, params->ptypes,
709                                                (const ice_bitmap_t *)
710                                                ice_ptypes_tcp_il,
711                                                ICE_FLOW_PTYPE_MAX);
712                         } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
713                                 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
714                                 ice_and_bitmap(params->ptypes, params->ptypes,
715                                                src, ICE_FLOW_PTYPE_MAX);
716                         }
717                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
718                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
719                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
720                         ice_and_bitmap(params->ptypes, params->ptypes, src,
721                                        ICE_FLOW_PTYPE_MAX);
722                         if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
723                                 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
724                                 ice_and_bitmap(params->ptypes,
725                                                 params->ptypes, src,
726                                                ICE_FLOW_PTYPE_MAX);
727                         } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
728                                 ice_and_bitmap(params->ptypes, params->ptypes,
729                                                (const ice_bitmap_t *)
730                                                ice_ptypes_tcp_il,
731                                                ICE_FLOW_PTYPE_MAX);
732                         } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
733                                 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
734                                 ice_and_bitmap(params->ptypes, params->ptypes,
735                                                src, ICE_FLOW_PTYPE_MAX);
736                         }
737                 }
738
739                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
740                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
741                         ice_and_bitmap(params->ptypes, params->ptypes,
742                                        src, ICE_FLOW_PTYPE_MAX);
743                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
744                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
745                         ice_and_bitmap(params->ptypes, params->ptypes, src,
746                                        ICE_FLOW_PTYPE_MAX);
747                 }
748
749                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
750                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
751                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
752                         ice_and_bitmap(params->ptypes, params->ptypes, src,
753                                        ICE_FLOW_PTYPE_MAX);
754                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
755                         if (!i) {
756                                 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
757                                 ice_and_bitmap(params->ptypes, params->ptypes,
758                                                src, ICE_FLOW_PTYPE_MAX);
759                         }
760                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
761                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
762                         ice_and_bitmap(params->ptypes, params->ptypes,
763                                        src, ICE_FLOW_PTYPE_MAX);
764                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
765                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
766                         ice_and_bitmap(params->ptypes, params->ptypes,
767                                        src, ICE_FLOW_PTYPE_MAX);
768                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
769                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
770                         ice_and_bitmap(params->ptypes, params->ptypes,
771                                        src, ICE_FLOW_PTYPE_MAX);
772
773                         /* Attributes for GTP packet with downlink */
774                         params->attr = ice_attr_gtpu_down;
775                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
776                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
777                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
778                         ice_and_bitmap(params->ptypes, params->ptypes,
779                                        src, ICE_FLOW_PTYPE_MAX);
780
781                         /* Attributes for GTP packet with uplink */
782                         params->attr = ice_attr_gtpu_up;
783                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
784                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
785                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
786                         ice_and_bitmap(params->ptypes, params->ptypes,
787                                        src, ICE_FLOW_PTYPE_MAX);
788
789                         /* Attributes for GTP packet with Extension Header */
790                         params->attr = ice_attr_gtpu_eh;
791                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
792                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
793                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
794                         ice_and_bitmap(params->ptypes, params->ptypes,
795                                        src, ICE_FLOW_PTYPE_MAX);
796                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
797                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
798                         ice_and_bitmap(params->ptypes, params->ptypes,
799                                        src, ICE_FLOW_PTYPE_MAX);
800                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
801                         src = (const ice_bitmap_t *)ice_ptypes_esp;
802                         ice_and_bitmap(params->ptypes, params->ptypes,
803                                        src, ICE_FLOW_PTYPE_MAX);
804                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
805                         src = (const ice_bitmap_t *)ice_ptypes_ah;
806                         ice_and_bitmap(params->ptypes, params->ptypes,
807                                        src, ICE_FLOW_PTYPE_MAX);
808                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
809                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
810                         ice_and_bitmap(params->ptypes, params->ptypes,
811                                        src, ICE_FLOW_PTYPE_MAX);
812                 }
813
814                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
815                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
816                                 src =
817                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
818                         else
819                                 src =
820                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
821
822                         ice_and_bitmap(params->ptypes, params->ptypes,
823                                        src, ICE_FLOW_PTYPE_MAX);
824                 } else {
825                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
826                         ice_andnot_bitmap(params->ptypes, params->ptypes,
827                                           src, ICE_FLOW_PTYPE_MAX);
828
829                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
830                         ice_andnot_bitmap(params->ptypes, params->ptypes,
831                                           src, ICE_FLOW_PTYPE_MAX);
832                 }
833         }
834
835         return ICE_SUCCESS;
836 }
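
/* Example: a single (outer) segment with hdrs = ETH | IPV4 | UDP starts from
 * an all-ones ptype bitmap and is ANDed in turn with ice_ptypes_mac_ofos,
 * ice_ptypes_ipv4_ofos and ice_ptypes_udp_il, leaving only the packet types
 * that carry all three headers; since no PFCP header was requested, the PFCP
 * node/session packet types are then masked out as well.
 */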
837
838 /**
839  * ice_flow_xtract_pkt_flags - Create an extraction sequence entry for packet flags
840  * @hw: pointer to the HW struct
841  * @params: information about the flow to be processed
842  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
843  *
844  * This function will allocate an extraction sequence entry for a DWORD-size
845  * chunk of the packet flags.
846  */
847 static enum ice_status
848 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
849                           struct ice_flow_prof_params *params,
850                           enum ice_flex_mdid_pkt_flags flags)
851 {
852         u8 fv_words = hw->blk[params->blk].es.fvw;
853         u8 idx;
854
855         /* Make sure the number of extraction sequence entries required does not
856          * exceed the block's capacity.
857          */
858         if (params->es_cnt >= fv_words)
859                 return ICE_ERR_MAX_LIMIT;
860
861         /* some blocks require a reversed field vector layout */
862         if (hw->blk[params->blk].es.reverse)
863                 idx = fv_words - params->es_cnt - 1;
864         else
865                 idx = params->es_cnt;
866
867         params->es[idx].prot_id = ICE_PROT_META_ID;
868         params->es[idx].off = flags;
869         params->es_cnt++;
870
871         return ICE_SUCCESS;
872 }
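
/* Example: with a field vector of, say, 48 words and es_cnt = 0, a block
 * using the reversed field vector layout places this entry at index 47,
 * while a non-reversed block places it at index 0.
 */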
873
874 /**
875  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
876  * @hw: pointer to the HW struct
877  * @params: information about the flow to be processed
878  * @seg: packet segment index of the field to be extracted
879  * @fld: ID of field to be extracted
880  * @match: bitfield of all fields
881  *
882  * This function determines the protocol ID, offset, and size of the given
883  * field. It then allocates one or more extraction sequence entries for the
884  * given field, and fills the entries with protocol ID and offset information.
885  */
886 static enum ice_status
887 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
888                     u8 seg, enum ice_flow_field fld, u64 match)
889 {
890         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
891         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
892         u8 fv_words = hw->blk[params->blk].es.fvw;
893         struct ice_flow_fld_info *flds;
894         u16 cnt, ese_bits, i;
895         u16 sib_mask = 0;
896         s16 adj = 0;
897         u16 mask;
898         u16 off;
899
900         flds = params->prof->segs[seg].fields;
901
902         switch (fld) {
903         case ICE_FLOW_FIELD_IDX_ETH_DA:
904         case ICE_FLOW_FIELD_IDX_ETH_SA:
905         case ICE_FLOW_FIELD_IDX_S_VLAN:
906         case ICE_FLOW_FIELD_IDX_C_VLAN:
907                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
908                 break;
909         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
910                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
911                 break;
912         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
913                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
914                 break;
915         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
916                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
917                 break;
918         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
919         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
920                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
921
922                 /* TTL and PROT share the same extraction seq. entry.
923                  * Each is considered a sibling to the other in terms of sharing
924                  * the same extraction sequence entry.
925                  */
926                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
927                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
928                 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
929                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
930
931                 /* If the sibling field is also included, that field's
932                  * mask needs to be included.
933                  */
934                 if (match & BIT(sib))
935                         sib_mask = ice_flds_info[sib].mask;
936                 break;
937         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
938         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
939                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
940
941                 /* TTL and PROT share the same extraction seq. entry.
942                  * Each is considered a sibling to the other in terms of sharing
943                  * the same extraction sequence entry.
944                  */
945                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
946                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
947                 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
948                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
949
950                 /* If the sibling field is also included, that field's
951                  * mask needs to be included.
952                  */
953                 if (match & BIT(sib))
954                         sib_mask = ice_flds_info[sib].mask;
955                 break;
956         case ICE_FLOW_FIELD_IDX_IPV4_SA:
957         case ICE_FLOW_FIELD_IDX_IPV4_DA:
958                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
959                 break;
960         case ICE_FLOW_FIELD_IDX_IPV6_SA:
961         case ICE_FLOW_FIELD_IDX_IPV6_DA:
962                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
963                 break;
964         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
965         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
966         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
967                 prot_id = ICE_PROT_TCP_IL;
968                 break;
969         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
970         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
971                 prot_id = ICE_PROT_UDP_IL_OR_S;
972                 break;
973         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
974         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
975                 prot_id = ICE_PROT_SCTP_IL;
976                 break;
977         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
978         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
979         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
980         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
981         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
982         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
983                 /* GTP is accessed through UDP OF protocol */
984                 prot_id = ICE_PROT_UDP_OF;
985                 break;
986         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
987                 prot_id = ICE_PROT_PPPOE;
988                 break;
989         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
990                 prot_id = ICE_PROT_UDP_IL_OR_S;
991                 break;
992         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
993                 prot_id = ICE_PROT_L2TPV3;
994                 break;
995         case ICE_FLOW_FIELD_IDX_ESP_SPI:
996                 prot_id = ICE_PROT_ESP_F;
997                 break;
998         case ICE_FLOW_FIELD_IDX_AH_SPI:
999                 prot_id = ICE_PROT_ESP_2;
1000                 break;
1001         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1002                 prot_id = ICE_PROT_UDP_IL_OR_S;
1003                 break;
1004         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1005         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1006         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1007         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1008         case ICE_FLOW_FIELD_IDX_ARP_OP:
1009                 prot_id = ICE_PROT_ARP_OF;
1010                 break;
1011         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1012         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1013                 /* ICMP type and code share the same extraction seq. entry */
1014                 prot_id = (params->prof->segs[seg].hdrs &
1015                            ICE_FLOW_SEG_HDR_IPV4) ?
1016                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1017                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1018                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1019                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1020                 break;
1021         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1022                 prot_id = ICE_PROT_GRE_OF;
1023                 break;
1024         default:
1025                 return ICE_ERR_NOT_IMPL;
1026         }
1027
1028         /* Each extraction sequence entry is a word in size, and extracts a
1029          * word-aligned offset from a protocol header.
1030          */
1031         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1032
1033         flds[fld].xtrct.prot_id = prot_id;
1034         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1035                 ICE_FLOW_FV_EXTRACT_SZ;
1036         flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
1037         flds[fld].xtrct.idx = params->es_cnt;
1038         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1039
1040         /* Adjust the next field-entry index after accommodating the number of
1041          * entries this field consumes
1042          */
1043         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1044                                   ice_flds_info[fld].size, ese_bits);
1045
1046         /* Fill in the extraction sequence entries needed for this field */
1047         off = flds[fld].xtrct.off;
1048         mask = flds[fld].xtrct.mask;
1049         for (i = 0; i < cnt; i++) {
1050                 /* Only consume an extraction sequence entry if there is no
1051                  * sibling field associated with this field or the sibling entry
1052                  * already extracts the word shared with this field.
1053                  */
1054                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1055                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1056                     flds[sib].xtrct.off != off) {
1057                         u8 idx;
1058
1059                         /* Make sure the number of extraction sequence entries
1060                          * required does not exceed the block's capacity
1061                          */
1062                         if (params->es_cnt >= fv_words)
1063                                 return ICE_ERR_MAX_LIMIT;
1064
1065                         /* some blocks require a reversed field vector layout */
1066                         if (hw->blk[params->blk].es.reverse)
1067                                 idx = fv_words - params->es_cnt - 1;
1068                         else
1069                                 idx = params->es_cnt;
1070
1071                         params->es[idx].prot_id = prot_id;
1072                         params->es[idx].off = off;
1073                         params->mask[idx] = mask | sib_mask;
1074                         params->es_cnt++;
1075                 }
1076
1077                 off += ICE_FLOW_FV_EXTRACT_SZ;
1078         }
1079
1080         return ICE_SUCCESS;
1081 }
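
/* Worked example for ICE_FLOW_FIELD_IDX_IPV4_SA: the field starts at byte
 * offset 12 of the IPv4 header (bit offset 96) and is 32 bits wide.  With
 * ese_bits = 16, xtrct.off = (96 / 16) * 2 = 12, xtrct.disp = 0 and
 * cnt = DIVIDE_AND_ROUND_UP(0 + 32, 16) = 2, so two word-sized extraction
 * entries are consumed, at protocol offsets 12 and 14.
 */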
1082
1083 /**
1084  * ice_flow_xtract_raws - Create extraction sequence entries for raw bytes
1085  * @hw: pointer to the HW struct
1086  * @params: information about the flow to be processed
1087  * @seg: index of packet segment whose raw fields are to be extracted
1088  */
1089 static enum ice_status
1090 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1091                      u8 seg)
1092 {
1093         u16 fv_words;
1094         u16 hdrs_sz;
1095         u8 i;
1096
1097         if (!params->prof->segs[seg].raws_cnt)
1098                 return ICE_SUCCESS;
1099
1100         if (params->prof->segs[seg].raws_cnt >
1101             ARRAY_SIZE(params->prof->segs[seg].raws))
1102                 return ICE_ERR_MAX_LIMIT;
1103
1104         /* Offsets within the segment headers are not supported */
1105         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1106         if (!hdrs_sz)
1107                 return ICE_ERR_PARAM;
1108
1109         fv_words = hw->blk[params->blk].es.fvw;
1110
1111         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1112                 struct ice_flow_seg_fld_raw *raw;
1113                 u16 off, cnt, j;
1114
1115                 raw = &params->prof->segs[seg].raws[i];
1116
1117                 /* Storing extraction information */
1118                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1119                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1120                         ICE_FLOW_FV_EXTRACT_SZ;
1121                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1122                         BITS_PER_BYTE;
1123                 raw->info.xtrct.idx = params->es_cnt;
1124
1125                 /* Determine the number of field vector entries this raw field
1126                  * consumes.
1127                  */
1128                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1129                                           (raw->info.src.last * BITS_PER_BYTE),
1130                                           (ICE_FLOW_FV_EXTRACT_SZ *
1131                                            BITS_PER_BYTE));
1132                 off = raw->info.xtrct.off;
1133                 for (j = 0; j < cnt; j++) {
1134                         u16 idx;
1135
1136                         /* Make sure the number of extraction sequence entries
1137                          * required does not exceed the block's capability
1138                          */
1139                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1140                             params->es_cnt >= ICE_MAX_FV_WORDS)
1141                                 return ICE_ERR_MAX_LIMIT;
1142
1143                         /* some blocks require a reversed field vector layout */
1144                         if (hw->blk[params->blk].es.reverse)
1145                                 idx = fv_words - params->es_cnt - 1;
1146                         else
1147                                 idx = params->es_cnt;
1148
1149                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1150                         params->es[idx].off = off;
1151                         params->es_cnt++;
1152                         off += ICE_FLOW_FV_EXTRACT_SZ;
1153                 }
1154         }
1155
1156         return ICE_SUCCESS;
1157 }
1158
1159 /**
1160  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1161  * @hw: pointer to the HW struct
1162  * @params: information about the flow to be processed
1163  *
1164  * This function iterates through all matched fields in the given segments, and
1165  * creates an extraction sequence for the fields.
1166  */
1167 static enum ice_status
1168 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1169                           struct ice_flow_prof_params *params)
1170 {
1171         enum ice_status status = ICE_SUCCESS;
1172         u8 i;
1173
1174         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1175          * packet flags
1176          */
1177         if (params->blk == ICE_BLK_ACL) {
1178                 status = ice_flow_xtract_pkt_flags(hw, params,
1179                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1180                 if (status)
1181                         return status;
1182         }
1183
1184         for (i = 0; i < params->prof->segs_cnt; i++) {
1185                 u64 match = params->prof->segs[i].match;
1186                 enum ice_flow_field j;
1187
1188                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1189                         const u64 bit = BIT_ULL(j);
1190
1191                         if (match & bit) {
1192                                 status = ice_flow_xtract_fld(hw, params, i, j,
1193                                                              match);
1194                                 if (status)
1195                                         return status;
1196                                 match &= ~bit;
1197                         }
1198                 }
1199
1200                 /* Process raw matching bytes */
1201                 status = ice_flow_xtract_raws(hw, params, i);
1202                 if (status)
1203                         return status;
1204         }
1205
1206         return status;
1207 }
1208
1209 /**
1210  * ice_flow_sel_acl_scen - select an ACL scenario for the flow profile
1211  * @hw: pointer to the hardware structure
1212  * @params: information about the flow to be processed
1213  *
1214  * This function selects the best-fit ACL scenario for the entry length
1215  * computed in the params passed to it and stores it in the profile.
1216  */
1217 static enum ice_status
1218 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1219 {
1220         /* Find the best-fit scenario for the provided match width */
1221         struct ice_acl_scen *cand_scen = NULL, *scen;
1222
1223         if (!hw->acl_tbl)
1224                 return ICE_ERR_DOES_NOT_EXIST;
1225
1226         /* Loop through each scenario and match against the scenario width
1227          * to select the specific scenario
1228          */
1229         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1230                 if (scen->eff_width >= params->entry_length &&
1231                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1232                         cand_scen = scen;
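             /* cand_scen is now the narrowest scenario that can still hold an
              * entry of params->entry_length bytes, if any exists.
              */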
1233         if (!cand_scen)
1234                 return ICE_ERR_DOES_NOT_EXIST;
1235
1236         params->prof->cfg.scen = cand_scen;
1237
1238         return ICE_SUCCESS;
1239 }
1240
1241 /**
1242  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1243  * @params: information about the flow to be processed
1244  */
1245 static enum ice_status
1246 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1247 {
1248         u16 index, i, range_idx = 0;
1249
1250         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1251
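             /* Assign each matched field and raw match a position in the ACL
              * scenario entry; range-checked fields consume a range checker
              * slot instead of entry bytes.
              */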
1252         for (i = 0; i < params->prof->segs_cnt; i++) {
1253                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1254                 u64 match = seg->match;
1255                 u8 j;
1256
1257                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1258                         struct ice_flow_fld_info *fld;
1259                         const u64 bit = BIT_ULL(j);
1260
1261                         if (!(match & bit))
1262                                 continue;
1263
1264                         fld = &seg->fields[j];
1265                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1266
1267                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1268                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1269
1270                                 /* Range checking only supported for single
1271                                  * words
1272                                  */
1273                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1274                                                         fld->xtrct.disp,
1275                                                         BITS_PER_BYTE * 2) > 1)
1276                                         return ICE_ERR_PARAM;
1277
1278                                 /* Ranges must define low and high values */
1279                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1280                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1281                                         return ICE_ERR_PARAM;
1282
1283                                 fld->entry.val = range_idx++;
1284                         } else {
1285                                 /* Store adjusted byte-length of field for later
1286                                  * use, taking into account potential
1287                                  * non-byte-aligned displacement
1288                                  */
1289                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1290                                         (ice_flds_info[j].size +
1291                                          (fld->xtrct.disp % BITS_PER_BYTE),
1292                                          BITS_PER_BYTE);
1293                                 fld->entry.val = index;
1294                                 index += fld->entry.last;
1295                         }
1296
1297                         match &= ~bit;
1298                 }
1299
1300                 for (j = 0; j < seg->raws_cnt; j++) {
1301                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1302
1303                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1304                         raw->info.entry.val = index;
1305                         raw->info.entry.last = raw->info.src.last;
1306                         index += raw->info.entry.last;
1307                 }
1308         }
1309
1310         /* Currently we only support using the byte selection base, which
1311          * only allows for an effective entry size of 30 bytes. Reject anything
1312          * larger.
1313          */
1314         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1315                 return ICE_ERR_PARAM;
1316
1317         /* Only 8 range checkers per profile, reject anything trying to use
1318          * more
1319          */
1320         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1321                 return ICE_ERR_PARAM;
1322
1323         /* Store # bytes required for entry for later use */
1324         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1325
1326         return ICE_SUCCESS;
1327 }
1328
1329 /**
1330  * ice_flow_proc_segs - process all packet segments associated with a profile
1331  * @hw: pointer to the HW struct
1332  * @params: information about the flow to be processed
1333  */
1334 static enum ice_status
1335 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1336 {
1337         enum ice_status status;
1338
1339         status = ice_flow_proc_seg_hdrs(params);
1340         if (status)
1341                 return status;
1342
1343         status = ice_flow_create_xtrct_seq(hw, params);
1344         if (status)
1345                 return status;
1346
1347         switch (params->blk) {
1348         case ICE_BLK_FD:
1349         case ICE_BLK_RSS:
1350                 status = ICE_SUCCESS;
1351                 break;
1352         case ICE_BLK_ACL:
1353                 status = ice_flow_acl_def_entry_frmt(params);
1354                 if (status)
1355                         return status;
1356                 status = ice_flow_sel_acl_scen(hw, params);
1357                 if (status)
1358                         return status;
1359                 break;
1360         case ICE_BLK_SW:
1361         default:
1362                 return ICE_ERR_NOT_IMPL;
1363         }
1364
1365         return status;
1366 }
1367
1368 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1369 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1370 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1371
1372 /**
1373  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1374  * @hw: pointer to the HW struct
1375  * @blk: classification stage
1376  * @dir: flow direction
1377  * @segs: array of one or more packet segments that describe the flow
1378  * @segs_cnt: number of packet segments provided
1379  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1380  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1381  */
1382 static struct ice_flow_prof *
1383 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1384                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1385                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1386 {
1387         struct ice_flow_prof *p, *prof = NULL;
1388
1389         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1390         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1391                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1392                     segs_cnt && segs_cnt == p->segs_cnt) {
1393                         u8 i;
1394
1395                         /* Check for profile-VSI association if specified */
1396                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1397                             ice_is_vsi_valid(hw, vsi_handle) &&
1398                             !ice_is_bit_set(p->vsis, vsi_handle))
1399                                 continue;
1400
1401                         /* Protocol headers must be checked. Matched fields are
1402                          * checked if specified.
1403                          */
1404                         for (i = 0; i < segs_cnt; i++)
1405                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1406                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1407                                      segs[i].match != p->segs[i].match))
1408                                         break;
1409
1410                         /* A match is found if all segments are matched */
1411                         if (i == segs_cnt) {
1412                                 prof = p;
1413                                 break;
1414                         }
1415                 }
1416         ice_release_lock(&hw->fl_profs_locks[blk]);
1417
1418         return prof;
1419 }
1420
1421 /**
1422  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1423  * @hw: pointer to the HW struct
1424  * @blk: classification stage
1425  * @dir: flow direction
1426  * @segs: array of one or more packet segments that describe the flow
1427  * @segs_cnt: number of packet segments provided
1428  */
1429 u64
1430 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1431                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1432 {
1433         struct ice_flow_prof *p;
1434
1435         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1436                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1437
1438         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1439 }
1440
1441 /**
1442  * ice_flow_find_prof_id - Look up a profile with given profile ID
1443  * @hw: pointer to the HW struct
1444  * @blk: classification stage
1445  * @prof_id: unique ID to identify this flow profile
1446  */
1447 static struct ice_flow_prof *
1448 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1449 {
1450         struct ice_flow_prof *p;
1451
1452         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1453                 if (p->id == prof_id)
1454                         return p;
1455
1456         return NULL;
1457 }
1458
1459 /**
1460  * ice_dealloc_flow_entry - Deallocate flow entry memory
1461  * @hw: pointer to the HW struct
1462  * @entry: flow entry to be removed
1463  */
1464 static void
1465 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1466 {
1467         if (!entry)
1468                 return;
1469
1470         if (entry->entry)
1471                 ice_free(hw, entry->entry);
1472
1473         if (entry->range_buf) {
1474                 ice_free(hw, entry->range_buf);
1475                 entry->range_buf = NULL;
1476         }
1477
1478         if (entry->acts) {
1479                 ice_free(hw, entry->acts);
1480                 entry->acts = NULL;
1481                 entry->acts_cnt = 0;
1482         }
1483
1484         ice_free(hw, entry);
1485 }
1486
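     /* A PF scenario number of ICE_ACL_INVALID_SCEN (63) means the scenario has
      * been cleared, i.e. the profile is not configured for that PF.
      */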
1487 #define ICE_ACL_INVALID_SCEN    0x3f
1488
1489 /**
1490  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1491  * @hw: pointer to the hardware structure
1492  * @prof: pointer to flow profile
1493  * @buf: destination buffer the function writes the partial extraction sequence to
1494  *
1495  * returns ICE_SUCCESS if no PF is associated with the given profile
1496  * returns ICE_ERR_IN_USE if at least one PF is associated with the given profile
1497  * returns other error codes for real errors
1498  */
1499 static enum ice_status
1500 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1501                             struct ice_aqc_acl_prof_generic_frmt *buf)
1502 {
1503         enum ice_status status;
1504         u8 prof_id = 0;
1505
1506         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1507         if (status)
1508                 return status;
1509
1510         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1511         if (status)
1512                 return status;
1513
1514         /* If the PFs' associated scenarios for the given profile are all 0
1515          * or all ICE_ACL_INVALID_SCEN (63), then the profile has not been
1516          * configured yet.
1517          */
1518         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1519             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1520             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1521             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1522                 return ICE_SUCCESS;
1523
1524         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1525             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1526             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1527             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1528             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1529             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1530             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1531             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1532                 return ICE_SUCCESS;
1533         else
1534                 return ICE_ERR_IN_USE;
1535 }
1536
1537 /**
1538  * ice_flow_acl_free_act_cntr - Free the ACL counters used by the rule's actions
1539  * @hw: pointer to the hardware structure
1540  * @acts: array of actions to be performed on a match
1541  * @acts_cnt: number of actions
1542  */
1543 static enum ice_status
1544 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1545                            u8 acts_cnt)
1546 {
1547         int i;
1548
1549         for (i = 0; i < acts_cnt; i++) {
1550                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1551                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1552                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1553                         struct ice_acl_cntrs cntrs;
1554                         enum ice_status status;
1555
1556                         cntrs.bank = 0; /* Only bank0 for the moment */
1557                         cntrs.first_cntr =
1558                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1559                         cntrs.last_cntr =
1560                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1561
1562                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1563                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1564                         else
1565                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1566
1567                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1568                         if (status)
1569                                 return status;
1570                 }
1571         }
1572         return ICE_SUCCESS;
1573 }
1574
1575 /**
1576  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1577  * @hw: pointer to the hardware structure
1578  * @prof: pointer to flow profile
1579  *
1580  * Disassociate the scenario from the profile for the PF of the VSI.
1581  */
1582 static enum ice_status
1583 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1584 {
1585         struct ice_aqc_acl_prof_generic_frmt buf;
1586         enum ice_status status = ICE_SUCCESS;
1587         u8 prof_id = 0;
1588
1589         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1590
1591         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1592         if (status)
1593                 return status;
1594
1595         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1596         if (status)
1597                 return status;
1598
1599         /* Clear scenario for this PF */
1600         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1601         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1602
1603         return status;
1604 }
1605
1606 /**
1607  * ice_flow_rem_entry_sync - Remove a flow entry
1608  * @hw: pointer to the HW struct
1609  * @blk: classification stage
1610  * @entry: flow entry to be removed
1611  */
1612 static enum ice_status
1613 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1614                         struct ice_flow_entry *entry)
1615 {
1616         if (!entry)
1617                 return ICE_ERR_BAD_PTR;
1618
1619         if (blk == ICE_BLK_ACL) {
1620                 enum ice_status status;
1621
1622                 if (!entry->prof)
1623                         return ICE_ERR_BAD_PTR;
1624
1625                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1626                                            entry->scen_entry_idx);
1627                 if (status)
1628                         return status;
1629
1630                 /* Checks if we need to release an ACL counter. */
1631                 if (entry->acts_cnt && entry->acts)
1632                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1633                                                    entry->acts_cnt);
1634         }
1635
1636         LIST_DEL(&entry->l_entry);
1637
1638         ice_dealloc_flow_entry(hw, entry);
1639
1640         return ICE_SUCCESS;
1641 }
1642
1643 /**
1644  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1645  * @hw: pointer to the HW struct
1646  * @blk: classification stage
1647  * @dir: flow direction
1648  * @prof_id: unique ID to identify this flow profile
1649  * @segs: array of one or more packet segments that describe the flow
1650  * @segs_cnt: number of packet segments provided
1651  * @acts: array of default actions
1652  * @acts_cnt: number of default actions
1653  * @prof: stores the returned flow profile added
1654  *
1655  * Assumption: the caller has acquired the lock to the profile list
1656  */
1657 static enum ice_status
1658 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1659                        enum ice_flow_dir dir, u64 prof_id,
1660                        struct ice_flow_seg_info *segs, u8 segs_cnt,
1661                        struct ice_flow_action *acts, u8 acts_cnt,
1662                        struct ice_flow_prof **prof)
1663 {
1664         struct ice_flow_prof_params params;
1665         enum ice_status status;
1666         u8 i;
1667
1668         if (!prof || (acts_cnt && !acts))
1669                 return ICE_ERR_BAD_PTR;
1670
1671         ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
1672         params.prof = (struct ice_flow_prof *)
1673                 ice_malloc(hw, sizeof(*params.prof));
1674         if (!params.prof)
1675                 return ICE_ERR_NO_MEMORY;
1676
1677         /* initialize extraction sequence to all invalid (0xff) */
1678         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1679                 params.es[i].prot_id = ICE_PROT_INVALID;
1680                 params.es[i].off = ICE_FV_OFFSET_INVAL;
1681         }
1682
1683         params.blk = blk;
1684         params.prof->id = prof_id;
1685         params.prof->dir = dir;
1686         params.prof->segs_cnt = segs_cnt;
1687
1688         /* Make a copy of the segments that need to be persistent in the flow
1689          * profile instance
1690          */
1691         for (i = 0; i < segs_cnt; i++)
1692                 ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
1693                            ICE_NONDMA_TO_NONDMA);
1694
1695         /* Make a copy of the actions that need to be persistent in the flow
1696          * profile instance.
1697          */
1698         if (acts_cnt) {
1699                 params.prof->acts = (struct ice_flow_action *)
1700                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1701                                    ICE_NONDMA_TO_NONDMA);
1702
1703                 if (!params.prof->acts) {
1704                         status = ICE_ERR_NO_MEMORY;
1705                         goto out;
1706                 }
1707         }
1708
1709         status = ice_flow_proc_segs(hw, &params);
1710         if (status) {
1711                 ice_debug(hw, ICE_DBG_FLOW,
1712                           "Error processing a flow's packet segments\n");
1713                 goto out;
1714         }
1715
1716         /* Add a HW profile for this flow profile */
1717         status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
1718                               params.attr, params.attr_cnt, params.es,
1719                               params.mask);
1720         if (status) {
1721                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1722                 goto out;
1723         }
1724
1725         INIT_LIST_HEAD(&params.prof->entries);
1726         ice_init_lock(&params.prof->entries_lock);
1727         *prof = params.prof;
1728
1729 out:
1730         if (status) {
1731                 if (params.prof->acts)
1732                         ice_free(hw, params.prof->acts);
1733                 ice_free(hw, params.prof);
1734         }
1735
1736         return status;
1737 }
1738
1739 /**
1740  * ice_flow_rem_prof_sync - remove a flow profile
1741  * @hw: pointer to the hardware structure
1742  * @blk: classification stage
1743  * @prof: pointer to flow profile to remove
1744  *
1745  * Assumption: the caller has acquired the lock to the profile list
1746  */
1747 static enum ice_status
1748 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1749                        struct ice_flow_prof *prof)
1750 {
1751         enum ice_status status;
1752
1753         /* Remove all remaining flow entries before removing the flow profile */
1754         if (!LIST_EMPTY(&prof->entries)) {
1755                 struct ice_flow_entry *e, *t;
1756
1757                 ice_acquire_lock(&prof->entries_lock);
1758
1759                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1760                                          l_entry) {
1761                         status = ice_flow_rem_entry_sync(hw, blk, e);
1762                         if (status)
1763                                 break;
1764                 }
1765
1766                 ice_release_lock(&prof->entries_lock);
1767         }
1768
1769         if (blk == ICE_BLK_ACL) {
1770                 struct ice_aqc_acl_profile_ranges query_rng_buf;
1771                 struct ice_aqc_acl_prof_generic_frmt buf;
1772                 u8 prof_id = 0;
1773
1774                 /* Disassociate the scenario from the profile for the PF */
1775                 status = ice_flow_acl_disassoc_scen(hw, prof);
1776                 if (status)
1777                         return status;
1778
1779                 /* Clear the range-checker if the profile ID is no longer
1780                  * used by any PF
1781                  */
1782                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1783                 if (status && status != ICE_ERR_IN_USE) {
1784                         return status;
1785                 } else if (!status) {
1786                         /* Clear the range-checker value for profile ID */
1787                         ice_memset(&query_rng_buf, 0,
1788                                    sizeof(struct ice_aqc_acl_profile_ranges),
1789                                    ICE_NONDMA_MEM);
1790
1791                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
1792                                                       &prof_id);
1793                         if (status)
1794                                 return status;
1795
1796                         status = ice_prog_acl_prof_ranges(hw, prof_id,
1797                                                           &query_rng_buf, NULL);
1798                         if (status)
1799                                 return status;
1800                 }
1801         }
1802
1803         /* Remove all hardware profiles associated with this flow profile */
1804         status = ice_rem_prof(hw, blk, prof->id);
1805         if (!status) {
1806                 LIST_DEL(&prof->l_entry);
1807                 ice_destroy_lock(&prof->entries_lock);
1808                 if (prof->acts)
1809                         ice_free(hw, prof->acts);
1810                 ice_free(hw, prof);
1811         }
1812
1813         return status;
1814 }
1815
1816 /**
1817  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1818  * @buf: Destination buffer the function writes the partial xtrct sequence to
1819  * @info: Info about field
1820  */
1821 static void
1822 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1823                                struct ice_flow_fld_info *info)
1824 {
1825         u16 dst, i;
1826         u8 src;
1827
1828         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1829                 info->xtrct.disp / BITS_PER_BYTE;
1830         dst = info->entry.val;
1831         for (i = 0; i < info->entry.last; i++)
1832                 /* HW stores field vector words in LE, convert words back to BE
1833                  * so constructed entries will end up in network order
1834                  */
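                     /* XOR-ing the byte index with 1 swaps the two bytes of
                      * each 16-bit extraction word (0<->1, 2<->3, ...).
                      */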
1835                 buf->byte_selection[dst++] = src++ ^ 1;
1836 }
1837
1838 /**
1839  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
1840  * @hw: pointer to the hardware structure
1841  * @prof: pointer to flow profile
1842  */
1843 static enum ice_status
1844 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
1845 {
1846         struct ice_aqc_acl_prof_generic_frmt buf;
1847         struct ice_flow_fld_info *info;
1848         enum ice_status status;
1849         u8 prof_id = 0;
1850         u16 i;
1851
1852         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1853
1854         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1855         if (status)
1856                 return status;
1857
1858         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1859         if (status && status != ICE_ERR_IN_USE)
1860                 return status;
1861
1862         if (!status) {
1863                 /* Program the profile dependent configuration. This is done
1864                  * only once regardless of the number of PFs using that profile
1865                  */
1866                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1867
1868                 for (i = 0; i < prof->segs_cnt; i++) {
1869                         struct ice_flow_seg_info *seg = &prof->segs[i];
1870                         u64 match = seg->match;
1871                         u16 j;
1872
1873                         for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1874                                 const u64 bit = BIT_ULL(j);
1875
1876                                 if (!(match & bit))
1877                                         continue;
1878
1879                                 info = &seg->fields[j];
1880
1881                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
1882                                         buf.word_selection[info->entry.val] =
1883                                                                 info->xtrct.idx;
1884                                 else
1885                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
1886                                                                        info);
1887
1888                                 match &= ~bit;
1889                         }
1890
1891                         for (j = 0; j < seg->raws_cnt; j++) {
1892                                 info = &seg->raws[j].info;
1893                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
1894                         }
1895                 }
1896
1897                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
1898                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
1899                            ICE_NONDMA_MEM);
1900         }
1901
1902         /* Update the current PF */
1903         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
1904         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1905
1906         return status;
1907 }
1908
1909 /**
1910  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1911  * @hw: pointer to the hardware structure
1912  * @blk: classification stage
1913  * @vsi_handle: software VSI handle
1914  * @vsig: target VSI group
1915  *
1916  * Assumption: the caller has already verified that the VSI to
1917  * be added has the same characteristics as the VSIG and will
1918  * thereby have access to all resources added to that VSIG.
1919  */
1920 enum ice_status
1921 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1922                         u16 vsig)
1923 {
1924         enum ice_status status;
1925
1926         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1927                 return ICE_ERR_PARAM;
1928
1929         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1930         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1931                                   vsig);
1932         ice_release_lock(&hw->fl_profs_locks[blk]);
1933
1934         return status;
1935 }
1936
1937 /**
1938  * ice_flow_assoc_prof - associate a VSI with a flow profile
1939  * @hw: pointer to the hardware structure
1940  * @blk: classification stage
1941  * @prof: pointer to flow profile
1942  * @vsi_handle: software VSI handle
1943  *
1944  * Assumption: the caller has acquired the lock to the profile list
1945  * and the software VSI handle has been validated
1946  */
1947 static enum ice_status
1948 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1949                     struct ice_flow_prof *prof, u16 vsi_handle)
1950 {
1951         enum ice_status status = ICE_SUCCESS;
1952
1953         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1954                 if (blk == ICE_BLK_ACL) {
1955                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
1956                         if (status)
1957                                 return status;
1958                 }
1959                 status = ice_add_prof_id_flow(hw, blk,
1960                                               ice_get_hw_vsi_num(hw,
1961                                                                  vsi_handle),
1962                                               prof->id);
1963                 if (!status)
1964                         ice_set_bit(vsi_handle, prof->vsis);
1965                 else
1966                         ice_debug(hw, ICE_DBG_FLOW,
1967                                   "HW profile add failed, %d\n",
1968                                   status);
1969         }
1970
1971         return status;
1972 }
1973
1974 /**
1975  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1976  * @hw: pointer to the hardware structure
1977  * @blk: classification stage
1978  * @prof: pointer to flow profile
1979  * @vsi_handle: software VSI handle
1980  *
1981  * Assumption: the caller has acquired the lock to the profile list
1982  * and the software VSI handle has been validated
1983  */
1984 static enum ice_status
1985 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1986                        struct ice_flow_prof *prof, u16 vsi_handle)
1987 {
1988         enum ice_status status = ICE_SUCCESS;
1989
1990         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
1991                 status = ice_rem_prof_id_flow(hw, blk,
1992                                               ice_get_hw_vsi_num(hw,
1993                                                                  vsi_handle),
1994                                               prof->id);
1995                 if (!status)
1996                         ice_clear_bit(vsi_handle, prof->vsis);
1997                 else
1998                         ice_debug(hw, ICE_DBG_FLOW,
1999                                   "HW profile remove failed, %d\n",
2000                                   status);
2001         }
2002
2003         return status;
2004 }
2005
2006 /**
2007  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2008  * @hw: pointer to the HW struct
2009  * @blk: classification stage
2010  * @dir: flow direction
2011  * @prof_id: unique ID to identify this flow profile
2012  * @segs: array of one or more packet segments that describe the flow
2013  * @segs_cnt: number of packet segments provided
2014  * @acts: array of default actions
2015  * @acts_cnt: number of default actions
2016  * @prof: stores the returned flow profile added
2017  */
2018 enum ice_status
2019 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2020                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2021                   struct ice_flow_action *acts, u8 acts_cnt,
2022                   struct ice_flow_prof **prof)
2023 {
2024         enum ice_status status;
2025
2026         if (segs_cnt > ICE_FLOW_SEG_MAX)
2027                 return ICE_ERR_MAX_LIMIT;
2028
2029         if (!segs_cnt)
2030                 return ICE_ERR_PARAM;
2031
2032         if (!segs)
2033                 return ICE_ERR_BAD_PTR;
2034
2035         status = ice_flow_val_hdrs(segs, segs_cnt);
2036         if (status)
2037                 return status;
2038
2039         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2040
2041         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2042                                         acts, acts_cnt, prof);
2043         if (!status)
2044                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2045
2046         ice_release_lock(&hw->fl_profs_locks[blk]);
2047
2048         return status;
2049 }
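     /* Usage sketch (illustrative only, not part of this driver): build a
      * single segment, mark the fields to match, then add the profile. The
      * helpers used below (ICE_FLOW_SET_HDRS, ice_flow_set_fld) are assumed
      * to be provided by ice_flow.h with these signatures.
      *
      *    struct ice_flow_seg_info seg = { 0 };
      *    struct ice_flow_prof *prof;
      *
      *    ICE_FLOW_SET_HDRS(&seg, ICE_FLOW_SEG_HDR_IPV4);
      *    ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
      *                     ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
      *                     ICE_FLOW_FLD_OFF_INVAL, false);
      *    ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, prof_id, &seg, 1,
      *                      NULL, 0, &prof);
      */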
2050
2051 /**
2052  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2053  * @hw: pointer to the HW struct
2054  * @blk: the block for which the flow profile is to be removed
2055  * @prof_id: unique ID of the flow profile to be removed
2056  */
2057 enum ice_status
2058 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2059 {
2060         struct ice_flow_prof *prof;
2061         enum ice_status status;
2062
2063         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2064
2065         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2066         if (!prof) {
2067                 status = ICE_ERR_DOES_NOT_EXIST;
2068                 goto out;
2069         }
2070
2071         /* prof becomes invalid after the call */
2072         status = ice_flow_rem_prof_sync(hw, blk, prof);
2073
2074 out:
2075         ice_release_lock(&hw->fl_profs_locks[blk]);
2076
2077         return status;
2078 }
2079
2080 /**
2081  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2082  * @hw: pointer to the HW struct
2083  * @blk: classification stage
2084  * @prof_id: the profile ID handle
2085  * @hw_prof_id: pointer to variable to receive the HW profile ID
2086  */
2087 enum ice_status
2088 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2089                      u8 *hw_prof_id)
2090 {
2091         struct ice_prof_map *map;
2092
2093         map = ice_search_prof_id(hw, blk, prof_id);
2094         if (map) {
2095                 *hw_prof_id = map->prof_id;
2096                 return ICE_SUCCESS;
2097         }
2098
2099         return ICE_ERR_DOES_NOT_EXIST;
2100 }
2101
2102 /**
2103  * ice_flow_find_entry - look for a flow entry using its unique ID
2104  * @hw: pointer to the HW struct
2105  * @blk: classification stage
2106  * @entry_id: unique ID to identify this flow entry
2107  *
2108  * This function looks for the flow entry with the specified unique ID in all
2109  * flow profiles of the specified classification stage. If the entry is found,
2110  * it returns the handle to the flow entry. Otherwise, it returns
2111  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2112  */
2113 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2114 {
2115         struct ice_flow_entry *found = NULL;
2116         struct ice_flow_prof *p;
2117
2118         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2119
2120         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2121                 struct ice_flow_entry *e;
2122
2123                 ice_acquire_lock(&p->entries_lock);
2124                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2125                         if (e->id == entry_id) {
2126                                 found = e;
2127                                 break;
2128                         }
2129                 ice_release_lock(&p->entries_lock);
2130
2131                 if (found)
2132                         break;
2133         }
2134
2135         ice_release_lock(&hw->fl_profs_locks[blk]);
2136
2137         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2138 }
2139
2140 /**
2141  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2142  * @hw: pointer to the hardware structure
2143  * @acts: array of actions to be performed on a match
2144  * @acts_cnt: number of actions
2145  * @cnt_alloc: indicates if an ACL counter has been allocated.
2146  */
2147 static enum ice_status
2148 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2149                            u8 acts_cnt, bool *cnt_alloc)
2150 {
2151         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2152         int i;
2153
2154         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2155         *cnt_alloc = false;
2156
2157         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2158                 return ICE_ERR_OUT_OF_RANGE;
2159
2160         for (i = 0; i < acts_cnt; i++) {
2161                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2162                     acts[i].type != ICE_FLOW_ACT_DROP &&
2163                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2164                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2165                         return ICE_ERR_CFG;
2166
2167                 /* If the caller wants to add two actions of the same type, then
2168                  * it is considered an invalid configuration.
2169                  */
2170                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2171                         return ICE_ERR_PARAM;
2172         }
2173
2174         /* Checks if ACL counters are needed. */
2175         for (i = 0; i < acts_cnt; i++) {
2176                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2177                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2178                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2179                         struct ice_acl_cntrs cntrs;
2180                         enum ice_status status;
2181
2182                         cntrs.amount = 1;
2183                         cntrs.bank = 0; /* Only bank0 for the moment */
2184
2185                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2186                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2187                         else
2188                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2189
2190                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2191                         if (status)
2192                                 return status;
2193                         /* Counter index within the bank */
2194                         acts[i].data.acl_act.value =
2195                                                 CPU_TO_LE16(cntrs.first_cntr);
2196                         *cnt_alloc = true;
2197                 }
2198         }
2199
2200         return ICE_SUCCESS;
2201 }
2202
2203 /**
2204  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2205  * @fld: number of the given field
2206  * @info: info about field
2207  * @range_buf: range checker configuration buffer
2208  * @data: pointer to a data buffer containing flow entry's match values/masks
2209  * @range: Input/output param indicating which range checkers are being used
2210  */
2211 static void
2212 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2213                               struct ice_aqc_acl_profile_ranges *range_buf,
2214                               u8 *data, u8 *range)
2215 {
2216         u16 new_mask;
2217
2218         /* If not specified, default mask is all bits in field */
2219         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2220                     BIT(ice_flds_info[fld].size) - 1 :
2221                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2222
2223         /* If the mask is 0, then we don't need to worry about this input
2224          * range checker value.
2225          */
2226         if (new_mask) {
2227                 u16 new_high =
2228                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2229                 u16 new_low =
2230                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2231                 u8 range_idx = info->entry.val;
2232
2233                 range_buf->checker_cfg[range_idx].low_boundary =
2234                         CPU_TO_BE16(new_low);
2235                 range_buf->checker_cfg[range_idx].high_boundary =
2236                         CPU_TO_BE16(new_high);
2237                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2238
2239                 /* Indicate which range checker is being used */
2240                 *range |= BIT(range_idx);
2241         }
2242 }
2243
2244 /**
2245  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2246  * @fld: number of the given field
2247  * @info: info about the field
2248  * @buf: buffer containing the entry
2249  * @dontcare: buffer containing don't care mask for entry
2250  * @data: pointer to a data buffer containing flow entry's match values/masks
2251  */
2252 static void
2253 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2254                             u8 *dontcare, u8 *data)
2255 {
2256         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2257         bool use_mask = false;
2258         u8 disp;
2259
2260         src = info->src.val;
2261         mask = info->src.mask;
2262         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2263         disp = info->xtrct.disp % BITS_PER_BYTE;
2264
2265         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2266                 use_mask = true;
2267
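             /* Walk the destination bytes; tmp_s and tmp_m carry the bits
              * shifted out of the previous source byte when the extracted
              * field is not byte aligned (disp != 0).
              */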
2268         for (k = 0; k < info->entry.last; k++, dst++) {
2269                 /* Add overflow bits from previous byte */
2270                 buf[dst] = (tmp_s & 0xff00) >> 8;
2271
2272                 /* If the mask is not valid, tmp_m is always zero, so this just
2273                  * sets dontcare to 0 (no masked bits). If the mask is valid, this
2274                  * pulls in the overflow bits of the mask from the previous byte
2275                  */
2276                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2277
2278                 /* If there is displacement, the last byte will only contain
2279                  * displaced data; there is no more data to read from the user
2280                  * buffer, so skip it to avoid reading beyond the end of the
2281                  * user buffer
2282                  */
2283                 if (!disp || k < info->entry.last - 1) {
2284                         /* Store shifted data to use in next byte */
2285                         tmp_s = data[src++] << disp;
2286
2287                         /* Add current (shifted) byte */
2288                         buf[dst] |= tmp_s & 0xff;
2289
2290                         /* Handle mask if valid */
2291                         if (use_mask) {
2292                                 tmp_m = (~data[mask++] & 0xff) << disp;
2293                                 dontcare[dst] |= tmp_m & 0xff;
2294                         }
2295                 }
2296         }
2297
2298         /* Fill in don't care bits at beginning of field */
2299         if (disp) {
2300                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2301                 for (k = 0; k < disp; k++)
2302                         dontcare[dst] |= BIT(k);
2303         }
2304
2305         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2306
2307         /* Fill in don't care bits at end of field */
2308         if (end_disp) {
2309                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2310                       info->entry.last - 1;
2311                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2312                         dontcare[dst] |= BIT(k);
2313         }
2314 }
2315
2316 /**
2317  * ice_flow_acl_frmt_entry - Format ACL entry
2318  * @hw: pointer to the hardware structure
2319  * @prof: pointer to flow profile
2320  * @e: pointer to the flow entry
2321  * @data: pointer to a data buffer containing flow entry's match values/masks
2322  * @acts: array of actions to be performed on a match
2323  * @acts_cnt: number of actions
2324  *
2325  * Formats the key (and key_inverse) to be matched from the data passed in,
2326  * along with data from the flow profile. This key/key_inverse pair makes up
2327  * the 'entry' for an ACL flow entry.
2328  */
2329 static enum ice_status
2330 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2331                         struct ice_flow_entry *e, u8 *data,
2332                         struct ice_flow_action *acts, u8 acts_cnt)
2333 {
2334         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2335         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2336         enum ice_status status;
2337         bool cnt_alloc;
2338         u8 prof_id = 0;
2339         u16 i, buf_sz;
2340
2341         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2342         if (status)
2343                 return status;
2344
2345         /* Format the result action */
2346
2347         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2348         if (status)
2349                 return status;
2350
2351         status = ICE_ERR_NO_MEMORY;
2352
2353         e->acts = (struct ice_flow_action *)
2354                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2355                            ICE_NONDMA_TO_NONDMA);
2356
2357         if (!e->acts)
2358                 goto out;
2359
2360         e->acts_cnt = acts_cnt;
2361
2362         /* Format the matching data */
2363         buf_sz = prof->cfg.scen->width;
2364         buf = (u8 *)ice_malloc(hw, buf_sz);
2365         if (!buf)
2366                 goto out;
2367
2368         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2369         if (!dontcare)
2370                 goto out;
2371
2372         /* The 'key' buffer will store both key and key_inverse, so it must be
2373          * twice the size of buf
2374          */
2375         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2376         if (!key)
2377                 goto out;
2378
2379         range_buf = (struct ice_aqc_acl_profile_ranges *)
2380                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2381         if (!range_buf)
2382                 goto out;
2383
2384         /* Set don't care mask to all 1's to start, will zero out used bytes */
2385         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2386
2387         for (i = 0; i < prof->segs_cnt; i++) {
2388                 struct ice_flow_seg_info *seg = &prof->segs[i];
2389                 u64 match = seg->match;
2390                 u16 j;
2391
2392                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2393                         struct ice_flow_fld_info *info;
2394                         const u64 bit = BIT_ULL(j);
2395
2396                         if (!(match & bit))
2397                                 continue;
2398
2399                         info = &seg->fields[j];
2400
2401                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2402                                 ice_flow_acl_frmt_entry_range(j, info,
2403                                                               range_buf, data,
2404                                                               &range);
2405                         else
2406                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2407                                                             dontcare, data);
2408
2409                         match &= ~bit;
2410                 }
2411
2412                 for (j = 0; j < seg->raws_cnt; j++) {
2413                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2414                         u16 dst, src, mask, k;
2415                         bool use_mask = false;
2416
2417                         src = info->src.val;
2418                         dst = info->entry.val -
2419                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2420                         mask = info->src.mask;
2421
2422                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2423                                 use_mask = true;
2424
2425                         for (k = 0; k < info->entry.last; k++, dst++) {
2426                                 buf[dst] = data[src++];
2427                                 if (use_mask)
2428                                         dontcare[dst] = ~data[mask++];
2429                                 else
2430                                         dontcare[dst] = 0;
2431                         }
2432                 }
2433         }
2434
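             /* Stamp the HW profile ID into the entry; it must match exactly,
              * so clear its don't-care bits.
              */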
2435         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2436         dontcare[prof->cfg.scen->pid_idx] = 0;
2437
2438         /* Format the buffer for direction flags */
2439         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2440
2441         if (prof->dir == ICE_FLOW_RX)
2442                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2443
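             /* Ownership of range_buf is transferred to the entry only when at
              * least one range checker is in use; otherwise it is freed here.
              */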
2444         if (range) {
2445                 buf[prof->cfg.scen->rng_chk_idx] = range;
2446                 /* Mark any unused range checkers as don't care */
2447                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2448                 e->range_buf = range_buf;
2449         } else {
2450                 ice_free(hw, range_buf);
2451         }
2452
2453         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2454                              buf_sz);
2455         if (status)
2456                 goto out;
2457
2458         e->entry = key;
2459         e->entry_sz = buf_sz * 2;
2460
2461 out:
2462         if (buf)
2463                 ice_free(hw, buf);
2464
2465         if (dontcare)
2466                 ice_free(hw, dontcare);
2467
2468         if (status && key)
2469                 ice_free(hw, key);
2470
2471         if (status && range_buf) {
2472                 ice_free(hw, range_buf);
2473                 e->range_buf = NULL;
2474         }
2475
2476         if (status && e->acts) {
2477                 ice_free(hw, e->acts);
2478                 e->acts = NULL;
2479                 e->acts_cnt = 0;
2480         }
2481
2482         if (status && cnt_alloc)
2483                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2484
2485         return status;
2486 }
2487
2488 /**
2489  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2490  *                                     the compared data.
2491  * @prof: pointer to flow profile
2492  * @e: pointer to the comparing flow entry
2493  * @do_chg_action: decide if we want to change the ACL action
2494  * @do_add_entry: decide if we want to add the new ACL entry
2495  * @do_rem_entry: decide if we want to remove the current ACL entry
2496  *
2497  * Find an ACL scenario entry that matches the compared data. At the same time,
2498  * this function also figures out:
2499  * a/ If we want to change the ACL action
2500  * b/ If we want to add the new ACL entry
2501  * c/ If we want to remove the current ACL entry
2502  */
2503 static struct ice_flow_entry *
2504 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2505                                   struct ice_flow_entry *e, bool *do_chg_action,
2506                                   bool *do_add_entry, bool *do_rem_entry)
2507 {
2508         struct ice_flow_entry *p, *return_entry = NULL;
2509         u8 i, j;
2510
2511         /* Check if:
2512          * a/ There exists an entry with the same matching data but a different
2513          *    priority; then we remove the existing ACL entry and add the new
2514          *    entry to the ACL scenario.
2515          * b/ There exists an entry with the same matching data, priority, and
2516          *    result action; then we do nothing.
2517          * c/ There exists an entry with the same matching data and priority but
2518          *    a different action; then we only change the entry's action.
2519          * d/ Else, we add this new entry to the ACL scenario.
2520          */
2521         *do_chg_action = false;
2522         *do_add_entry = true;
2523         *do_rem_entry = false;
2524         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2525                 if (memcmp(p->entry, e->entry, p->entry_sz))
2526                         continue;
2527
2528                 /* From this point, we have the same matching_data. */
2529                 *do_add_entry = false;
2530                 return_entry = p;
2531
2532                 if (p->priority != e->priority) {
2533                         /* matching data && !priority */
2534                         *do_add_entry = true;
2535                         *do_rem_entry = true;
2536                         break;
2537                 }
2538
2539                 /* From this point, we will have matching_data && priority */
2540                 if (p->acts_cnt != e->acts_cnt)
2541                         *do_chg_action = true;
2542                 for (i = 0; i < p->acts_cnt; i++) {
2543                         bool found_not_match = false;
2544
2545                         for (j = 0; j < e->acts_cnt; j++)
2546                                 if (memcmp(&p->acts[i], &e->acts[j],
2547                                            sizeof(struct ice_flow_action))) {
2548                                         found_not_match = true;
2549                                         break;
2550                                 }
2551
2552                         if (found_not_match) {
2553                                 *do_chg_action = true;
2554                                 break;
2555                         }
2556                 }
2557
2558                 /* (do_chg_action = true) means:
2559                  *    matching_data && priority && !result_action
2560                  * (do_chg_action = false) means:
2561                  *    matching_data && priority && result_action
2562                  */
2563                 break;
2564         }
2565
2566         return return_entry;
2567 }
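
/*
 * Illustrative summary of the decision matrix implemented above (derived from
 * the code; "same"/"different" refer to the comparison against an existing
 * scenario entry):
 *
 *	match data  priority   actions    do_add_entry  do_rem_entry  do_chg_action
 *	----------  ---------  ---------  ------------  ------------  -------------
 *	different   n/a        n/a        true          false         false
 *	same        different  n/a        true          true          false
 *	same        same       different  false         false         true
 *	same        same       same       false         false         false
 */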
2568
2569 /**
2570  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2571  * @p: flow priority
2572  */
2573 static enum ice_acl_entry_prior
2574 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2575 {
2576         enum ice_acl_entry_prior acl_prior;
2577
2578         switch (p) {
2579         case ICE_FLOW_PRIO_LOW:
2580                 acl_prior = ICE_LOW;
2581                 break;
2582         case ICE_FLOW_PRIO_NORMAL:
2583                 acl_prior = ICE_NORMAL;
2584                 break;
2585         case ICE_FLOW_PRIO_HIGH:
2586                 acl_prior = ICE_HIGH;
2587                 break;
2588         default:
2589                 acl_prior = ICE_NORMAL;
2590                 break;
2591         }
2592
2593         return acl_prior;
2594 }
2595
2596 /**
2597  * ice_flow_acl_union_rng_chk - Perform a union operation between two
2598  *                              range checker buffers
2599  * @dst_buf: pointer to destination range checker buffer
2600  * @src_buf: pointer to source range checker buffer
2601  *
2602  * Compute the union of the dst_buf and src_buf range checker buffers,
2603  * saving the result back into dst_buf.
2604  */
2605 static enum ice_status
2606 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2607                            struct ice_aqc_acl_profile_ranges *src_buf)
2608 {
2609         u8 i, j;
2610
2611         if (!dst_buf || !src_buf)
2612                 return ICE_ERR_BAD_PTR;
2613
2614         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2615                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2616                 bool will_populate = false;
2617
2618                 in_data = &src_buf->checker_cfg[i];
2619
2620                 if (!in_data->mask)
2621                         break;
2622
2623                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2624                         cfg_data = &dst_buf->checker_cfg[j];
2625
2626                         if (!cfg_data->mask ||
2627                             !memcmp(cfg_data, in_data,
2628                                     sizeof(struct ice_acl_rng_data))) {
2629                                 will_populate = true;
2630                                 break;
2631                         }
2632                 }
2633
2634                 if (will_populate) {
2635                         ice_memcpy(cfg_data, in_data,
2636                                    sizeof(struct ice_acl_rng_data),
2637                                    ICE_NONDMA_TO_NONDMA);
2638                 } else {
2639                         /* No available slot left to program range checker */
2640                         return ICE_ERR_MAX_LIMIT;
2641                 }
2642         }
2643
2644         return ICE_SUCCESS;
2645 }
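
/*
 * Illustrative sketch of the union semantics above (hypothetical checkers A,
 * B, and C):
 *
 *	before:  dst_buf = { A, B, 0, 0 }    src_buf = { B, C, 0, 0 }
 *	after:   dst_buf = { A, B, C, 0 }
 *
 * Checker B already exists in dst_buf, so it is overwritten in place; checker
 * C lands in the first unused slot (mask == 0). If no matching or unused slot
 * can be found for a src_buf checker, ICE_ERR_MAX_LIMIT is returned.
 */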
2646
2647 /**
2648  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2649  * @hw: pointer to the hardware structure
2650  * @prof: pointer to flow profile
2651  * @entry: double pointer to the flow entry
2652  *
2653  * Look at the entries already added to the corresponding ACL scenario,
2654  * then apply the matching logic to decide whether to add this new entry,
2655  * modify an existing one, or do nothing.
2656  */
2657 static enum ice_status
2658 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2659                                  struct ice_flow_entry **entry)
2660 {
2661         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2662         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2663         struct ice_acl_act_entry *acts = NULL;
2664         struct ice_flow_entry *exist;
2665         enum ice_status status = ICE_SUCCESS;
2666         struct ice_flow_entry *e;
2667         u8 i;
2668
2669         if (!entry || !(*entry) || !prof)
2670                 return ICE_ERR_BAD_PTR;
2671
2672         e = *(entry);
2673
2674         do_chg_rng_chk = false;
2675         if (e->range_buf) {
2676                 u8 prof_id = 0;
2677
2678                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2679                                               &prof_id);
2680                 if (status)
2681                         return status;
2682
2683                 /* Query the current range-checker value in FW */
2684                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2685                                                    NULL);
2686                 if (status)
2687                         return status;
2688                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2689                            sizeof(struct ice_aqc_acl_profile_ranges),
2690                            ICE_NONDMA_TO_NONDMA);
2691
2692                 /* Generate the new range-checker value */
2693                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2694                 if (status)
2695                         return status;
2696
2697                 /* Reconfigure the range check if the buffer is changed. */
2698                 do_chg_rng_chk = false;
2699                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2700                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2701                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2702                                                           &cfg_rng_buf, NULL);
2703                         if (status)
2704                                 return status;
2705
2706                         do_chg_rng_chk = true;
2707                 }
2708         }
2709
2710         /* Figure out if we want to change the ACL action, add the new
2711          * ACL entry, and/or remove the current ACL entry.
2712          */
2713         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2714                                                   &do_add_entry, &do_rem_entry);
2715
2716         if (do_rem_entry) {
2717                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2718                 if (status)
2719                         return status;
2720         }
2721
2722         /* Prepare the result action buffer */
2723         acts = (struct ice_acl_act_entry *)ice_calloc
2724                 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
             if (!acts)
                     return ICE_ERR_NO_MEMORY;

2725         for (i = 0; i < e->acts_cnt; i++)
2726                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2727                            sizeof(struct ice_acl_act_entry),
2728                            ICE_NONDMA_TO_NONDMA);
2729
2730         if (do_add_entry) {
2731                 enum ice_acl_entry_prior prior;
2732                 u8 *keys, *inverts;
2733                 u16 entry_idx;
2734
2735                 keys = (u8 *)e->entry;
2736                 inverts = keys + (e->entry_sz / 2);
2737                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2738
2739                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2740                                            inverts, acts, e->acts_cnt,
2741                                            &entry_idx);
2742                 if (status)
2743                         goto out;
2744
2745                 e->scen_entry_idx = entry_idx;
2746                 LIST_ADD(&e->l_entry, &prof->entries);
2747         } else {
2748                 if (do_chg_action) {
2749                         /* Update the SW copy of the existing entry's action
2750                          * memory info with e's action memory info
2751                          */
2752                         ice_free(hw, exist->acts);
2753                         exist->acts_cnt = e->acts_cnt;
2754                         exist->acts = (struct ice_flow_action *)
2755                                 ice_calloc(hw, exist->acts_cnt,
2756                                            sizeof(struct ice_flow_action));
2757
2758                         if (!exist->acts) {
2759                                 status = ICE_ERR_NO_MEMORY;
2760                                 goto out;
2761                         }
2762
2763                         ice_memcpy(exist->acts, e->acts,
2764                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2765                                    ICE_NONDMA_TO_NONDMA);
2766
2767                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2768                                                   e->acts_cnt,
2769                                                   exist->scen_entry_idx);
2770                         if (status)
2771                                 goto out;
2772                 }
2773
2774                 if (do_chg_rng_chk) {
2775                         /* In this case, we want to update the range checker
2776                          * information of the existing entry
2777                          */
2778                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2779                                                             e->range_buf);
2780                         if (status)
2781                                 goto out;
2782                 }
2783
2784                 /* As we don't add the new entry to our SW DB, deallocate its
2785                  * memory and return the existing entry to the caller
2786                  */
2787                 ice_dealloc_flow_entry(hw, e);
2788                 *(entry) = exist;
2789         }
2790 out:
2791         if (acts)
2792                 ice_free(hw, acts);
2793
2794         return status;
2795 }
2796
2797 /**
2798  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2799  * @hw: pointer to the hardware structure
2800  * @prof: pointer to flow profile
2801  * @e: double pointer to the flow entry
2802  */
2803 static enum ice_status
2804 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2805                             struct ice_flow_entry **e)
2806 {
2807         enum ice_status status;
2808
2809         ice_acquire_lock(&prof->entries_lock);
2810         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2811         ice_release_lock(&prof->entries_lock);
2812
2813         return status;
2814 }
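
/*
 * Illustrative caller-side sketch (hypothetical variable names): on success,
 * *e may be replaced with a pointer to a pre-existing entry that already
 * covers the request, in which case the entry originally passed in has been
 * deallocated by ice_flow_acl_add_scen_entry_sync(). A caller that keeps its
 * own reference must therefore re-read the pointer after the call:
 *
 *	struct ice_flow_entry *e = new_entry;
 *	enum ice_status status;
 *
 *	status = ice_flow_acl_add_scen_entry(hw, prof, &e);
 *	if (!status && e != new_entry)
 *		new_entry = NULL;
 */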
2815
2816 /**
2817  * ice_flow_add_entry - Add a flow entry
2818  * @hw: pointer to the HW struct
2819  * @blk: classification stage
2820  * @prof_id: ID of the profile to add a new flow entry to
2821  * @entry_id: unique ID to identify this flow entry
2822  * @vsi_handle: software VSI handle for the flow entry
2823  * @prio: priority of the flow entry
2824  * @data: pointer to a data buffer containing flow entry's match values/masks
2825  * @acts: arrays of actions to be performed on a match
2826  * @acts_cnt: number of actions
2827  * @entry_h: pointer to buffer that receives the new flow entry's handle
2828  */
2829 enum ice_status
2830 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2831                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2832                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
2833                    u64 *entry_h)
2834 {
2835         struct ice_flow_entry *e = NULL;
2836         struct ice_flow_prof *prof;
2837         enum ice_status status = ICE_SUCCESS;
2838
2839         /* ACL entries must indicate an action */
2840         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2841                 return ICE_ERR_PARAM;
2842
2843         /* No flow entry data is expected for RSS */
2844         if (!entry_h || (!data && blk != ICE_BLK_RSS))
2845                 return ICE_ERR_BAD_PTR;
2846
2847         if (!ice_is_vsi_valid(hw, vsi_handle))
2848                 return ICE_ERR_PARAM;
2849
2850         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2851
2852         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2853         if (!prof) {
2854                 status = ICE_ERR_DOES_NOT_EXIST;
2855         } else {
2856                 /* Allocate memory for the entry being added and associate
2857                  * the VSI to the found flow profile
2858                  */
2859                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
2860                 if (!e)
2861                         status = ICE_ERR_NO_MEMORY;
2862                 else
2863                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
2864         }
2865
2866         ice_release_lock(&hw->fl_profs_locks[blk]);
2867         if (status)
2868                 goto out;
2869
2870         e->id = entry_id;
2871         e->vsi_handle = vsi_handle;
2872         e->prof = prof;
2873         e->priority = prio;
2874
2875         switch (blk) {
2876         case ICE_BLK_FD:
2877         case ICE_BLK_RSS:
2878                 break;
2879         case ICE_BLK_ACL:
2880                 /* ACL will handle the entry management */
2881                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
2882                                                  acts_cnt);
2883                 if (status)
2884                         goto out;
2885
2886                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
2887                 if (status)
2888                         goto out;
2889
2890                 break;
2891         case ICE_BLK_SW:
2892         case ICE_BLK_PE:
2893         default:
2894                 status = ICE_ERR_NOT_IMPL;
2895                 goto out;
2896         }
2897
2898         if (blk != ICE_BLK_ACL) {
2899                 /* ACL will handle the entry management */
2900                 ice_acquire_lock(&prof->entries_lock);
2901                 LIST_ADD(&e->l_entry, &prof->entries);
2902                 ice_release_lock(&prof->entries_lock);
2903         }
2904
2905         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
2906
2907 out:
2908         if (status && e) {
2909                 if (e->entry)
2910                         ice_free(hw, e->entry);
2911                 ice_free(hw, e);
2912         }
2913
2914         return status;
2915 }
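
/*
 * Illustrative usage sketch (hypothetical IDs and buffer): add a Flow
 * Director entry against a previously added profile, then remove it. For
 * non-ACL blocks no actions are required, and the layout of match_buf is
 * assumed to follow the val_loc/mask_loc offsets given to ice_flow_set_fld()
 * when the profile's segments were built.
 *
 *	u64 entry_h = ICE_FLOW_ENTRY_HANDLE_INVAL;
 *	enum ice_status status;
 *
 *	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, entry_id,
 *				    vsi_handle, ICE_FLOW_PRIO_NORMAL,
 *				    match_buf, NULL, 0, &entry_h);
 *	if (!status)
 *		status = ice_flow_rem_entry(hw, ICE_BLK_FD, entry_h);
 */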
2916
2917 /**
2918  * ice_flow_rem_entry - Remove a flow entry
2919  * @hw: pointer to the HW struct
2920  * @blk: classification stage
2921  * @entry_h: handle to the flow entry to be removed
2922  */
2923 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2924                                    u64 entry_h)
2925 {
2926         struct ice_flow_entry *entry;
2927         struct ice_flow_prof *prof;
2928         enum ice_status status = ICE_SUCCESS;
2929
2930         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2931                 return ICE_ERR_PARAM;
2932
2933         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2934
2935         /* Retain the pointer to the flow profile as the entry will be freed */
2936         prof = entry->prof;
2937
2938         if (prof) {
2939                 ice_acquire_lock(&prof->entries_lock);
2940                 status = ice_flow_rem_entry_sync(hw, blk, entry);
2941                 ice_release_lock(&prof->entries_lock);
2942         }
2943
2944         return status;
2945 }
2946
2947 /**
2948  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2949  * @seg: packet segment the field being set belongs to
2950  * @fld: field to be set
2951  * @field_type: type of the field
2952  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2953  *           entry's input buffer
2954  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2955  *            input buffer
2956  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2957  *            entry's input buffer
2958  *
2959  * This helper function stores information of a field being matched, including
2960  * the type of the field and the locations of the value to match, the mask,
2961  * and the upper-bound value in the start of the input buffer for a flow entry.
2962  * This function should only be used for fixed-size data structures.
2963  *
2964  * This function also opportunistically determines the protocol headers to be
2965  * present based on the fields being set. Some fields cannot be used alone to
2966  * determine the protocol headers present. Sometimes, fields for particular
2967  * protocol headers are not matched. In those cases, the protocol headers
2968  * must be explicitly set.
2969  */
2970 static void
2971 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2972                      enum ice_flow_fld_match_type field_type, u16 val_loc,
2973                      u16 mask_loc, u16 last_loc)
2974 {
2975         u64 bit = BIT_ULL(fld);
2976
2977         seg->match |= bit;
2978         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
2979                 seg->range |= bit;
2980
2981         seg->fields[fld].type = field_type;
2982         seg->fields[fld].src.val = val_loc;
2983         seg->fields[fld].src.mask = mask_loc;
2984         seg->fields[fld].src.last = last_loc;
2985
2986         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
2987 }
2988
2989 /**
2990  * ice_flow_set_fld - specifies locations of field from entry's input buffer
2991  * @seg: packet segment the field being set belongs to
2992  * @fld: field to be set
2993  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2994  *           entry's input buffer
2995  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2996  *            input buffer
2997  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2998  *            entry's input buffer
2999  * @range: indicate if field being matched is to be in a range
3000  *
3001  * This function specifies the locations, in the form of byte offsets from the
3002  * start of the input buffer for a flow entry, from where the value to match,
3003  * the mask value, and upper value can be extracted. These locations are then
3004  * stored in the flow profile. When adding a flow entry associated with the
3005  * flow profile, these locations will be used to quickly extract the values and
3006  * create the content of a match entry. This function should only be used for
3007  * fixed-size data structures.
3008  */
3009 void
3010 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3011                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3012 {
3013         enum ice_flow_fld_match_type t = range ?
3014                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3015
3016         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3017 }
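
/*
 * Illustrative sketch (hypothetical buffer layout): assume the caller's flow
 * entry input buffer stores the IPv4 source address value at byte offset 0
 * and its mask at byte offset 4, with no upper (range) value. The field would
 * then be described as:
 *
 *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 4,
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 */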
3018
3019 /**
3020  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3021  * @seg: packet segment the field being set belongs to
3022  * @fld: field to be set
3023  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3024  *           entry's input buffer
3025  * @pref_loc: location of prefix value from entry's input buffer
3026  * @pref_sz: size of the location holding the prefix value
3027  *
3028  * This function specifies the locations, in the form of byte offsets from the
3029  * start of the input buffer for a flow entry, from where the value to match
3030  * and the IPv4 prefix value can be extracted. These locations are then stored
3031  * in the flow profile. When adding flow entries to the associated flow profile,
3032  * these locations can be used to quickly extract the values to create the
3033  * content of a match entry. This function should only be used for fixed-size
3034  * data structures.
3035  */
3036 void
3037 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3038                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3039 {
3040         /* For this type of field, the "mask" location holds the location of
3041          * the prefix value, and the "last" location holds the size of the
3042          * location of the prefix value.
3043          */
3044         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3045                              pref_loc, (u16)pref_sz);
3046 }
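
/*
 * Illustrative sketch (hypothetical buffer layout): match an IPv4 destination
 * address together with a prefix, where the address value sits at byte
 * offset 0 of the entry's input buffer, the prefix value at byte offset 4,
 * and the prefix value occupies a single byte:
 *
 *	ice_flow_set_fld_prefix(&seg, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 4, 1);
 */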
3047
3048 /**
3049  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3050  * @seg: packet segment the field being set belongs to
3051  * @off: offset of the raw field from the beginning of the segment in bytes
3052  * @len: length of the raw pattern to be matched
3053  * @val_loc: location of the value to match from entry's input buffer
3054  * @mask_loc: location of mask value from entry's input buffer
3055  *
3056  * This function specifies the offset of the raw field to be matched from the
3057  * beginning of the specified packet segment, and the locations, in the form of
3058  * byte offsets from the start of the input buffer for a flow entry, from where
3059  * the value to match and the mask value are to be extracted. These locations are
3060  * then stored in the flow profile. When adding flow entries to the associated
3061  * flow profile, these locations can be used to quickly extract the values to
3062  * create the content of a match entry. This function should only be used for
3063  * fixed-size data structures.
3064  */
3065 void
3066 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3067                      u16 val_loc, u16 mask_loc)
3068 {
3069         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3070                 seg->raws[seg->raws_cnt].off = off;
3071                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3072                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3073                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3074                 /* The "last" field is used to store the length of the field */
3075                 seg->raws[seg->raws_cnt].info.src.last = len;
3076         }
3077
3078         /* Overflows of "raws" will be handled as an error condition later in
3079          * the flow when this information is processed.
3080          */
3081         seg->raws_cnt++;
3082 }
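
/*
 * Illustrative sketch (hypothetical offsets): match two raw bytes located
 * four bytes into the packet segment, with the value taken from byte offset 0
 * and the mask from byte offset 2 of the entry's input buffer:
 *
 *	ice_flow_add_fld_raw(&seg, 4, 2, 0, 2);
 */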
3083
3084 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3085         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3086
3087 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3088         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3089
3090 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3091         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
3092          ICE_FLOW_SEG_HDR_SCTP)
3093
3094 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3095         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3096          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3097          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3098
3099 /**
3100  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3101  * @segs: pointer to the flow field segment(s)
3102  * @hash_fields: fields to be hashed on for the segment(s)
3103  * @flow_hdr: protocol header fields within a packet segment
3104  *
3105  * Helper function to extract fields from the hash bitmap and use the flow
3106  * header value to set up a flow field segment for later use when adding or
3107  * removing a flow profile entry.
3108  */
3109 static enum ice_status
3110 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3111                           u32 flow_hdr)
3112 {
3113         u64 val = hash_fields;
3114         u8 i;
3115
3116         for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3117                 u64 bit = BIT_ULL(i);
3118
3119                 if (val & bit) {
3120                         ice_flow_set_fld(segs, (enum ice_flow_field)i,
3121                                          ICE_FLOW_FLD_OFF_INVAL,
3122                                          ICE_FLOW_FLD_OFF_INVAL,
3123                                          ICE_FLOW_FLD_OFF_INVAL, false);
3124                         val &= ~bit;
3125                 }
3126         }
3127         ICE_FLOW_SET_HDRS(segs, flow_hdr);
3128
3129         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3130             ~ICE_FLOW_RSS_HDRS_INNER_MASK)
3131                 return ICE_ERR_PARAM;
3132
3133         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3134         if (val && !ice_is_pow2(val))
3135                 return ICE_ERR_CFG;
3136
3137         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3138         if (val && !ice_is_pow2(val))
3139                 return ICE_ERR_CFG;
3140
3141         return ICE_SUCCESS;
3142 }
3143
3144 /**
3145  * ice_rem_vsi_rss_list - remove VSI from RSS list
3146  * @hw: pointer to the hardware structure
3147  * @vsi_handle: software VSI handle
3148  *
3149  * Remove the VSI from all RSS configurations in the list.
3150  */
3151 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3152 {
3153         struct ice_rss_cfg *r, *tmp;
3154
3155         if (LIST_EMPTY(&hw->rss_list_head))
3156                 return;
3157
3158         ice_acquire_lock(&hw->rss_locks);
3159         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3160                                  ice_rss_cfg, l_entry)
3161                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3162                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3163                                 LIST_DEL(&r->l_entry);
3164                                 ice_free(hw, r);
3165                         }
3166         ice_release_lock(&hw->rss_locks);
3167 }
3168
3169 /**
3170  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3171  * @hw: pointer to the hardware structure
3172  * @vsi_handle: software VSI handle
3173  *
3174  * This function will iterate through all flow profiles and disassociate
3175  * the VSI from each of them. If a flow profile then has no VSIs associated
3176  * with it, it will be removed.
3177  */
3178 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3179 {
3180         const enum ice_block blk = ICE_BLK_RSS;
3181         struct ice_flow_prof *p, *t;
3182         enum ice_status status = ICE_SUCCESS;
3183
3184         if (!ice_is_vsi_valid(hw, vsi_handle))
3185                 return ICE_ERR_PARAM;
3186
3187         if (LIST_EMPTY(&hw->fl_profs[blk]))
3188                 return ICE_SUCCESS;
3189
3190         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3191         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3192                                  l_entry)
3193                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3194                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3195                         if (status)
3196                                 break;
3197
3198                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3199                                 status = ice_flow_rem_prof_sync(hw, blk, p);
3200                                 if (status)
3201                                         break;
3202                         }
3203                 }
3204         ice_release_lock(&hw->fl_profs_locks[blk]);
3205
3206         return status;
3207 }
3208
3209 /**
3210  * ice_rem_rss_list - remove RSS configuration from list
3211  * @hw: pointer to the hardware structure
3212  * @vsi_handle: software VSI handle
3213  * @prof: pointer to flow profile
3214  *
3215  * Assumption: lock has already been acquired for RSS list
3216  */
3217 static void
3218 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3219 {
3220         struct ice_rss_cfg *r, *tmp;
3221
3222         /* Search for RSS hash fields associated with the VSI that match the
3223          * hash configuration of the flow profile. If found, clear the VSI from
3224          * the entry's VSI bitmap and delete the entry if no VSIs remain.
3225          */
3226         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3227                                  ice_rss_cfg, l_entry)
3228                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3229                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3230                         ice_clear_bit(vsi_handle, r->vsis);
3231                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3232                                 LIST_DEL(&r->l_entry);
3233                                 ice_free(hw, r);
3234                         }
3235                         return;
3236                 }
3237 }
3238
3239 /**
3240  * ice_add_rss_list - add RSS configuration to list
3241  * @hw: pointer to the hardware structure
3242  * @vsi_handle: software VSI handle
3243  * @prof: pointer to flow profile
3244  *
3245  * Assumption: lock has already been acquired for RSS list
3246  */
3247 static enum ice_status
3248 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3249 {
3250         struct ice_rss_cfg *r, *rss_cfg;
3251
3252         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3253                             ice_rss_cfg, l_entry)
3254                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3255                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3256                         ice_set_bit(vsi_handle, r->vsis);
3257                         return ICE_SUCCESS;
3258                 }
3259
3260         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3261         if (!rss_cfg)
3262                 return ICE_ERR_NO_MEMORY;
3263
3264         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3265         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3266         rss_cfg->symm = prof->cfg.symm;
3267         ice_set_bit(vsi_handle, rss_cfg->vsis);
3268
3269         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3270
3271         return ICE_SUCCESS;
3272 }
3273
3274 #define ICE_FLOW_PROF_HASH_S    0
3275 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3276 #define ICE_FLOW_PROF_HDR_S     32
3277 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3278 #define ICE_FLOW_PROF_ENCAP_S   63
3279 #define ICE_FLOW_PROF_ENCAP_M   (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
3280
3281 #define ICE_RSS_OUTER_HEADERS   1
3282 #define ICE_RSS_INNER_HEADERS   2
3283
3284 /* Flow profile ID format:
3285  * [0:31] - Packet match fields
3286  * [32:62] - Protocol header
3287  * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
3288  */
3289 #define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
3290         (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3291               (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3292               ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
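
/*
 * Worked example (hypothetical inputs): with hash = 0x0000000C,
 * hdr = 0x00000003, and segs_cnt = 2 (an inner/tunneled configuration),
 * ICE_FLOW_GEN_PROFID() yields:
 *
 *	  0x000000000000000C	packet match fields, bits [0:31]
 *	| 0x0000000300000000	protocol header, bits [32:62]
 *	| 0x8000000000000000	encapsulation flag, bit 63
 *	= 0x800000030000000C
 */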
3293
3294 static void
3295 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3296 {
3297         u32 s = ((src % 4) << 3); /* byte shift */
3298         u32 v = dst | 0x80; /* value to program */
3299         u8 i = src / 4; /* register index */
3300         u32 reg;
3301
3302         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3303         reg = (reg & ~(0xff << s)) | (v << s);
3304         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3305 }
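
/*
 * Worked example of the register math above (hypothetical field-vector
 * indices): for src = 5 and dst = 9, i = 5 / 4 = 1 and s = (5 % 4) * 8 = 8,
 * so byte 1 of GLQF_HSYMM(prof_id, 1) is programmed with 0x80 | 9 = 0x89,
 * i.e. word 5 is paired with field-vector word 9, with 0x80 presumably
 * acting as the enable bit for the swap.
 */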
3306
3307 static void
3308 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3309 {
3310         int fv_last_word =
3311                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3312         int i;
3313
3314         for (i = 0; i < len; i++) {
3315                 ice_rss_config_xor_word(hw, prof_id,
3316                                         /* Yes, field vector in GLQF_HSYMM and
3317                                          * GLQF_HINSET is reversed!
3318                                          */
3319                                         fv_last_word - (src + i),
3320                                         fv_last_word - (dst + i));
3321                 ice_rss_config_xor_word(hw, prof_id,
3322                                         fv_last_word - (dst + i),
3323                                         fv_last_word - (src + i));
3324         }
3325 }
3326
3327 static void
3328 ice_rss_update_symm(struct ice_hw *hw,
3329                     struct ice_flow_prof *prof)
3330 {
3331         struct ice_prof_map *map;
3332         u8 prof_id, m;
3333
3334         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3335         prof_id = map->prof_id;
3336
3337         /* clear to default */
3338         for (m = 0; m < 6; m++)
3339                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3340         if (prof->cfg.symm) {
3341                 struct ice_flow_seg_info *seg =
3342                         &prof->segs[prof->segs_cnt - 1];
3343
3344                 struct ice_flow_seg_xtrct *ipv4_src =
3345                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3346                 struct ice_flow_seg_xtrct *ipv4_dst =
3347                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3348                 struct ice_flow_seg_xtrct *ipv6_src =
3349                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3350                 struct ice_flow_seg_xtrct *ipv6_dst =
3351                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3352
3353                 struct ice_flow_seg_xtrct *tcp_src =
3354                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3355                 struct ice_flow_seg_xtrct *tcp_dst =
3356                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3357
3358                 struct ice_flow_seg_xtrct *udp_src =
3359                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3360                 struct ice_flow_seg_xtrct *udp_dst =
3361                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3362
3363                 struct ice_flow_seg_xtrct *sctp_src =
3364                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3365                 struct ice_flow_seg_xtrct *sctp_dst =
3366                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3367
3368                 /* xor IPv4 */
3369                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3370                         ice_rss_config_xor(hw, prof_id,
3371                                            ipv4_src->idx, ipv4_dst->idx, 2);
3372
3373                 /* xor IPv6 */
3374                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3375                         ice_rss_config_xor(hw, prof_id,
3376                                            ipv6_src->idx, ipv6_dst->idx, 8);
3377
3378                 /* xor TCP */
3379                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3380                         ice_rss_config_xor(hw, prof_id,
3381                                            tcp_src->idx, tcp_dst->idx, 1);
3382
3383                 /* xor UDP */
3384                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3385                         ice_rss_config_xor(hw, prof_id,
3386                                            udp_src->idx, udp_dst->idx, 1);
3387
3388                 /* xor SCTP */
3389                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3390                         ice_rss_config_xor(hw, prof_id,
3391                                            sctp_src->idx, sctp_dst->idx, 1);
3392         }
3393 }
3394
3395 /**
3396  * ice_add_rss_cfg_sync - add an RSS configuration
3397  * @hw: pointer to the hardware structure
3398  * @vsi_handle: software VSI handle
3399  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3400  * @addl_hdrs: protocol header fields
3401  * @segs_cnt: packet segment count
3402  * @symm: symmetric hash enable/disable
3403  *
3404  * Assumption: lock has already been acquired for RSS list
3405  */
3406 static enum ice_status
3407 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3408                      u32 addl_hdrs, u8 segs_cnt, bool symm)
3409 {
3410         const enum ice_block blk = ICE_BLK_RSS;
3411         struct ice_flow_prof *prof = NULL;
3412         struct ice_flow_seg_info *segs;
3413         enum ice_status status;
3414
3415         if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
3416                 return ICE_ERR_PARAM;
3417
3418         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3419                                                       sizeof(*segs));
3420         if (!segs)
3421                 return ICE_ERR_NO_MEMORY;
3422
3423         /* Construct the packet segment info from the hashed fields */
3424         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3425                                            addl_hdrs);
3426         if (status)
3427                 goto exit;
3428
3429         /* Search for a flow profile that has matching headers and hash fields
3430          * and has the input VSI associated with it. If found, no further
3431          * operations are required, so exit.
3432          */
3433         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3434                                         vsi_handle,
3435                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
3436                                         ICE_FLOW_FIND_PROF_CHK_VSI);
3437         if (prof) {
3438                 if (prof->cfg.symm == symm)
3439                         goto exit;
3440                 prof->cfg.symm = symm;
3441                 goto update_symm;
3442         }
3443
3444         /* Check if a flow profile exists with the same protocol headers and
3445          * associated with the input VSI. If so disassociate the VSI from
3446          * this profile. The VSI will be added to a new profile created with
3447          * the protocol header and new hash field configuration.
3448          */
3449         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3450                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3451         if (prof) {
3452                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3453                 if (!status)
3454                         ice_rem_rss_list(hw, vsi_handle, prof);
3455                 else
3456                         goto exit;
3457
3458                 /* Remove profile if it has no VSIs associated */
3459                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3460                         status = ice_flow_rem_prof(hw, blk, prof->id);
3461                         if (status)
3462                                 goto exit;
3463                 }
3464         }
3465
3466         /* Search for a profile that has the same match fields only. If one
3467          * exists, then associate the VSI with this profile.
3468          */
3469         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3470                                         vsi_handle,
3471                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3472         if (prof) {
3473                 if (prof->cfg.symm == symm) {
3474                         status = ice_flow_assoc_prof(hw, blk, prof,
3475                                                      vsi_handle);
3476                         if (!status)
3477                                 status = ice_add_rss_list(hw, vsi_handle,
3478                                                           prof);
3479                 } else {
3480                         /* if a profile exists but with a different symmetric
3481                          * requirement, just return an error.
3482                          */
3483                         status = ICE_ERR_NOT_SUPPORTED;
3484                 }
3485                 goto exit;
3486         }
3487
3488         /* Create a new flow profile with generated profile and packet
3489          * segment information.
3490          */
3491         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3492                                    ICE_FLOW_GEN_PROFID(hashed_flds,
3493                                                        segs[segs_cnt - 1].hdrs,
3494                                                        segs_cnt),
3495                                    segs, segs_cnt, NULL, 0, &prof);
3496         if (status)
3497                 goto exit;
3498
3499         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3500         /* If association to a new flow profile failed then this profile can
3501          * be removed.
3502          */
3503         if (status) {
3504                 ice_flow_rem_prof(hw, blk, prof->id);
3505                 goto exit;
3506         }
3507
3508         status = ice_add_rss_list(hw, vsi_handle, prof);
3509
3510         prof->cfg.symm = symm;
3511
3512 update_symm:
3513         ice_rss_update_symm(hw, prof);
3514
3515 exit:
3516         ice_free(hw, segs);
3517         return status;
3518 }
3519
3520 /**
3521  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3522  * @hw: pointer to the hardware structure
3523  * @vsi_handle: software VSI handle
3524  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3525  * @addl_hdrs: protocol header fields
3526  * @symm: symmetric hash enable/disable
3527  *
3528  * This function will generate a flow profile based on the input fields to
3529  * hash on and the flow type, and will use the VSI number to add a flow entry
3530  * to the profile.
3531  */
3532 enum ice_status
3533 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3534                 u32 addl_hdrs, bool symm)
3535 {
3536         enum ice_status status;
3537
3538         if (hashed_flds == ICE_HASH_INVALID ||
3539             !ice_is_vsi_valid(hw, vsi_handle))
3540                 return ICE_ERR_PARAM;
3541
3542         ice_acquire_lock(&hw->rss_locks);
3543         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3544                                       ICE_RSS_OUTER_HEADERS, symm);
3545         if (!status)
3546                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3547                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3548                                               symm);
3549         ice_release_lock(&hw->rss_locks);
3550
3551         return status;
3552 }
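
/*
 * Illustrative usage sketch: enable RSS hashing on the IPv4 source/destination
 * addresses and TCP ports for a VSI. The hashed_flds bitmap is built from
 * ICE_FLOW_FIELD_IDX_* bits, and addl_hdrs from ICE_FLOW_SEG_HDR_* values:
 *
 *	u64 flds = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *		   BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
 *		   BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) |
 *		   BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT);
 *	enum ice_status status;
 *
 *	status = ice_add_rss_cfg(hw, vsi_handle, flds,
 *				 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
 *				 false);
 */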
3553
3554 /**
3555  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3556  * @hw: pointer to the hardware structure
3557  * @vsi_handle: software VSI handle
3558  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3559  * @addl_hdrs: Protocol header fields within a packet segment
3560  * @segs_cnt: packet segment count
3561  *
3562  * Assumption: lock has already been acquired for RSS list
3563  */
3564 static enum ice_status
3565 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3566                      u32 addl_hdrs, u8 segs_cnt)
3567 {
3568         const enum ice_block blk = ICE_BLK_RSS;
3569         struct ice_flow_seg_info *segs;
3570         struct ice_flow_prof *prof;
3571         enum ice_status status;
3572
3573         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3574                                                       sizeof(*segs));
3575         if (!segs)
3576                 return ICE_ERR_NO_MEMORY;
3577
3578         /* Construct the packet segment info from the hashed fields */
3579         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3580                                            addl_hdrs);
3581         if (status)
3582                 goto out;
3583
3584         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3585                                         vsi_handle,
3586                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3587         if (!prof) {
3588                 status = ICE_ERR_DOES_NOT_EXIST;
3589                 goto out;
3590         }
3591
3592         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3593         if (status)
3594                 goto out;
3595
3596         /* Remove RSS configuration from VSI context before deleting
3597          * the flow profile.
3598          */
3599         ice_rem_rss_list(hw, vsi_handle, prof);
3600
3601         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3602                 status = ice_flow_rem_prof(hw, blk, prof->id);
3603
3604 out:
3605         ice_free(hw, segs);
3606         return status;
3607 }
3608
3609 /**
3610  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3611  * @hw: pointer to the hardware structure
3612  * @vsi_handle: software VSI handle
3613  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3614  * @addl_hdrs: Protocol header fields within a packet segment
3615  *
3616  * This function will look up the flow profile based on the input
3617  * hash field bitmap, iterate through that profile's entry list, and
3618  * find the entry associated with the input VSI to be removed. Calls
3619  * are made to the underlying flow APIs, which will in turn build or
3620  * update buffers for the RSS XLT1 section.
3621  */
3622 enum ice_status
3623 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3624                 u32 addl_hdrs)
3625 {
3626         enum ice_status status;
3627
3628         if (hashed_flds == ICE_HASH_INVALID ||
3629             !ice_is_vsi_valid(hw, vsi_handle))
3630                 return ICE_ERR_PARAM;
3631
3632         ice_acquire_lock(&hw->rss_locks);
3633         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3634                                       ICE_RSS_OUTER_HEADERS);
3635         if (!status)
3636                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3637                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3638         ice_release_lock(&hw->rss_locks);
3639
3640         return status;
3641 }
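
/*
 * Illustrative usage sketch: removal must specify the same hashed fields and
 * protocol headers that were used when the configuration was added, since the
 * profile lookup is performed with ICE_FLOW_FIND_PROF_CHK_FLDS:
 *
 *	status = ice_rem_rss_cfg(hw, vsi_handle, flds,
 *				 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
 */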
3642
3643 /**
3644  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3645  * @hw: pointer to the hardware structure
3646  * @vsi_handle: software VSI handle
3647  */
3648 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3649 {
3650         enum ice_status status = ICE_SUCCESS;
3651         struct ice_rss_cfg *r;
3652
3653         if (!ice_is_vsi_valid(hw, vsi_handle))
3654                 return ICE_ERR_PARAM;
3655
3656         ice_acquire_lock(&hw->rss_locks);
3657         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3658                             ice_rss_cfg, l_entry) {
3659                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3660                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3661                                                       r->hashed_flds,
3662                                                       r->packet_hdr,
3663                                                       ICE_RSS_OUTER_HEADERS,
3664                                                       r->symm);
3665                         if (status)
3666                                 break;
3667                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3668                                                       r->hashed_flds,
3669                                                       r->packet_hdr,
3670                                                       ICE_RSS_INNER_HEADERS,
3671                                                       r->symm);
3672                         if (status)
3673                                 break;
3674                 }
3675         }
3676         ice_release_lock(&hw->rss_locks);
3677
3678         return status;
3679 }
3680
3681 /**
3682  * ice_get_rss_cfg - returns hashed fields for the given header types
3683  * @hw: pointer to the hardware structure
3684  * @vsi_handle: software VSI handle
3685  * @hdrs: protocol header type
3686  *
3687  * This function will return the match fields of the first instance of a flow
3688  * profile having the given header types and containing the input VSI.
3689  */
3690 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3691 {
3692         struct ice_rss_cfg *r, *rss_cfg = NULL;
3693
3694         /* verify the protocol header is non-zero and the VSI is valid */
3695         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3696                 return ICE_HASH_INVALID;
3697
3698         ice_acquire_lock(&hw->rss_locks);
3699         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3700                             ice_rss_cfg, l_entry)
3701                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3702                     r->packet_hdr == hdrs) {
3703                         rss_cfg = r;
3704                         break;
3705                 }
3706         ice_release_lock(&hw->rss_locks);
3707
3708         return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
3709 }
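
/*
 * Illustrative usage sketch: query which fields a VSI currently hashes on for
 * IPv4/TCP packets; ICE_HASH_INVALID is returned when no matching
 * configuration is found:
 *
 *	u64 flds = ice_get_rss_cfg(hw, vsi_handle,
 *				   ICE_FLOW_SEG_HDR_IPV4 |
 *				   ICE_FLOW_SEG_HDR_TCP);
 *	if (flds == ICE_HASH_INVALID)
 *		return;
 */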