00f58697f89abf91d9e109780a50df953d911c04
[dpdk.git] / drivers / net / ice / base / ice_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
/* Size of known protocol header fields, in bytes (the ICE_FLOW_FLD_INFO
 * macros below convert these byte sizes to bit sizes)
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
#define ICE_FLOW_FLD_SZ_PFCP_SEID 8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID	4
#define ICE_FLOW_FLD_SZ_ESP_SPI	4
#define ICE_FLOW_FLD_SZ_AH_SPI	4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI	4
30
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;	/* protocol header the field lives in */
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
	u16 mask;	/* 16-bit mask for field; 0 means no explicit mask */
};
38
/* Build an ice_flow_field_info initializer; offset and size are supplied in
 * bytes and stored in bits.  No explicit match mask is used (mask == 0).
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = 0, \
}

/* Same as ICE_FLOW_FLD_INFO, but with an explicit 16-bit match mask */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
52
/* Table containing properties of supported protocol header fields.
 * Indexed by enum ice_flow_field_idx; the ICE_FLOW_FIELD_IDX_* comment
 * above each entry names the index that entry corresponds to.
 */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* Ether */
	/* ICE_FLOW_FIELD_IDX_ETH_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ETH_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_S_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_C_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
	/* IPv4 / IPv6 */
	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x00fc),
	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x0ff0),
	/* TTL and PROT are one-byte fields sharing the 16-bit word at byte
	 * offset 8 of the IPv4 header (offset 6 for IPv6 TTL/next-header);
	 * the mask selects which byte of that word is matched.  The hdr is
	 * ICE_FLOW_SEG_HDR_NONE — presumably resolved to the active L3
	 * header elsewhere; TODO confirm against the field-extract logic.
	 */
	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* Transport */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
	/* ARP */
	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_OP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
	/* ICMP */
	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
	/* GTP */
	/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
			      ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
	/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* PPPOE */
	/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
			  ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
	/* PFCP */
	/* ICE_FLOW_FIELD_IDX_PFCP_SEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
			  ICE_FLOW_FLD_SZ_PFCP_SEID),
	/* L2TPV3 */
	/* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
			  ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
	/* ESP */
	/* ICE_FLOW_FIELD_IDX_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
			  ICE_FLOW_FLD_SZ_ESP_SPI),
	/* AH */
	/* ICE_FLOW_FIELD_IDX_AH_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
			  ICE_FLOW_FLD_SZ_AH_SPI),
	/* NAT_T_ESP */
	/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
			  ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
};
172
/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Each table below is a 1024-bit set (32 x 32-bit words) consumed as an
 * ice_bitmap_t: a set bit marks a hardware packet type (ptype) index to
 * which the named protocol header applies.  ice_flow_proc_seg_hdrs() ANDs
 * these tables together to narrow the set of ptypes matching a profile.
 * The word values are hardware-defined constants; do not reformat them.
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000155, 0x00000000, 0x00000000,
	0x0003000F, 0x000FC000, 0x83E0F800, 0x00000101,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x001FF800, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
	0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
	0x00000800, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00410000, 0x90842000, 0x00000007,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00820000, 0x21084000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x01040000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
	0x10000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
	0x00000000, 0x02040408, 0x40000102, 0x08101020,
	0x00000408, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x42108000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC header
 * (currently empty — no ptypes carry an inner MAC without VLAN)
 */
static const u32 ice_ptypes_mac_il[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC */
static const u32 ice_ptypes_gtpc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000180, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC with TEID */
static const u32 ice_ptypes_gtpc_tid[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000060, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPU
 *
 * Unlike the bitmaps above, the ice_attr_gtpu_* tables pair individual
 * GTP-U ptypes with a PTYPE attribute (PDU extension header present,
 * downlink, or uplink) to be programmed alongside the ptype.
 */
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
};

/* GTP-U ptypes carrying the downlink (network-to-UE) attribute */
static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
};

/* GTP-U ptypes carrying the uplink (UE-to-network) attribute */
static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
};

/* Packet types for packets with a GTP-U header */
static const u32 ice_ptypes_gtpu[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for pppoe */
static const u32 ice_ptypes_pppoe[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with PFCP NODE header */
static const u32 ice_ptypes_pfcp_node[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x80000000, 0x00000002,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with PFCP SESSION header */
static const u32 ice_ptypes_pfcp_session[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000005,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for l2tpv3 */
static const u32 ice_ptypes_l2tpv3[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000300,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for esp */
static const u32 ice_ptypes_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000003, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for ah */
static const u32 ice_ptypes_ah[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x0000000C, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with NAT_T ESP header */
static const u32 ice_ptypes_nat_t_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000030, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for MAC frames that carry no IP payload (outer/single MAC) */
static const u32 ice_ptypes_mac_non_ip_ofos[] = {
	0x00000846, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
545
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk;	/* HW block the profile is being created for */
	u16 entry_length; /* # of bytes formatted entry will require */
	u8 es_cnt;	/* number of es[] entries populated so far */
	struct ice_flow_prof *prof;	/* profile under construction */

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];	/* field-vector words */
	/* attributes can be used to add attributes to a particular PTYPE */
	const struct ice_ptype_attributes *attr;
	u16 attr_cnt;	/* number of entries in attr[] */

	u16 mask[ICE_MAX_FV_WORDS];	/* per-word match mask for es[] */
	/* set of packet types (ptypes) this profile applies to */
	ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
};
564
/* Header flags treated as inner/tunnel headers for RSS purposes
 * (NOTE(review): role inferred from the name — confirm against callers)
 */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	ICE_FLOW_SEG_HDR_NAT_T_ESP)

/* All header flags belonging to a given protocol layer, used to detect
 * segments that request more than one header at the same layer
 */
#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
580
581 /**
582  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
583  * @segs: array of one or more packet segments that describe the flow
584  * @segs_cnt: number of packet segments provided
585  */
586 static enum ice_status
587 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
588 {
589         u8 i;
590
591         for (i = 0; i < segs_cnt; i++) {
592                 /* Multiple L3 headers */
593                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
594                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
595                         return ICE_ERR_PARAM;
596
597                 /* Multiple L4 headers */
598                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
599                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
600                         return ICE_ERR_PARAM;
601         }
602
603         return ICE_SUCCESS;
604 }
605
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
616
617 /**
618  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
619  * @params: information about the flow to be processed
620  * @seg: index of packet segment whose header size is to be determined
621  */
622 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
623 {
624         u16 sz;
625
626         /* L2 headers */
627         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
628                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
629
630         /* L3 headers */
631         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
632                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
633         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
634                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
635         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
636                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
637         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
638                 /* A L3 header is required if L4 is specified */
639                 return 0;
640
641         /* L4 headers */
642         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
643                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
644         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
645                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
646         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
647                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
648         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
649                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
650
651         return sz;
652 }
653
654 /**
655  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
656  * @params: information about the flow to be processed
657  *
658  * This function identifies the packet types associated with the protocol
659  * headers being present in packet segments of the specified flow profile.
660  */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof;
	u8 i;

	/* Start with every packet type enabled; each required header below
	 * narrows the set by ANDing with the table of PTYPEs that carry
	 * that header.
	 */
	ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
		   ICE_NONDMA_MEM);

	prof = params->prof;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		const ice_bitmap_t *src;
		u32 hdrs;

		hdrs = prof->segs[i].hdrs;

		/* Outermost segment (!i) and inner segments use different
		 * PTYPE tables (outer-first vs inner-last variants).
		 */
		if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
				(const ice_bitmap_t *)ice_ptypes_mac_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		}

		/* VLAN PTYPEs are only tracked for inner segments */
		if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
			src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		}

		/* ARP can only appear in the outermost segment */
		if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
			ice_and_bitmap(params->ptypes, params->ptypes,
				       (const ice_bitmap_t *)ice_ptypes_arp_of,
				       ICE_FLOW_PTYPE_MAX);
		}

		/* L3: IPv4 and IPv6 are mutually exclusive per segment
		 * (validated earlier); further narrow by the requested L4.
		 */
		if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
				(const ice_bitmap_t *)ice_ptypes_ipv4_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
			if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
				src = (const ice_bitmap_t *)ice_ptypes_udp_il;
				ice_and_bitmap(params->ptypes,
						params->ptypes, src,
					       ICE_FLOW_PTYPE_MAX);
			} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
				ice_and_bitmap(params->ptypes, params->ptypes,
					       (const ice_bitmap_t *)
					       ice_ptypes_tcp_il,
					       ICE_FLOW_PTYPE_MAX);
			} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
				src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
				ice_and_bitmap(params->ptypes, params->ptypes,
					       src, ICE_FLOW_PTYPE_MAX);
			}
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
				(const ice_bitmap_t *)ice_ptypes_ipv6_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
			if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
				src = (const ice_bitmap_t *)ice_ptypes_udp_il;
				ice_and_bitmap(params->ptypes,
						params->ptypes, src,
					       ICE_FLOW_PTYPE_MAX);
			} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
				ice_and_bitmap(params->ptypes, params->ptypes,
					       (const ice_bitmap_t *)
					       ice_ptypes_tcp_il,
					       ICE_FLOW_PTYPE_MAX);
			} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
				src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
				ice_and_bitmap(params->ptypes, params->ptypes,
					       src, ICE_FLOW_PTYPE_MAX);
			}
		}

		/* Non-IP Ethernet payload vs PPPoE */
		if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
			src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
			src = (const ice_bitmap_t *)ice_ptypes_pppoe;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		}

		/* Tunnel/upper protocols: the GTPU variants share one PTYPE
		 * table but select different packet attributes (downlink,
		 * uplink, extension header).
		 */
		if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
				(const ice_bitmap_t *)ice_ptypes_icmp_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
			/* GRE is only matched as an outer header */
			if (!i) {
				src = (const ice_bitmap_t *)ice_ptypes_gre_of;
				ice_and_bitmap(params->ptypes, params->ptypes,
					       src, ICE_FLOW_PTYPE_MAX);
			}
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpc;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with downlink */
			params->attr = ice_attr_gtpu_down;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with uplink */
			params->attr = ice_attr_gtpu_up;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with Extension Header */
			params->attr = ice_attr_gtpu_eh;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
			src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
			src = (const ice_bitmap_t *)ice_ptypes_esp;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
			src = (const ice_bitmap_t *)ice_ptypes_ah;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
			src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		}

		/* PFCP: keep node or session PTYPEs when requested;
		 * otherwise explicitly remove both PFCP sets so PFCP
		 * packets cannot match a non-PFCP profile.
		 */
		if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
			if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
				src =
				(const ice_bitmap_t *)ice_ptypes_pfcp_node;
			else
				src =
				(const ice_bitmap_t *)ice_ptypes_pfcp_session;

			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else {
			src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
			ice_andnot_bitmap(params->ptypes, params->ptypes,
					  src, ICE_FLOW_PTYPE_MAX);

			src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
			ice_andnot_bitmap(params->ptypes, params->ptypes,
					  src, ICE_FLOW_PTYPE_MAX);
		}
	}

	return ICE_SUCCESS;
}
837
838 /**
839  * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
840  * @hw: pointer to the HW struct
841  * @params: information about the flow to be processed
842  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
843  *
 * This function will allocate an extraction sequence entry for a DWORD-size
845  * chunk of the packet flags.
846  */
847 static enum ice_status
848 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
849                           struct ice_flow_prof_params *params,
850                           enum ice_flex_mdid_pkt_flags flags)
851 {
852         u8 fv_words = hw->blk[params->blk].es.fvw;
853         u8 idx;
854
855         /* Make sure the number of extraction sequence entries required does not
856          * exceed the block's capacity.
857          */
858         if (params->es_cnt >= fv_words)
859                 return ICE_ERR_MAX_LIMIT;
860
861         /* some blocks require a reversed field vector layout */
862         if (hw->blk[params->blk].es.reverse)
863                 idx = fv_words - params->es_cnt - 1;
864         else
865                 idx = params->es_cnt;
866
867         params->es[idx].prot_id = ICE_PROT_META_ID;
868         params->es[idx].off = flags;
869         params->es_cnt++;
870
871         return ICE_SUCCESS;
872 }
873
874 /**
875  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
876  * @hw: pointer to the HW struct
877  * @params: information about the flow to be processed
878  * @seg: packet segment index of the field to be extracted
879  * @fld: ID of field to be extracted
880  * @match: bitfield of all fields
881  *
882  * This function determines the protocol ID, offset, and size of the given
883  * field. It then allocates one or more extraction sequence entries for the
884  * given field, and fill the entries with protocol ID and offset information.
885  */
886 static enum ice_status
887 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
888                     u8 seg, enum ice_flow_field fld, u64 match)
889 {
890         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
891         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
892         u8 fv_words = hw->blk[params->blk].es.fvw;
893         struct ice_flow_fld_info *flds;
894         u16 cnt, ese_bits, i;
895         u16 sib_mask = 0;
896         s16 adj = 0;
897         u16 mask;
898         u16 off;
899
900         flds = params->prof->segs[seg].fields;
901
902         switch (fld) {
903         case ICE_FLOW_FIELD_IDX_ETH_DA:
904         case ICE_FLOW_FIELD_IDX_ETH_SA:
905         case ICE_FLOW_FIELD_IDX_S_VLAN:
906         case ICE_FLOW_FIELD_IDX_C_VLAN:
907                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
908                 break;
909         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
910                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
911                 break;
912         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
913                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
914                 break;
915         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
916                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
917                 break;
918         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
919         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
920                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
921
922                 /* TTL and PROT share the same extraction seq. entry.
923                  * Each is considered a sibling to the other in terms of sharing
924                  * the same extraction sequence entry.
925                  */
926                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
927                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
928                 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
929                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
930
931                 /* If the sibling field is also included, that field's
932                  * mask needs to be included.
933                  */
934                 if (match & BIT(sib))
935                         sib_mask = ice_flds_info[sib].mask;
936                 break;
937         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
938         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
939                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
940
941                 /* TTL and PROT share the same extraction seq. entry.
942                  * Each is considered a sibling to the other in terms of sharing
943                  * the same extraction sequence entry.
944                  */
945                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
946                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
947                 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
948                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
949
950                 /* If the sibling field is also included, that field's
951                  * mask needs to be included.
952                  */
953                 if (match & BIT(sib))
954                         sib_mask = ice_flds_info[sib].mask;
955                 break;
956         case ICE_FLOW_FIELD_IDX_IPV4_SA:
957         case ICE_FLOW_FIELD_IDX_IPV4_DA:
958                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
959                 break;
960         case ICE_FLOW_FIELD_IDX_IPV6_SA:
961         case ICE_FLOW_FIELD_IDX_IPV6_DA:
962                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
963                 break;
964         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
965         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
966         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
967                 prot_id = ICE_PROT_TCP_IL;
968                 break;
969         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
970         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
971                 prot_id = ICE_PROT_UDP_IL_OR_S;
972                 break;
973         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
974         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
975                 prot_id = ICE_PROT_SCTP_IL;
976                 break;
977         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
978         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
979         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
980         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
981         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
982         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
983                 /* GTP is accessed through UDP OF protocol */
984                 prot_id = ICE_PROT_UDP_OF;
985                 break;
986         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
987                 prot_id = ICE_PROT_PPPOE;
988                 break;
989         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
990                 prot_id = ICE_PROT_UDP_IL_OR_S;
991                 break;
992         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
993                 prot_id = ICE_PROT_L2TPV3;
994                 break;
995         case ICE_FLOW_FIELD_IDX_ESP_SPI:
996                 prot_id = ICE_PROT_ESP_F;
997                 break;
998         case ICE_FLOW_FIELD_IDX_AH_SPI:
999                 prot_id = ICE_PROT_ESP_2;
1000                 break;
1001         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1002                 prot_id = ICE_PROT_UDP_IL_OR_S;
1003                 break;
1004         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1005         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1006         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1007         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1008         case ICE_FLOW_FIELD_IDX_ARP_OP:
1009                 prot_id = ICE_PROT_ARP_OF;
1010                 break;
1011         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1012         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1013                 /* ICMP type and code share the same extraction seq. entry */
1014                 prot_id = (params->prof->segs[seg].hdrs &
1015                            ICE_FLOW_SEG_HDR_IPV4) ?
1016                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1017                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1018                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1019                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1020                 break;
1021         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1022                 prot_id = ICE_PROT_GRE_OF;
1023                 break;
1024         default:
1025                 return ICE_ERR_NOT_IMPL;
1026         }
1027
1028         /* Each extraction sequence entry is a word in size, and extracts a
1029          * word-aligned offset from a protocol header.
1030          */
1031         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1032
1033         flds[fld].xtrct.prot_id = prot_id;
1034         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1035                 ICE_FLOW_FV_EXTRACT_SZ;
1036         flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
1037         flds[fld].xtrct.idx = params->es_cnt;
1038         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1039
1040         /* Adjust the next field-entry index after accommodating the number of
1041          * entries this field consumes
1042          */
1043         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1044                                   ice_flds_info[fld].size, ese_bits);
1045
1046         /* Fill in the extraction sequence entries needed for this field */
1047         off = flds[fld].xtrct.off;
1048         mask = flds[fld].xtrct.mask;
1049         for (i = 0; i < cnt; i++) {
1050                 /* Only consume an extraction sequence entry if there is no
1051                  * sibling field associated with this field or the sibling entry
1052                  * already extracts the word shared with this field.
1053                  */
1054                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1055                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1056                     flds[sib].xtrct.off != off) {
1057                         u8 idx;
1058
1059                         /* Make sure the number of extraction sequence required
1060                          * does not exceed the block's capability
1061                          */
1062                         if (params->es_cnt >= fv_words)
1063                                 return ICE_ERR_MAX_LIMIT;
1064
1065                         /* some blocks require a reversed field vector layout */
1066                         if (hw->blk[params->blk].es.reverse)
1067                                 idx = fv_words - params->es_cnt - 1;
1068                         else
1069                                 idx = params->es_cnt;
1070
1071                         params->es[idx].prot_id = prot_id;
1072                         params->es[idx].off = off;
1073                         params->mask[idx] = mask | sib_mask;
1074                         params->es_cnt++;
1075                 }
1076
1077                 off += ICE_FLOW_FV_EXTRACT_SZ;
1078         }
1079
1080         return ICE_SUCCESS;
1081 }
1082
1083 /**
1084  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1085  * @hw: pointer to the HW struct
1086  * @params: information about the flow to be processed
 * @seg: index of packet segment whose raw fields are to be extracted
1088  */
1089 static enum ice_status
1090 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1091                      u8 seg)
1092 {
1093         u16 fv_words;
1094         u16 hdrs_sz;
1095         u8 i;
1096
1097         if (!params->prof->segs[seg].raws_cnt)
1098                 return ICE_SUCCESS;
1099
1100         if (params->prof->segs[seg].raws_cnt >
1101             ARRAY_SIZE(params->prof->segs[seg].raws))
1102                 return ICE_ERR_MAX_LIMIT;
1103
1104         /* Offsets within the segment headers are not supported */
1105         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1106         if (!hdrs_sz)
1107                 return ICE_ERR_PARAM;
1108
1109         fv_words = hw->blk[params->blk].es.fvw;
1110
1111         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1112                 struct ice_flow_seg_fld_raw *raw;
1113                 u16 off, cnt, j;
1114
1115                 raw = &params->prof->segs[seg].raws[i];
1116
1117                 /* Storing extraction information */
1118                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1119                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1120                         ICE_FLOW_FV_EXTRACT_SZ;
1121                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1122                         BITS_PER_BYTE;
1123                 raw->info.xtrct.idx = params->es_cnt;
1124
1125                 /* Determine the number of field vector entries this raw field
1126                  * consumes.
1127                  */
1128                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1129                                           (raw->info.src.last * BITS_PER_BYTE),
1130                                           (ICE_FLOW_FV_EXTRACT_SZ *
1131                                            BITS_PER_BYTE));
1132                 off = raw->info.xtrct.off;
1133                 for (j = 0; j < cnt; j++) {
1134                         u16 idx;
1135
1136                         /* Make sure the number of extraction sequence required
1137                          * does not exceed the block's capability
1138                          */
1139                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1140                             params->es_cnt >= ICE_MAX_FV_WORDS)
1141                                 return ICE_ERR_MAX_LIMIT;
1142
1143                         /* some blocks require a reversed field vector layout */
1144                         if (hw->blk[params->blk].es.reverse)
1145                                 idx = fv_words - params->es_cnt - 1;
1146                         else
1147                                 idx = params->es_cnt;
1148
1149                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1150                         params->es[idx].off = off;
1151                         params->es_cnt++;
1152                         off += ICE_FLOW_FV_EXTRACT_SZ;
1153                 }
1154         }
1155
1156         return ICE_SUCCESS;
1157 }
1158
1159 /**
1160  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1161  * @hw: pointer to the HW struct
1162  * @params: information about the flow to be processed
1163  *
1164  * This function iterates through all matched fields in the given segments, and
1165  * creates an extraction sequence for the fields.
1166  */
1167 static enum ice_status
1168 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1169                           struct ice_flow_prof_params *params)
1170 {
1171         enum ice_status status = ICE_SUCCESS;
1172         u8 i;
1173
1174         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1175          * packet flags
1176          */
1177         if (params->blk == ICE_BLK_ACL) {
1178                 status = ice_flow_xtract_pkt_flags(hw, params,
1179                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1180                 if (status)
1181                         return status;
1182         }
1183
1184         for (i = 0; i < params->prof->segs_cnt; i++) {
1185                 u64 match = params->prof->segs[i].match;
1186                 enum ice_flow_field j;
1187
1188                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1189                         const u64 bit = BIT_ULL(j);
1190
1191                         if (match & bit) {
1192                                 status = ice_flow_xtract_fld(hw, params, i, j,
1193                                                              match);
1194                                 if (status)
1195                                         return status;
1196                                 match &= ~bit;
1197                         }
1198                 }
1199
1200                 /* Process raw matching bytes */
1201                 status = ice_flow_xtract_raws(hw, params, i);
1202                 if (status)
1203                         return status;
1204         }
1205
1206         return status;
1207 }
1208
1209 /**
1210  * ice_flow_sel_acl_scen - returns the specific scenario
1211  * @hw: pointer to the hardware structure
1212  * @params: information about the flow to be processed
1213  *
1214  * This function will return the specific scenario based on the
1215  * params passed to it
1216  */
1217 static enum ice_status
1218 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1219 {
1220         /* Find the best-fit scenario for the provided match width */
1221         struct ice_acl_scen *cand_scen = NULL, *scen;
1222
1223         if (!hw->acl_tbl)
1224                 return ICE_ERR_DOES_NOT_EXIST;
1225
1226         /* Loop through each scenario and match against the scenario width
1227          * to select the specific scenario
1228          */
1229         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1230                 if (scen->eff_width >= params->entry_length &&
1231                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1232                         cand_scen = scen;
1233         if (!cand_scen)
1234                 return ICE_ERR_DOES_NOT_EXIST;
1235
1236         params->prof->cfg.scen = cand_scen;
1237
1238         return ICE_SUCCESS;
1239 }
1240
1241 /**
1242  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1243  * @params: information about the flow to be processed
1244  */
static enum ice_status
ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
{
	u16 index, i, range_idx = 0;

	/* Byte-selection entries start at a fixed base index */
	index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		struct ice_flow_seg_info *seg = &params->prof->segs[i];
		u64 match = seg->match;
		u8 j;

		/* Lay out each matched field until all match bits consumed */
		for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
			struct ice_flow_fld_info *fld;
			const u64 bit = BIT_ULL(j);

			if (!(match & bit))
				continue;

			fld = &seg->fields[j];
			fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;

			if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
				fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;

				/* Range checking only supported for single
				 * words
				 */
				if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
							fld->xtrct.disp,
							BITS_PER_BYTE * 2) > 1)
					return ICE_ERR_PARAM;

				/* Ranges must define low and high values */
				if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
				    fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
					return ICE_ERR_PARAM;

				/* Range fields consume a range checker, not
				 * byte-selection slots
				 */
				fld->entry.val = range_idx++;
			} else {
				/* Store adjusted byte-length of field for later
				 * use, taking into account potential
				 * non-byte-aligned displacement
				 */
				fld->entry.last = DIVIDE_AND_ROUND_UP
					(ice_flds_info[j].size +
					 (fld->xtrct.disp % BITS_PER_BYTE),
					 BITS_PER_BYTE);
				fld->entry.val = index;
				index += fld->entry.last;
			}

			match &= ~bit;
		}

		/* Raw fields are appended after the named fields */
		for (j = 0; j < seg->raws_cnt; j++) {
			struct ice_flow_seg_fld_raw *raw = &seg->raws[j];

			raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
			raw->info.entry.val = index;
			raw->info.entry.last = raw->info.src.last;
			index += raw->info.entry.last;
		}
	}

	/* Currently only support using the byte selection base, which only
	 * allows for an effective entry size of 30 bytes. Reject anything
	 * larger.
	 */
	if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
		return ICE_ERR_PARAM;

	/* Only 8 range checkers per profile, reject anything trying to use
	 * more
	 */
	if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
		return ICE_ERR_PARAM;

	/* Store # bytes required for entry for later use */
	params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	return ICE_SUCCESS;
}
1328
1329 /**
1330  * ice_flow_proc_segs - process all packet segments associated with a profile
1331  * @hw: pointer to the HW struct
1332  * @params: information about the flow to be processed
1333  */
1334 static enum ice_status
1335 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1336 {
1337         enum ice_status status;
1338
1339         status = ice_flow_proc_seg_hdrs(params);
1340         if (status)
1341                 return status;
1342
1343         status = ice_flow_create_xtrct_seq(hw, params);
1344         if (status)
1345                 return status;
1346
1347         switch (params->blk) {
1348         case ICE_BLK_FD:
1349         case ICE_BLK_RSS:
1350                 status = ICE_SUCCESS;
1351                 break;
1352         case ICE_BLK_ACL:
1353                 status = ice_flow_acl_def_entry_frmt(params);
1354                 if (status)
1355                         return status;
1356                 status = ice_flow_sel_acl_scen(hw, params);
1357                 if (status)
1358                         return status;
1359                 break;
1360         case ICE_BLK_SW:
1361         default:
1362                 return ICE_ERR_NOT_IMPL;
1363         }
1364
1365         return status;
1366 }
1367
1368 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1369 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1370 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1371
1372 /**
1373  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1374  * @hw: pointer to the HW struct
1375  * @blk: classification stage
1376  * @dir: flow direction
1377  * @segs: array of one or more packet segments that describe the flow
1378  * @segs_cnt: number of packet segments provided
1379  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1380  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1381  */
1382 static struct ice_flow_prof *
1383 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1384                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1385                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1386 {
1387         struct ice_flow_prof *p, *prof = NULL;
1388
1389         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1390         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1391                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1392                     segs_cnt && segs_cnt == p->segs_cnt) {
1393                         u8 i;
1394
1395                         /* Check for profile-VSI association if specified */
1396                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1397                             ice_is_vsi_valid(hw, vsi_handle) &&
1398                             !ice_is_bit_set(p->vsis, vsi_handle))
1399                                 continue;
1400
1401                         /* Protocol headers must be checked. Matched fields are
1402                          * checked if specified.
1403                          */
1404                         for (i = 0; i < segs_cnt; i++)
1405                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1406                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1407                                      segs[i].match != p->segs[i].match))
1408                                         break;
1409
1410                         /* A match is found if all segments are matched */
1411                         if (i == segs_cnt) {
1412                                 prof = p;
1413                                 break;
1414                         }
1415                 }
1416         }
1417         ice_release_lock(&hw->fl_profs_locks[blk]);
1418
1419         return prof;
1420 }
1421
1422 /**
1423  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1424  * @hw: pointer to the HW struct
1425  * @blk: classification stage
1426  * @dir: flow direction
1427  * @segs: array of one or more packet segments that describe the flow
1428  * @segs_cnt: number of packet segments provided
1429  */
1430 u64
1431 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1432                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1433 {
1434         struct ice_flow_prof *p;
1435
1436         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1437                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1438
1439         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1440 }
1441
1442 /**
1443  * ice_flow_find_prof_id - Look up a profile with given profile ID
1444  * @hw: pointer to the HW struct
1445  * @blk: classification stage
1446  * @prof_id: unique ID to identify this flow profile
1447  */
1448 static struct ice_flow_prof *
1449 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1450 {
1451         struct ice_flow_prof *p;
1452
1453         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1454                 if (p->id == prof_id)
1455                         return p;
1456         }
1457
1458         return NULL;
1459 }
1460
/**
 * ice_dealloc_flow_entry - Deallocate flow entry memory
 * @hw: pointer to the HW struct
 * @entry: flow entry to be removed
 *
 * Frees the entry's HW content buffer, range-checker buffer, and action
 * array, then the entry structure itself. Safe to call with a NULL entry.
 */
static void
ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
{
	if (!entry)
		return;

	if (entry->entry)
		ice_free(hw, entry->entry);

	if (entry->range_buf) {
		ice_free(hw, entry->range_buf);
		entry->range_buf = NULL;
	}

	if (entry->acts) {
		ice_free(hw, entry->acts);
		entry->acts = NULL;
		entry->acts_cnt = 0;
	}

	/* Free the entry itself last; the pointer resets above are only
	 * defensive since the containing memory is released here.
	 */
	ice_free(hw, entry);
}
1488
1489 #define ICE_ACL_INVALID_SCEN    0x3f
1490
1491 /**
1492  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1493  * @hw: pointer to the hardware structure
1494  * @prof: pointer to flow profile
1495  * @buf: destination buffer function writes partial extraction sequence to
1496  *
1497  * returns ICE_SUCCESS if no PF is associated to the given profile
1498  * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1499  * returns other error code for real error
1500  */
1501 static enum ice_status
1502 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1503                             struct ice_aqc_acl_prof_generic_frmt *buf)
1504 {
1505         enum ice_status status;
1506         u8 prof_id = 0;
1507
1508         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1509         if (status)
1510                 return status;
1511
1512         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1513         if (status)
1514                 return status;
1515
1516         /* If all PF's associated scenarios are all 0 or all
1517          * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1518          * not been configured yet.
1519          */
1520         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1521             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1522             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1523             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1524                 return ICE_SUCCESS;
1525
1526         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1527             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1528             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1529             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1530             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1531             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1532             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1533             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1534                 return ICE_SUCCESS;
1535         else
1536                 return ICE_ERR_IN_USE;
1537 }
1538
1539 /**
1540  * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1541  * @hw: pointer to the hardware structure
1542  * @acts: array of actions to be performed on a match
1543  * @acts_cnt: number of actions
1544  */
1545 static enum ice_status
1546 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1547                            u8 acts_cnt)
1548 {
1549         int i;
1550
1551         for (i = 0; i < acts_cnt; i++) {
1552                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1553                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1554                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1555                         struct ice_acl_cntrs cntrs;
1556                         enum ice_status status;
1557
1558                         cntrs.bank = 0; /* Only bank0 for the moment */
1559                         cntrs.first_cntr =
1560                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1561                         cntrs.last_cntr =
1562                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1563
1564                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1565                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1566                         else
1567                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1568
1569                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1570                         if (status)
1571                                 return status;
1572                 }
1573         }
1574         return ICE_SUCCESS;
1575 }
1576
1577 /**
1578  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1579  * @hw: pointer to the hardware structure
1580  * @prof: pointer to flow profile
1581  *
1582  * Disassociate the scenario from the profile for the PF of the VSI.
1583  */
1584 static enum ice_status
1585 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1586 {
1587         struct ice_aqc_acl_prof_generic_frmt buf;
1588         enum ice_status status = ICE_SUCCESS;
1589         u8 prof_id = 0;
1590
1591         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1592
1593         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1594         if (status)
1595                 return status;
1596
1597         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1598         if (status)
1599                 return status;
1600
1601         /* Clear scenario for this PF */
1602         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1603         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1604
1605         return status;
1606 }
1607
1608 /**
1609  * ice_flow_rem_entry_sync - Remove a flow entry
1610  * @hw: pointer to the HW struct
1611  * @blk: classification stage
1612  * @entry: flow entry to be removed
1613  */
1614 static enum ice_status
1615 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1616                         struct ice_flow_entry *entry)
1617 {
1618         if (!entry)
1619                 return ICE_ERR_BAD_PTR;
1620
1621         if (blk == ICE_BLK_ACL) {
1622                 enum ice_status status;
1623
1624                 if (!entry->prof)
1625                         return ICE_ERR_BAD_PTR;
1626
1627                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1628                                            entry->scen_entry_idx);
1629                 if (status)
1630                         return status;
1631
1632                 /* Checks if we need to release an ACL counter. */
1633                 if (entry->acts_cnt && entry->acts)
1634                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1635                                                    entry->acts_cnt);
1636         }
1637
1638         LIST_DEL(&entry->l_entry);
1639
1640         ice_dealloc_flow_entry(hw, entry);
1641
1642         return ICE_SUCCESS;
1643 }
1644
/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @acts: array of default actions
 * @acts_cnt: number of default actions
 * @prof: stores the returned flow profile added
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_action *acts, u8 acts_cnt,
		       struct ice_flow_prof **prof)
{
	struct ice_flow_prof_params params;
	enum ice_status status;
	u8 i;

	/* A non-zero action count requires an action array */
	if (!prof || (acts_cnt && !acts))
		return ICE_ERR_BAD_PTR;

	ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
	params.prof = (struct ice_flow_prof *)
		ice_malloc(hw, sizeof(*params.prof));
	if (!params.prof)
		return ICE_ERR_NO_MEMORY;

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params.es[i].prot_id = ICE_PROT_INVALID;
		params.es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params.blk = blk;
	params.prof->id = prof_id;
	params.prof->dir = dir;
	params.prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
			   ICE_NONDMA_TO_NONDMA);

	/* Make a copy of the actions that need to be persistent in the flow
	 * profile instance.
	 */
	if (acts_cnt) {
		params.prof->acts = (struct ice_flow_action *)
			ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
				   ICE_NONDMA_TO_NONDMA);

		if (!params.prof->acts) {
			status = ICE_ERR_NO_MEMORY;
			goto out;
		}
	}

	/* Derive ptypes and the extraction sequence (plus the ACL entry
	 * format/scenario where applicable) from the copied segments.
	 */
	status = ice_flow_proc_segs(hw, &params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW,
			  "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
			      params.attr, params.attr_cnt, params.es,
			      params.mask);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params.prof->entries);
	ice_init_lock(&params.prof->entries_lock);
	*prof = params.prof;

out:
	/* On any failure, release the profile copy and its action array;
	 * ownership transfers to the caller only on success via *prof.
	 */
	if (status) {
		if (params.prof->acts)
			ice_free(hw, params.prof->acts);
		ice_free(hw, params.prof);
	}

	return status;
}
1740
/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the flow profile */
	if (!LIST_EMPTY(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		ice_acquire_lock(&prof->entries_lock);

		LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
					 l_entry) {
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		/* NOTE(review): a failure above only stops this loop; profile
		 * removal still proceeds and the error code is overwritten
		 * below — confirm this best-effort behavior is intended.
		 */
		ice_release_lock(&prof->entries_lock);
	}

	if (blk == ICE_BLK_ACL) {
		struct ice_aqc_acl_profile_ranges query_rng_buf;
		struct ice_aqc_acl_prof_generic_frmt buf;
		u8 prof_id = 0;

		/* Disassociate the scenario from the profile for the PF */
		status = ice_flow_acl_disassoc_scen(hw, prof);
		if (status)
			return status;

		/* Clear the range-checker if the profile ID is no longer
		 * used by any PF
		 */
		status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
		if (status && status != ICE_ERR_IN_USE) {
			return status;
		} else if (!status) {
			/* Clear the range-checker value for profile ID */
			ice_memset(&query_rng_buf, 0,
				   sizeof(struct ice_aqc_acl_profile_ranges),
				   ICE_NONDMA_MEM);

			status = ice_flow_get_hw_prof(hw, blk, prof->id,
						      &prof_id);
			if (status)
				return status;

			/* Program an all-zero ranges buffer to disable the
			 * range checkers for this HW profile
			 */
			status = ice_prog_acl_prof_ranges(hw, prof_id,
							  &query_rng_buf, NULL);
			if (status)
				return status;
		}
	}

	/* Remove all hardware profiles associated with this flow profile */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		/* Unlink and free the software profile only after the HW
		 * removal succeeded
		 */
		LIST_DEL(&prof->l_entry);
		ice_destroy_lock(&prof->entries_lock);
		if (prof->acts)
			ice_free(hw, prof->acts);
		ice_free(hw, prof);
	}

	return status;
}
1817
1818 /**
1819  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1820  * @buf: Destination buffer function writes partial xtrct sequence to
1821  * @info: Info about field
1822  */
1823 static void
1824 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1825                                struct ice_flow_fld_info *info)
1826 {
1827         u16 dst, i;
1828         u8 src;
1829
1830         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1831                 info->xtrct.disp / BITS_PER_BYTE;
1832         dst = info->entry.val;
1833         for (i = 0; i < info->entry.last; i++)
1834                 /* HW stores field vector words in LE, convert words back to BE
1835                  * so constructed entries will end up in network order
1836                  */
1837                 buf->byte_selection[dst++] = src++ ^ 1;
1838 }
1839
/**
 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
 * @hw: pointer to the hardware structure
 * @prof: pointer to flow profile
 */
static enum ice_status
ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
{
	struct ice_aqc_acl_prof_generic_frmt buf;
	struct ice_flow_fld_info *info;
	enum ice_status status;
	u8 prof_id = 0;
	u16 i;

	ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

	status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
	if (status)
		return status;

	/* ICE_ERR_IN_USE means another PF already programmed this profile's
	 * format; in that case only the per-PF scenario is updated below.
	 */
	status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
	if (status && status != ICE_ERR_IN_USE)
		return status;

	if (!status) {
		/* Program the profile dependent configuration. This is done
		 * only once regardless of the number of PFs using that profile
		 */
		ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

		for (i = 0; i < prof->segs_cnt; i++) {
			struct ice_flow_seg_info *seg = &prof->segs[i];
			u64 match = seg->match;
			u16 j;

			/* Walk only the field bits set in the match bitmap */
			for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
				const u64 bit = BIT_ULL(j);

				if (!(match & bit))
					continue;

				info = &seg->fields[j];

				/* Range-checked fields select a whole FV word;
				 * all others use byte selection
				 */
				if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
					buf.word_selection[info->entry.val] =
								info->xtrct.idx;
				else
					ice_flow_acl_set_xtrct_seq_fld(&buf,
								       info);

				match &= ~bit;
			}

			/* Raw (offset-based) fields always use byte selection */
			for (j = 0; j < seg->raws_cnt; j++) {
				info = &seg->raws[j].info;
				ice_flow_acl_set_xtrct_seq_fld(&buf, info);
			}
		}

		/* Mark every PF's scenario invalid until a PF claims it */
		ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
			   ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
			   ICE_NONDMA_MEM);
	}

	/* Update the current PF */
	buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
	status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);

	return status;
}
1910
1911 /**
1912  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1913  * @hw: pointer to the hardware structure
1914  * @blk: classification stage
1915  * @vsi_handle: software VSI handle
1916  * @vsig: target VSI group
1917  *
1918  * Assumption: the caller has already verified that the VSI to
1919  * be added has the same characteristics as the VSIG and will
1920  * thereby have access to all resources added to that VSIG.
1921  */
1922 enum ice_status
1923 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1924                         u16 vsig)
1925 {
1926         enum ice_status status;
1927
1928         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1929                 return ICE_ERR_PARAM;
1930
1931         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1932         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1933                                   vsig);
1934         ice_release_lock(&hw->fl_profs_locks[blk]);
1935
1936         return status;
1937 }
1938
1939 /**
1940  * ice_flow_assoc_prof - associate a VSI with a flow profile
1941  * @hw: pointer to the hardware structure
1942  * @blk: classification stage
1943  * @prof: pointer to flow profile
1944  * @vsi_handle: software VSI handle
1945  *
1946  * Assumption: the caller has acquired the lock to the profile list
1947  * and the software VSI handle has been validated
1948  */
1949 static enum ice_status
1950 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1951                     struct ice_flow_prof *prof, u16 vsi_handle)
1952 {
1953         enum ice_status status = ICE_SUCCESS;
1954
1955         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1956                 if (blk == ICE_BLK_ACL) {
1957                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
1958                         if (status)
1959                                 return status;
1960                 }
1961                 status = ice_add_prof_id_flow(hw, blk,
1962                                               ice_get_hw_vsi_num(hw,
1963                                                                  vsi_handle),
1964                                               prof->id);
1965                 if (!status)
1966                         ice_set_bit(vsi_handle, prof->vsis);
1967                 else
1968                         ice_debug(hw, ICE_DBG_FLOW,
1969                                   "HW profile add failed, %d\n",
1970                                   status);
1971         }
1972
1973         return status;
1974 }
1975
1976 /**
1977  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1978  * @hw: pointer to the hardware structure
1979  * @blk: classification stage
1980  * @prof: pointer to flow profile
1981  * @vsi_handle: software VSI handle
1982  *
1983  * Assumption: the caller has acquired the lock to the profile list
1984  * and the software VSI handle has been validated
1985  */
1986 static enum ice_status
1987 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1988                        struct ice_flow_prof *prof, u16 vsi_handle)
1989 {
1990         enum ice_status status = ICE_SUCCESS;
1991
1992         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
1993                 status = ice_rem_prof_id_flow(hw, blk,
1994                                               ice_get_hw_vsi_num(hw,
1995                                                                  vsi_handle),
1996                                               prof->id);
1997                 if (!status)
1998                         ice_clear_bit(vsi_handle, prof->vsis);
1999                 else
2000                         ice_debug(hw, ICE_DBG_FLOW,
2001                                   "HW profile remove failed, %d\n",
2002                                   status);
2003         }
2004
2005         return status;
2006 }
2007
2008 /**
2009  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2010  * @hw: pointer to the HW struct
2011  * @blk: classification stage
2012  * @dir: flow direction
2013  * @prof_id: unique ID to identify this flow profile
2014  * @segs: array of one or more packet segments that describe the flow
2015  * @segs_cnt: number of packet segments provided
2016  * @acts: array of default actions
2017  * @acts_cnt: number of default actions
2018  * @prof: stores the returned flow profile added
2019  */
2020 enum ice_status
2021 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2022                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2023                   struct ice_flow_action *acts, u8 acts_cnt,
2024                   struct ice_flow_prof **prof)
2025 {
2026         enum ice_status status;
2027
2028         if (segs_cnt > ICE_FLOW_SEG_MAX)
2029                 return ICE_ERR_MAX_LIMIT;
2030
2031         if (!segs_cnt)
2032                 return ICE_ERR_PARAM;
2033
2034         if (!segs)
2035                 return ICE_ERR_BAD_PTR;
2036
2037         status = ice_flow_val_hdrs(segs, segs_cnt);
2038         if (status)
2039                 return status;
2040
2041         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2042
2043         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2044                                         acts, acts_cnt, prof);
2045         if (!status)
2046                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2047
2048         ice_release_lock(&hw->fl_profs_locks[blk]);
2049
2050         return status;
2051 }
2052
2053 /**
2054  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2055  * @hw: pointer to the HW struct
2056  * @blk: the block for which the flow profile is to be removed
2057  * @prof_id: unique ID of the flow profile to be removed
2058  */
2059 enum ice_status
2060 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2061 {
2062         struct ice_flow_prof *prof;
2063         enum ice_status status;
2064
2065         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2066
2067         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2068         if (!prof) {
2069                 status = ICE_ERR_DOES_NOT_EXIST;
2070                 goto out;
2071         }
2072
2073         /* prof becomes invalid after the call */
2074         status = ice_flow_rem_prof_sync(hw, blk, prof);
2075
2076 out:
2077         ice_release_lock(&hw->fl_profs_locks[blk]);
2078
2079         return status;
2080 }
2081
2082 /**
2083  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2084  * @hw: pointer to the HW struct
2085  * @blk: classification stage
2086  * @prof_id: the profile ID handle
2087  * @hw_prof_id: pointer to variable to receive the HW profile ID
2088  */
2089 enum ice_status
2090 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2091                      u8 *hw_prof_id)
2092 {
2093         struct ice_prof_map *map;
2094
2095         map = ice_search_prof_id(hw, blk, prof_id);
2096         if (map) {
2097                 *hw_prof_id = map->prof_id;
2098                 return ICE_SUCCESS;
2099         }
2100
2101         return ICE_ERR_DOES_NOT_EXIST;
2102 }
2103
2104 /**
2105  * ice_flow_find_entry - look for a flow entry using its unique ID
2106  * @hw: pointer to the HW struct
2107  * @blk: classification stage
2108  * @entry_id: unique ID to identify this flow entry
2109  *
2110  * This function looks for the flow entry with the specified unique ID in all
2111  * flow profiles of the specified classification stage. If the entry is found,
2112  * and it returns the handle to the flow entry. Otherwise, it returns
2113  * ICE_FLOW_ENTRY_ID_INVAL.
2114  */
2115 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2116 {
2117         struct ice_flow_entry *found = NULL;
2118         struct ice_flow_prof *p;
2119
2120         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2121
2122         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2123                 struct ice_flow_entry *e;
2124
2125                 ice_acquire_lock(&p->entries_lock);
2126                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2127                         if (e->id == entry_id) {
2128                                 found = e;
2129                                 break;
2130                         }
2131                 ice_release_lock(&p->entries_lock);
2132
2133                 if (found)
2134                         break;
2135         }
2136
2137         ice_release_lock(&hw->fl_profs_locks[blk]);
2138
2139         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2140 }
2141
/**
 * ice_flow_acl_check_actions - Checks the ACL rule's actions
 * @hw: pointer to the hardware structure
 * @acts: array of actions to be performed on a match
 * @acts_cnt: number of actions
 * @cnt_alloc: indicates if an ACL counter has been allocated.
 *
 * Validates that every requested action is of a supported type and that no
 * action type appears twice, then allocates a hardware ACL counter for any
 * counter-type action and stores its index in the action's data.
 */
static enum ice_status
ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
			   u8 acts_cnt, bool *cnt_alloc)
{
	/* One bit per action type; used to detect duplicate action types */
	ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	int i;

	ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	*cnt_alloc = false;

	if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
		return ICE_ERR_OUT_OF_RANGE;

	for (i = 0; i < acts_cnt; i++) {
		/* Only NOP, DROP, packet counter and forward-to-queue are
		 * accepted for ACL rules.
		 */
		if (acts[i].type != ICE_FLOW_ACT_NOP &&
		    acts[i].type != ICE_FLOW_ACT_DROP &&
		    acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
		    acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
			return ICE_ERR_CFG;

		/* If the caller want to add two actions of the same type, then
		 * it is considered invalid configuration.
		 */
		if (ice_test_and_set_bit(acts[i].type, dup_check))
			return ICE_ERR_PARAM;
	}

	/* Checks if ACL counters are needed. */
	/* NOTE(review): _CNTR_BYTES and _CNTR_PKT_BYTES are tested below but
	 * were rejected by the validation loop above, so only the _CNTR_PKT
	 * branch appears reachable -- confirm whether the validation loop
	 * should also accept the byte-counter types.
	 */
	for (i = 0; i < acts_cnt; i++) {
		if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
		    acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
		    acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
			struct ice_acl_cntrs cntrs;
			enum ice_status status;

			cntrs.amount = 1;
			cntrs.bank = 0; /* Only bank0 for the moment */

			if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
				cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
			else
				cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;

			status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
			if (status)
				return status;
			/* Counter index within the bank */
			acts[i].data.acl_act.value =
						CPU_TO_LE16(cntrs.first_cntr);
			*cnt_alloc = true;
		}
	}

	return ICE_SUCCESS;
}
2204
/**
 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
 * @fld: number of the given field
 * @info: info about field
 * @range_buf: range checker configuration buffer
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @range: Input/output param indicating which range checkers are being used
 *
 * Programs one slot of the range-checker configuration (low/high boundary and
 * mask) from the entry's match data, and marks that slot as in use via @range.
 * NOTE(review): mask/last/val are read as 16-bit loads at byte offsets into
 * @data -- assumes the buffer tolerates unaligned u16 access; confirm for
 * strict-alignment targets.
 */
static void
ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
			      struct ice_aqc_acl_profile_ranges *range_buf,
			      u8 *data, u8 *range)
{
	u16 new_mask;

	/* If not specified, default mask is all bits in field */
	new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
		    BIT(ice_flds_info[fld].size) - 1 :
		    (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;

	/* If the mask is 0, then we don't need to worry about this input
	 * range checker value.
	 */
	if (new_mask) {
		/* Boundaries come from the entry's 'last' (high) and 'val'
		 * (low) match data, shifted by the extraction displacement.
		 */
		u16 new_high =
			(*(u16 *)(data + info->src.last)) << info->xtrct.disp;
		u16 new_low =
			(*(u16 *)(data + info->src.val)) << info->xtrct.disp;
		u8 range_idx = info->entry.val;

		/* Hardware expects the checker configuration big-endian */
		range_buf->checker_cfg[range_idx].low_boundary =
			CPU_TO_BE16(new_low);
		range_buf->checker_cfg[range_idx].high_boundary =
			CPU_TO_BE16(new_high);
		range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);

		/* Indicate which range checker is being used */
		*range |= BIT(range_idx);
	}
}
2245
/**
 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
 * @fld: number of the given field
 * @info: info about the field
 * @buf: buffer containing the entry
 * @dontcare: buffer containing don't care mask for entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 *
 * Copies one field's match value (and optional mask) from @data into the ACL
 * key buffer, shifting left by the sub-byte extraction displacement. Each
 * iteration carries the bits shifted out of the previous byte into the next
 * one via tmp_s/tmp_m. Bits outside the field (before the displacement and
 * after the field's end) are marked don't-care.
 */
static void
ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
			    u8 *dontcare, u8 *data)
{
	u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
	bool use_mask = false;
	u8 disp;

	src = info->src.val;
	mask = info->src.mask;
	/* Key buffer indices are relative to the byte-selection start */
	dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
	/* Sub-byte displacement of the extracted field */
	disp = info->xtrct.disp % BITS_PER_BYTE;

	if (mask != ICE_FLOW_FLD_OFF_INVAL)
		use_mask = true;

	for (k = 0; k < info->entry.last; k++, dst++) {
		/* Add overflow bits from previous byte */
		buf[dst] = (tmp_s & 0xff00) >> 8;

		/* If mask is not valid, tmp_m is always zero, so just setting
		 * dontcare to 0 (no masked bits). If mask is valid, pulls in
		 * overflow bits of mask from prev byte
		 */
		dontcare[dst] = (tmp_m & 0xff00) >> 8;

		/* If there is displacement, last byte will only contain
		 * displaced data, but there is no more data to read from user
		 * buffer, so skip so as not to potentially read beyond end of
		 * user buffer
		 */
		if (!disp || k < info->entry.last - 1) {
			/* Store shifted data to use in next byte */
			tmp_s = data[src++] << disp;

			/* Add current (shifted) byte */
			buf[dst] |= tmp_s & 0xff;

			/* Handle mask if valid */
			if (use_mask) {
				/* Invert: dontcare bits are those NOT matched */
				tmp_m = (~data[mask++] & 0xff) << disp;
				dontcare[dst] |= tmp_m & 0xff;
			}
		}
	}

	/* Fill in don't care bits at beginning of field */
	if (disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
		for (k = 0; k < disp; k++)
			dontcare[dst] |= BIT(k);
	}

	/* Bit position just past the field's end within its last byte */
	end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;

	/* Fill in don't care bits at end of field */
	if (end_disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
		      info->entry.last - 1;
		for (k = end_disp; k < BITS_PER_BYTE; k++)
			dontcare[dst] |= BIT(k);
	}
}
2317
2318 /**
2319  * ice_flow_acl_frmt_entry - Format ACL entry
2320  * @hw: pointer to the hardware structure
2321  * @prof: pointer to flow profile
2322  * @e: pointer to the flow entry
2323  * @data: pointer to a data buffer containing flow entry's match values/masks
2324  * @acts: array of actions to be performed on a match
2325  * @acts_cnt: number of actions
2326  *
2327  * Formats the key (and key_inverse) to be matched from the data passed in,
2328  * along with data from the flow profile. This key/key_inverse pair makes up
2329  * the 'entry' for an ACL flow entry.
2330  */
2331 static enum ice_status
2332 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2333                         struct ice_flow_entry *e, u8 *data,
2334                         struct ice_flow_action *acts, u8 acts_cnt)
2335 {
2336         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2337         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2338         enum ice_status status;
2339         bool cnt_alloc;
2340         u8 prof_id = 0;
2341         u16 i, buf_sz;
2342
2343         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2344         if (status)
2345                 return status;
2346
2347         /* Format the result action */
2348
2349         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2350         if (status)
2351                 return status;
2352
2353         status = ICE_ERR_NO_MEMORY;
2354
2355         e->acts = (struct ice_flow_action *)
2356                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2357                            ICE_NONDMA_TO_NONDMA);
2358
2359         if (!e->acts)
2360                 goto out;
2361
2362         e->acts_cnt = acts_cnt;
2363
2364         /* Format the matching data */
2365         buf_sz = prof->cfg.scen->width;
2366         buf = (u8 *)ice_malloc(hw, buf_sz);
2367         if (!buf)
2368                 goto out;
2369
2370         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2371         if (!dontcare)
2372                 goto out;
2373
2374         /* 'key' buffer will store both key and key_inverse, so must be twice
2375          * size of buf
2376          */
2377         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2378         if (!key)
2379                 goto out;
2380
2381         range_buf = (struct ice_aqc_acl_profile_ranges *)
2382                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2383         if (!range_buf)
2384                 goto out;
2385
2386         /* Set don't care mask to all 1's to start, will zero out used bytes */
2387         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2388
2389         for (i = 0; i < prof->segs_cnt; i++) {
2390                 struct ice_flow_seg_info *seg = &prof->segs[i];
2391                 u64 match = seg->match;
2392                 u16 j;
2393
2394                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2395                         struct ice_flow_fld_info *info;
2396                         const u64 bit = BIT_ULL(j);
2397
2398                         if (!(match & bit))
2399                                 continue;
2400
2401                         info = &seg->fields[j];
2402
2403                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2404                                 ice_flow_acl_frmt_entry_range(j, info,
2405                                                               range_buf, data,
2406                                                               &range);
2407                         else
2408                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2409                                                             dontcare, data);
2410
2411                         match &= ~bit;
2412                 }
2413
2414                 for (j = 0; j < seg->raws_cnt; j++) {
2415                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2416                         u16 dst, src, mask, k;
2417                         bool use_mask = false;
2418
2419                         src = info->src.val;
2420                         dst = info->entry.val -
2421                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2422                         mask = info->src.mask;
2423
2424                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2425                                 use_mask = true;
2426
2427                         for (k = 0; k < info->entry.last; k++, dst++) {
2428                                 buf[dst] = data[src++];
2429                                 if (use_mask)
2430                                         dontcare[dst] = ~data[mask++];
2431                                 else
2432                                         dontcare[dst] = 0;
2433                         }
2434                 }
2435         }
2436
2437         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2438         dontcare[prof->cfg.scen->pid_idx] = 0;
2439
2440         /* Format the buffer for direction flags */
2441         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2442
2443         if (prof->dir == ICE_FLOW_RX)
2444                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2445
2446         if (range) {
2447                 buf[prof->cfg.scen->rng_chk_idx] = range;
2448                 /* Mark any unused range checkers as don't care */
2449                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2450                 e->range_buf = range_buf;
2451         } else {
2452                 ice_free(hw, range_buf);
2453         }
2454
2455         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2456                              buf_sz);
2457         if (status)
2458                 goto out;
2459
2460         e->entry = key;
2461         e->entry_sz = buf_sz * 2;
2462
2463 out:
2464         if (buf)
2465                 ice_free(hw, buf);
2466
2467         if (dontcare)
2468                 ice_free(hw, dontcare);
2469
2470         if (status && key)
2471                 ice_free(hw, key);
2472
2473         if (status && range_buf) {
2474                 ice_free(hw, range_buf);
2475                 e->range_buf = NULL;
2476         }
2477
2478         if (status && e->acts) {
2479                 ice_free(hw, e->acts);
2480                 e->acts = NULL;
2481                 e->acts_cnt = 0;
2482         }
2483
2484         if (status && cnt_alloc)
2485                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2486
2487         return status;
2488 }
2489
/**
 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
 *                                     the compared data.
 * @prof: pointer to flow profile
 * @e: pointer to the comparing flow entry
 * @do_chg_action: decide if we want to change the ACL action
 * @do_add_entry: decide if we want to add the new ACL entry
 * @do_rem_entry: decide if we want to remove the current ACL entry
 *
 * Find an ACL scenario entry that matches the compared data. At the same
 * time, this function also figures out:
 * a/ If we want to change the ACL action
 * b/ If we want to add the new ACL entry
 * c/ If we want to remove the current ACL entry
 */
static struct ice_flow_entry *
ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
				  struct ice_flow_entry *e, bool *do_chg_action,
				  bool *do_add_entry, bool *do_rem_entry)
{
	struct ice_flow_entry *p, *return_entry = NULL;
	u8 i, j;

	/* Check if:
	 * a/ There exists an entry with same matching data, but different
	 *    priority, then we remove this existing ACL entry. Then, we
	 *    will add the new entry to the ACL scenario.
	 * b/ There exists an entry with same matching data, priority, and
	 *    result action, then we do nothing
	 * c/ There exists an entry with same matching data and priority, but
	 *    a different action, then only change the entry's action.
	 * d/ Else, we add this new entry to the ACL scenario.
	 */
	*do_chg_action = false;
	*do_add_entry = true;
	*do_rem_entry = false;
	LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
		/* Skip entries whose key data differs */
		if (memcmp(p->entry, e->entry, p->entry_sz))
			continue;

		/* From this point, we have the same matching_data. */
		*do_add_entry = false;
		return_entry = p;

		if (p->priority != e->priority) {
			/* matching data && !priority */
			*do_add_entry = true;
			*do_rem_entry = true;
			break;
		}

		/* From this point, we will have matching_data && priority */
		if (p->acts_cnt != e->acts_cnt)
			*do_chg_action = true;
		for (i = 0; i < p->acts_cnt; i++) {
			bool found_not_match = false;

			/* NOTE(review): this inner loop breaks on the first
			 * e->acts[j] that differs from p->acts[i], so
			 * found_not_match stays false only when p->acts[i]
			 * equals every e->acts[j] scanned. Confirm this
			 * "any mismatch means change action" semantic is
			 * intended rather than an any-match search.
			 */
			for (j = 0; j < e->acts_cnt; j++)
				if (memcmp(&p->acts[i], &e->acts[j],
					   sizeof(struct ice_flow_action))) {
					found_not_match = true;
					break;
				}

			if (found_not_match) {
				*do_chg_action = true;
				break;
			}
		}

		/* (do_chg_action = true) means :
		 *    matching_data && priority && !result_action
		 * (do_chg_action = false) means :
		 *    matching_data && priority && result_action
		 */
		break;
	}

	return return_entry;
}
2570
2571 /**
2572  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2573  * @p: flow priority
2574  */
2575 static enum ice_acl_entry_prior
2576 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2577 {
2578         enum ice_acl_entry_prior acl_prior;
2579
2580         switch (p) {
2581         case ICE_FLOW_PRIO_LOW:
2582                 acl_prior = ICE_LOW;
2583                 break;
2584         case ICE_FLOW_PRIO_NORMAL:
2585                 acl_prior = ICE_NORMAL;
2586                 break;
2587         case ICE_FLOW_PRIO_HIGH:
2588                 acl_prior = ICE_HIGH;
2589                 break;
2590         default:
2591                 acl_prior = ICE_NORMAL;
2592                 break;
2593         }
2594
2595         return acl_prior;
2596 }
2597
/**
 * ice_flow_acl_union_rng_chk - Perform union operation between two
 *                              range-range checker buffers
 * @dst_buf: pointer to destination range checker buffer
 * @src_buf: pointer to source range checker buffer
 *
 * For this function, we do the union between dst_buf and src_buf
 * range checker buffer, and we will save the result back to dst_buf
 *
 * Each used source checker is merged into dst_buf's first slot that is
 * either free (mask == 0) or already holds identical data. Returns
 * ICE_ERR_MAX_LIMIT when dst_buf has no slot left for a source checker.
 */
static enum ice_status
ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
			   struct ice_aqc_acl_profile_ranges *src_buf)
{
	u8 i, j;

	if (!dst_buf || !src_buf)
		return ICE_ERR_BAD_PTR;

	for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
		struct ice_acl_rng_data *cfg_data = NULL, *in_data;
		bool will_populate = false;

		in_data = &src_buf->checker_cfg[i];

		/* A zero mask marks the checker as unused; stop here.
		 * NOTE(review): assumes used checkers in src_buf are
		 * contiguous from index 0 -- confirm producers never leave
		 * gaps, or later used slots would be skipped.
		 */
		if (!in_data->mask)
			break;

		for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
			cfg_data = &dst_buf->checker_cfg[j];

			/* Free slot, or destination already holds an
			 * identical checker (dedup)
			 */
			if (!cfg_data->mask ||
			    !memcmp(cfg_data, in_data,
				    sizeof(struct ice_acl_rng_data))) {
				will_populate = true;
				break;
			}
		}

		if (will_populate) {
			ice_memcpy(cfg_data, in_data,
				   sizeof(struct ice_acl_rng_data),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			/* No available slot left to program range checker */
			return ICE_ERR_MAX_LIMIT;
		}
	}

	return ICE_SUCCESS;
}
2648
2649 /**
2650  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2651  * @hw: pointer to the hardware structure
2652  * @prof: pointer to flow profile
2653  * @entry: double pointer to the flow entry
2654  *
2655  * For this function, we will look at the current added entries in the
2656  * corresponding ACL scenario. Then, we will perform matching logic to
2657  * see if we want to add/modify/do nothing with this new entry.
2658  */
2659 static enum ice_status
2660 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2661                                  struct ice_flow_entry **entry)
2662 {
2663         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2664         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2665         struct ice_acl_act_entry *acts = NULL;
2666         struct ice_flow_entry *exist;
2667         enum ice_status status = ICE_SUCCESS;
2668         struct ice_flow_entry *e;
2669         u8 i;
2670
2671         if (!entry || !(*entry) || !prof)
2672                 return ICE_ERR_BAD_PTR;
2673
2674         e = *(entry);
2675
2676         do_chg_rng_chk = false;
2677         if (e->range_buf) {
2678                 u8 prof_id = 0;
2679
2680                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2681                                               &prof_id);
2682                 if (status)
2683                         return status;
2684
2685                 /* Query the current range-checker value in FW */
2686                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2687                                                    NULL);
2688                 if (status)
2689                         return status;
2690                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2691                            sizeof(struct ice_aqc_acl_profile_ranges),
2692                            ICE_NONDMA_TO_NONDMA);
2693
2694                 /* Generate the new range-checker value */
2695                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2696                 if (status)
2697                         return status;
2698
2699                 /* Reconfigure the range check if the buffer is changed. */
2700                 do_chg_rng_chk = false;
2701                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2702                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2703                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2704                                                           &cfg_rng_buf, NULL);
2705                         if (status)
2706                                 return status;
2707
2708                         do_chg_rng_chk = true;
2709                 }
2710         }
2711
2712         /* Figure out if we want to (change the ACL action) and/or
2713          * (Add the new ACL entry) and/or (Remove the current ACL entry)
2714          */
2715         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2716                                                   &do_add_entry, &do_rem_entry);
2717
2718         if (do_rem_entry) {
2719                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2720                 if (status)
2721                         return status;
2722         }
2723
2724         /* Prepare the result action buffer */
2725         acts = (struct ice_acl_act_entry *)ice_calloc
2726                 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2727         for (i = 0; i < e->acts_cnt; i++)
2728                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2729                            sizeof(struct ice_acl_act_entry),
2730                            ICE_NONDMA_TO_NONDMA);
2731
2732         if (do_add_entry) {
2733                 enum ice_acl_entry_prior prior;
2734                 u8 *keys, *inverts;
2735                 u16 entry_idx;
2736
2737                 keys = (u8 *)e->entry;
2738                 inverts = keys + (e->entry_sz / 2);
2739                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2740
2741                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2742                                            inverts, acts, e->acts_cnt,
2743                                            &entry_idx);
2744                 if (status)
2745                         goto out;
2746
2747                 e->scen_entry_idx = entry_idx;
2748                 LIST_ADD(&e->l_entry, &prof->entries);
2749         } else {
2750                 if (do_chg_action) {
2751                         /* For the action memory info, update the SW's copy of
2752                          * exist entry with e's action memory info
2753                          */
2754                         ice_free(hw, exist->acts);
2755                         exist->acts_cnt = e->acts_cnt;
2756                         exist->acts = (struct ice_flow_action *)
2757                                 ice_calloc(hw, exist->acts_cnt,
2758                                            sizeof(struct ice_flow_action));
2759
2760                         if (!exist->acts) {
2761                                 status = ICE_ERR_NO_MEMORY;
2762                                 goto out;
2763                         }
2764
2765                         ice_memcpy(exist->acts, e->acts,
2766                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2767                                    ICE_NONDMA_TO_NONDMA);
2768
2769                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2770                                                   e->acts_cnt,
2771                                                   exist->scen_entry_idx);
2772                         if (status)
2773                                 goto out;
2774                 }
2775
2776                 if (do_chg_rng_chk) {
2777                         /* In this case, we want to update the range checker
2778                          * information of the exist entry
2779                          */
2780                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2781                                                             e->range_buf);
2782                         if (status)
2783                                 goto out;
2784                 }
2785
2786                 /* As we don't add the new entry to our SW DB, deallocate its
2787                  * memories, and return the exist entry to the caller
2788                  */
2789                 ice_dealloc_flow_entry(hw, e);
2790                 *(entry) = exist;
2791         }
2792 out:
2793         if (acts)
2794                 ice_free(hw, acts);
2795
2796         return status;
2797 }
2798
2799 /**
2800  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2801  * @hw: pointer to the hardware structure
2802  * @prof: pointer to flow profile
2803  * @e: double pointer to the flow entry
2804  */
2805 static enum ice_status
2806 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2807                             struct ice_flow_entry **e)
2808 {
2809         enum ice_status status;
2810
2811         ice_acquire_lock(&prof->entries_lock);
2812         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2813         ice_release_lock(&prof->entries_lock);
2814
2815         return status;
2816 }
2817
/**
 * ice_flow_add_entry - Add a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: ID of the profile to add a new flow entry to
 * @entry_id: unique ID to identify this flow entry
 * @vsi_handle: software VSI handle for the flow entry
 * @prio: priority of the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @acts: arrays of actions to be performed on a match
 * @acts_cnt: number of actions
 * @entry_h: pointer to buffer that receives the new flow entry's handle
 *
 * Allocates and initializes a flow entry under the given profile, performs
 * block-specific entry setup (ACL formats and programs the entry), links the
 * entry into the profile's list for non-ACL blocks, and returns its handle.
 * On failure the entry (and its key buffer, if any) is freed.
 */
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, struct ice_flow_action *acts, u8 acts_cnt,
		   u64 *entry_h)
{
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status = ICE_SUCCESS;

	/* ACL entries must indicate an action */
	if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
		return ICE_ERR_PARAM;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Profile lookup, entry allocation and VSI association happen under
	 * the per-block profile-list lock.
	 */
	ice_acquire_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
	} else {
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile
		 */
		e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
		if (!e)
			status = ICE_ERR_NO_MEMORY;
		else
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	}

	ice_release_lock(&hw->fl_profs_locks[blk]);
	if (status)
		goto out;

	e->id = entry_id;
	e->vsi_handle = vsi_handle;
	e->prof = prof;
	e->priority = prio;

	switch (blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		break;
	case ICE_BLK_ACL:
		/* ACL will handle the entry management */
		status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
						 acts_cnt);
		if (status)
			goto out;

		/* May replace e with an existing equivalent entry, in which
		 * case e's resources are freed by the callee.
		 */
		status = ice_flow_acl_add_scen_entry(hw, prof, &e);
		if (status)
			goto out;

		break;
	case ICE_BLK_SW:
	case ICE_BLK_PE:
	default:
		status = ICE_ERR_NOT_IMPL;
		goto out;
	}

	if (blk != ICE_BLK_ACL) {
		/* ACL will handle the entry management */
		ice_acquire_lock(&prof->entries_lock);
		LIST_ADD(&e->l_entry, &prof->entries);
		ice_release_lock(&prof->entries_lock);
	}

	*entry_h = ICE_FLOW_ENTRY_HNDL(e);

out:
	if (status && e) {
		if (e->entry)
			ice_free(hw, e->entry);
		ice_free(hw, e);
	}

	return status;
}
2918
2919 /**
2920  * ice_flow_rem_entry - Remove a flow entry
2921  * @hw: pointer to the HW struct
2922  * @blk: classification stage
2923  * @entry_h: handle to the flow entry to be removed
2924  */
2925 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2926                                    u64 entry_h)
2927 {
2928         struct ice_flow_entry *entry;
2929         struct ice_flow_prof *prof;
2930         enum ice_status status = ICE_SUCCESS;
2931
2932         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2933                 return ICE_ERR_PARAM;
2934
2935         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2936
2937         /* Retain the pointer to the flow profile as the entry will be freed */
2938         prof = entry->prof;
2939
2940         if (prof) {
2941                 ice_acquire_lock(&prof->entries_lock);
2942                 status = ice_flow_rem_entry_sync(hw, blk, entry);
2943                 ice_release_lock(&prof->entries_lock);
2944         }
2945
2946         return status;
2947 }
2948
2949 /**
2950  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2951  * @seg: packet segment the field being set belongs to
2952  * @fld: field to be set
2953  * @field_type: type of the field
2954  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2955  *           entry's input buffer
2956  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2957  *            input buffer
2958  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2959  *            entry's input buffer
2960  *
2961  * This helper function stores information of a field being matched, including
2962  * the type of the field and the locations of the value to match, the mask, and
2963  * and the upper-bound value in the start of the input buffer for a flow entry.
2964  * This function should only be used for fixed-size data structures.
2965  *
2966  * This function also opportunistically determines the protocol headers to be
2967  * present based on the fields being set. Some fields cannot be used alone to
2968  * determine the protocol headers present. Sometimes, fields for particular
2969  * protocol headers are not matched. In those cases, the protocol headers
2970  * must be explicitly set.
2971  */
2972 static void
2973 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2974                      enum ice_flow_fld_match_type field_type, u16 val_loc,
2975                      u16 mask_loc, u16 last_loc)
2976 {
2977         u64 bit = BIT_ULL(fld);
2978
2979         seg->match |= bit;
2980         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
2981                 seg->range |= bit;
2982
2983         seg->fields[fld].type = field_type;
2984         seg->fields[fld].src.val = val_loc;
2985         seg->fields[fld].src.mask = mask_loc;
2986         seg->fields[fld].src.last = last_loc;
2987
2988         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
2989 }
2990
2991 /**
2992  * ice_flow_set_fld - specifies locations of field from entry's input buffer
2993  * @seg: packet segment the field being set belongs to
2994  * @fld: field to be set
2995  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2996  *           entry's input buffer
2997  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2998  *            input buffer
2999  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3000  *            entry's input buffer
3001  * @range: indicate if field being matched is to be in a range
3002  *
3003  * This function specifies the locations, in the form of byte offsets from the
3004  * start of the input buffer for a flow entry, from where the value to match,
3005  * the mask value, and upper value can be extracted. These locations are then
3006  * stored in the flow profile. When adding a flow entry associated with the
3007  * flow profile, these locations will be used to quickly extract the values and
3008  * create the content of a match entry. This function should only be used for
3009  * fixed-size data structures.
3010  */
3011 void
3012 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3013                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3014 {
3015         enum ice_flow_fld_match_type t = range ?
3016                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3017
3018         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3019 }
3020
3021 /**
3022  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3023  * @seg: packet segment the field being set belongs to
3024  * @fld: field to be set
3025  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3026  *           entry's input buffer
3027  * @pref_loc: location of prefix value from entry's input buffer
3028  * @pref_sz: size of the location holding the prefix value
3029  *
3030  * This function specifies the locations, in the form of byte offsets from the
3031  * start of the input buffer for a flow entry, from where the value to match
3032  * and the IPv4 prefix value can be extracted. These locations are then stored
3033  * in the flow profile. When adding flow entries to the associated flow profile,
3034  * these locations can be used to quickly extract the values to create the
3035  * content of a match entry. This function should only be used for fixed-size
3036  * data structures.
3037  */
3038 void
3039 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3040                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3041 {
3042         /* For this type of field, the "mask" location is for the prefix value's
3043          * location and the "last" location is for the size of the location of
3044          * the prefix value.
3045          */
3046         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3047                              pref_loc, (u16)pref_sz);
3048 }
3049
3050 /**
3051  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3052  * @seg: packet segment the field being set belongs to
3053  * @off: offset of the raw field from the beginning of the segment in bytes
3054  * @len: length of the raw pattern to be matched
3055  * @val_loc: location of the value to match from entry's input buffer
3056  * @mask_loc: location of mask value from entry's input buffer
3057  *
3058  * This function specifies the offset of the raw field to be match from the
3059  * beginning of the specified packet segment, and the locations, in the form of
3060  * byte offsets from the start of the input buffer for a flow entry, from where
3061  * the value to match and the mask value to be extracted. These locations are
3062  * then stored in the flow profile. When adding flow entries to the associated
3063  * flow profile, these locations can be used to quickly extract the values to
3064  * create the content of a match entry. This function should only be used for
3065  * fixed-size data structures.
3066  */
3067 void
3068 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3069                      u16 val_loc, u16 mask_loc)
3070 {
3071         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3072                 seg->raws[seg->raws_cnt].off = off;
3073                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3074                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3075                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3076                 /* The "last" field is used to store the length of the field */
3077                 seg->raws[seg->raws_cnt].info.src.last = len;
3078         }
3079
3080         /* Overflows of "raws" will be handled as an error condition later in
3081          * the flow when this information is processed.
3082          */
3083         seg->raws_cnt++;
3084 }
3085
/* Protocol-header bits an RSS packet segment may legally carry, grouped by
 * layer: L2 (Ethernet/VLAN), L3 (IPv4/IPv6) and L4 (TCP/UDP/SCTP).
 */
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)

#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)

/* Union of all header bits valid for an RSS segment */
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3100
3101 /**
3102  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3103  * @segs: pointer to the flow field segment(s)
3104  * @hash_fields: fields to be hashed on for the segment(s)
3105  * @flow_hdr: protocol header fields within a packet segment
3106  *
3107  * Helper function to extract fields from hash bitmap and use flow
3108  * header value to set flow field segment for further use in flow
3109  * profile entry or removal.
3110  */
3111 static enum ice_status
3112 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3113                           u32 flow_hdr)
3114 {
3115         u64 val = hash_fields;
3116         u8 i;
3117
3118         for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3119                 u64 bit = BIT_ULL(i);
3120
3121                 if (val & bit) {
3122                         ice_flow_set_fld(segs, (enum ice_flow_field)i,
3123                                          ICE_FLOW_FLD_OFF_INVAL,
3124                                          ICE_FLOW_FLD_OFF_INVAL,
3125                                          ICE_FLOW_FLD_OFF_INVAL, false);
3126                         val &= ~bit;
3127                 }
3128         }
3129         ICE_FLOW_SET_HDRS(segs, flow_hdr);
3130
3131         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3132             ~ICE_FLOW_RSS_HDRS_INNER_MASK)
3133                 return ICE_ERR_PARAM;
3134
3135         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3136         if (val && !ice_is_pow2(val))
3137                 return ICE_ERR_CFG;
3138
3139         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3140         if (val && !ice_is_pow2(val))
3141                 return ICE_ERR_CFG;
3142
3143         return ICE_SUCCESS;
3144 }
3145
3146 /**
3147  * ice_rem_vsi_rss_list - remove VSI from RSS list
3148  * @hw: pointer to the hardware structure
3149  * @vsi_handle: software VSI handle
3150  *
3151  * Remove the VSI from all RSS configurations in the list.
3152  */
3153 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3154 {
3155         struct ice_rss_cfg *r, *tmp;
3156
3157         if (LIST_EMPTY(&hw->rss_list_head))
3158                 return;
3159
3160         ice_acquire_lock(&hw->rss_locks);
3161         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3162                                  ice_rss_cfg, l_entry) {
3163                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3164                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3165                                 LIST_DEL(&r->l_entry);
3166                                 ice_free(hw, r);
3167                         }
3168         }
3169         ice_release_lock(&hw->rss_locks);
3170 }
3171
3172 /**
3173  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3174  * @hw: pointer to the hardware structure
3175  * @vsi_handle: software VSI handle
3176  *
3177  * This function will iterate through all flow profiles and disassociate
3178  * the VSI from that profile. If the flow profile has no VSIs it will
3179  * be removed.
3180  */
3181 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3182 {
3183         const enum ice_block blk = ICE_BLK_RSS;
3184         struct ice_flow_prof *p, *t;
3185         enum ice_status status = ICE_SUCCESS;
3186
3187         if (!ice_is_vsi_valid(hw, vsi_handle))
3188                 return ICE_ERR_PARAM;
3189
3190         if (LIST_EMPTY(&hw->fl_profs[blk]))
3191                 return ICE_SUCCESS;
3192
3193         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3194         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3195                                  l_entry) {
3196                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3197                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3198                         if (status)
3199                                 break;
3200
3201                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3202                                 status = ice_flow_rem_prof_sync(hw, blk, p);
3203                                 if (status)
3204                                         break;
3205                         }
3206                 }
3207         }
3208         ice_release_lock(&hw->fl_profs_locks[blk]);
3209
3210         return status;
3211 }
3212
3213 /**
3214  * ice_rem_rss_list - remove RSS configuration from list
3215  * @hw: pointer to the hardware structure
3216  * @vsi_handle: software VSI handle
3217  * @prof: pointer to flow profile
3218  *
3219  * Assumption: lock has already been acquired for RSS list
3220  */
3221 static void
3222 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3223 {
3224         struct ice_rss_cfg *r, *tmp;
3225
3226         /* Search for RSS hash fields associated to the VSI that match the
3227          * hash configurations associated to the flow profile. If found
3228          * remove from the RSS entry list of the VSI context and delete entry.
3229          */
3230         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3231                                  ice_rss_cfg, l_entry) {
3232                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3233                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3234                         ice_clear_bit(vsi_handle, r->vsis);
3235                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3236                                 LIST_DEL(&r->l_entry);
3237                                 ice_free(hw, r);
3238                         }
3239                         return;
3240                 }
3241         }
3242 }
3243
3244 /**
3245  * ice_add_rss_list - add RSS configuration to list
3246  * @hw: pointer to the hardware structure
3247  * @vsi_handle: software VSI handle
3248  * @prof: pointer to flow profile
3249  *
3250  * Assumption: lock has already been acquired for RSS list
3251  */
3252 static enum ice_status
3253 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3254 {
3255         struct ice_rss_cfg *r, *rss_cfg;
3256
3257         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3258                             ice_rss_cfg, l_entry)
3259                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3260                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3261                         ice_set_bit(vsi_handle, r->vsis);
3262                         return ICE_SUCCESS;
3263                 }
3264
3265         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3266         if (!rss_cfg)
3267                 return ICE_ERR_NO_MEMORY;
3268
3269         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3270         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3271         rss_cfg->symm = prof->cfg.symm;
3272         ice_set_bit(vsi_handle, rss_cfg->vsis);
3273
3274         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3275
3276         return ICE_SUCCESS;
3277 }
3278
/* Bit layout of a generated RSS flow profile ID (see ICE_FLOW_GEN_PROFID) */
#define ICE_FLOW_PROF_HASH_S    0
#define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S     32
#define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S   63
#define ICE_FLOW_PROF_ENCAP_M   (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

/* Segment counts for outer-only vs. tunneled (inner) RSS configurations */
#define ICE_RSS_OUTER_HEADERS   1
#define ICE_RSS_INNER_HEADERS   2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 *
 * A segment count greater than one implies an inner (tunneled) segment,
 * which sets the encapsulation bit.
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3298
3299 static void
3300 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3301 {
3302         u32 s = ((src % 4) << 3); /* byte shift */
3303         u32 v = dst | 0x80; /* value to program */
3304         u8 i = src / 4; /* register index */
3305         u32 reg;
3306
3307         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3308         reg = (reg & ~(0xff << s)) | (v << s);
3309         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3310 }
3311
3312 static void
3313 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3314 {
3315         int fv_last_word =
3316                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3317         int i;
3318
3319         for (i = 0; i < len; i++) {
3320                 ice_rss_config_xor_word(hw, prof_id,
3321                                         /* Yes, field vector in GLQF_HSYMM and
3322                                          * GLQF_HINSET is inversed!
3323                                          */
3324                                         fv_last_word - (src + i),
3325                                         fv_last_word - (dst + i));
3326                 ice_rss_config_xor_word(hw, prof_id,
3327                                         fv_last_word - (dst + i),
3328                                         fv_last_word - (src + i));
3329         }
3330 }
3331
3332 static void
3333 ice_rss_update_symm(struct ice_hw *hw,
3334                     struct ice_flow_prof *prof)
3335 {
3336         struct ice_prof_map *map;
3337         u8 prof_id, m;
3338
3339         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3340         prof_id = map->prof_id;
3341
3342         /* clear to default */
3343         for (m = 0; m < 6; m++)
3344                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3345         if (prof->cfg.symm) {
3346                 struct ice_flow_seg_info *seg =
3347                         &prof->segs[prof->segs_cnt - 1];
3348
3349                 struct ice_flow_seg_xtrct *ipv4_src =
3350                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3351                 struct ice_flow_seg_xtrct *ipv4_dst =
3352                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3353                 struct ice_flow_seg_xtrct *ipv6_src =
3354                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3355                 struct ice_flow_seg_xtrct *ipv6_dst =
3356                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3357
3358                 struct ice_flow_seg_xtrct *tcp_src =
3359                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3360                 struct ice_flow_seg_xtrct *tcp_dst =
3361                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3362
3363                 struct ice_flow_seg_xtrct *udp_src =
3364                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3365                 struct ice_flow_seg_xtrct *udp_dst =
3366                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3367
3368                 struct ice_flow_seg_xtrct *sctp_src =
3369                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3370                 struct ice_flow_seg_xtrct *sctp_dst =
3371                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3372
3373                 /* xor IPv4 */
3374                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3375                         ice_rss_config_xor(hw, prof_id,
3376                                            ipv4_src->idx, ipv4_dst->idx, 2);
3377
3378                 /* xor IPv6 */
3379                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3380                         ice_rss_config_xor(hw, prof_id,
3381                                            ipv6_src->idx, ipv6_dst->idx, 8);
3382
3383                 /* xor TCP */
3384                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3385                         ice_rss_config_xor(hw, prof_id,
3386                                            tcp_src->idx, tcp_dst->idx, 1);
3387
3388                 /* xor UDP */
3389                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3390                         ice_rss_config_xor(hw, prof_id,
3391                                            udp_src->idx, udp_dst->idx, 1);
3392
3393                 /* xor SCTP */
3394                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3395                         ice_rss_config_xor(hw, prof_id,
3396                                            sctp_src->idx, sctp_dst->idx, 1);
3397         }
3398 }
3399
/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 * @segs_cnt: packet segment count
 * @symm: symmetric hash enable/disable
 *
 * Builds the packet segment for the requested hash, then finds or creates a
 * matching RSS flow profile and associates the VSI with it.
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt, bool symm)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_PARAM;

	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
						      sizeof(*segs));
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto exit;

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		/* Fully matching profile found; only the symmetry setting
		 * may need to change, which requires a hardware update.
		 */
		if (prof->cfg.symm == symm)
			goto exit;
		prof->cfg.symm = symm;
		goto update_symm;
	}

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		if (prof->cfg.symm == symm) {
			status = ice_flow_assoc_prof(hw, blk, prof,
						     vsi_handle);
			if (!status)
				status = ice_add_rss_list(hw, vsi_handle,
							  prof);
		} else {
			/* if a profile exist but with different symmetric
			 * requirement, just return error.
			 */
			status = ICE_ERR_NOT_SUPPORTED;
		}
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(hashed_flds,
						       segs[segs_cnt - 1].hdrs,
						       segs_cnt),
				   segs, segs_cnt, NULL, 0, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

	prof->cfg.symm = symm;

update_symm:
	ice_rss_update_symm(hw, prof);

exit:
	ice_free(hw, segs);
	return status;
}
3524
3525 /**
3526  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3527  * @hw: pointer to the hardware structure
3528  * @vsi_handle: software VSI handle
3529  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3530  * @addl_hdrs: protocol header fields
3531  * @symm: symmetric hash enable/disable
3532  *
3533  * This function will generate a flow profile based on fields associated with
3534  * the input fields to hash on, the flow type and use the VSI number to add
3535  * a flow entry to the profile.
3536  */
3537 enum ice_status
3538 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3539                 u32 addl_hdrs, bool symm)
3540 {
3541         enum ice_status status;
3542
3543         if (hashed_flds == ICE_HASH_INVALID ||
3544             !ice_is_vsi_valid(hw, vsi_handle))
3545                 return ICE_ERR_PARAM;
3546
3547         ice_acquire_lock(&hw->rss_locks);
3548         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3549                                       ICE_RSS_OUTER_HEADERS, symm);
3550         if (!status)
3551                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3552                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3553                                               symm);
3554         ice_release_lock(&hw->rss_locks);
3555
3556         return status;
3557 }
3558
3559 /**
3560  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3561  * @hw: pointer to the hardware structure
3562  * @vsi_handle: software VSI handle
3563  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3564  * @addl_hdrs: Protocol header fields within a packet segment
3565  * @segs_cnt: packet segment count
3566  *
3567  * Assumption: lock has already been acquired for RSS list
3568  */
3569 static enum ice_status
3570 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3571                      u32 addl_hdrs, u8 segs_cnt)
3572 {
3573         const enum ice_block blk = ICE_BLK_RSS;
3574         struct ice_flow_seg_info *segs;
3575         struct ice_flow_prof *prof;
3576         enum ice_status status;
3577
3578         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3579                                                       sizeof(*segs));
3580         if (!segs)
3581                 return ICE_ERR_NO_MEMORY;
3582
3583         /* Construct the packet segment info from the hashed fields */
3584         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3585                                            addl_hdrs);
3586         if (status)
3587                 goto out;
3588
3589         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3590                                         vsi_handle,
3591                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3592         if (!prof) {
3593                 status = ICE_ERR_DOES_NOT_EXIST;
3594                 goto out;
3595         }
3596
3597         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3598         if (status)
3599                 goto out;
3600
3601         /* Remove RSS configuration from VSI context before deleting
3602          * the flow profile.
3603          */
3604         ice_rem_rss_list(hw, vsi_handle, prof);
3605
3606         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3607                 status = ice_flow_rem_prof(hw, blk, prof->id);
3608
3609 out:
3610         ice_free(hw, segs);
3611         return status;
3612 }
3613
3614 /**
3615  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3616  * @hw: pointer to the hardware structure
3617  * @vsi_handle: software VSI handle
3618  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3619  * @addl_hdrs: Protocol header fields within a packet segment
3620  *
3621  * This function will lookup the flow profile based on the input
3622  * hash field bitmap, iterate through the profile entry list of
3623  * that profile and find entry associated with input VSI to be
3624  * removed. Calls are made to underlying flow apis which will in
3625  * turn build or update buffers for RSS XLT1 section.
3626  */
3627 enum ice_status
3628 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3629                 u32 addl_hdrs)
3630 {
3631         enum ice_status status;
3632
3633         if (hashed_flds == ICE_HASH_INVALID ||
3634             !ice_is_vsi_valid(hw, vsi_handle))
3635                 return ICE_ERR_PARAM;
3636
3637         ice_acquire_lock(&hw->rss_locks);
3638         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3639                                       ICE_RSS_OUTER_HEADERS);
3640         if (!status)
3641                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3642                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3643         ice_release_lock(&hw->rss_locks);
3644
3645         return status;
3646 }
3647
3648 /**
3649  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3650  * @hw: pointer to the hardware structure
3651  * @vsi_handle: software VSI handle
3652  */
3653 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3654 {
3655         enum ice_status status = ICE_SUCCESS;
3656         struct ice_rss_cfg *r;
3657
3658         if (!ice_is_vsi_valid(hw, vsi_handle))
3659                 return ICE_ERR_PARAM;
3660
3661         ice_acquire_lock(&hw->rss_locks);
3662         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3663                             ice_rss_cfg, l_entry) {
3664                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3665                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3666                                                       r->hashed_flds,
3667                                                       r->packet_hdr,
3668                                                       ICE_RSS_OUTER_HEADERS,
3669                                                       r->symm);
3670                         if (status)
3671                                 break;
3672                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3673                                                       r->hashed_flds,
3674                                                       r->packet_hdr,
3675                                                       ICE_RSS_INNER_HEADERS,
3676                                                       r->symm);
3677                         if (status)
3678                                 break;
3679                 }
3680         }
3681         ice_release_lock(&hw->rss_locks);
3682
3683         return status;
3684 }
3685
3686 /**
3687  * ice_get_rss_cfg - returns hashed fields for the given header types
3688  * @hw: pointer to the hardware structure
3689  * @vsi_handle: software VSI handle
3690  * @hdrs: protocol header type
3691  *
3692  * This function will return the match fields of the first instance of flow
3693  * profile having the given header types and containing input VSI
3694  */
3695 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3696 {
3697         struct ice_rss_cfg *r, *rss_cfg = NULL;
3698
3699         /* verify if the protocol header is non zero and VSI is valid */
3700         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3701                 return ICE_HASH_INVALID;
3702
3703         ice_acquire_lock(&hw->rss_locks);
3704         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3705                             ice_rss_cfg, l_entry)
3706                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3707                     r->packet_hdr == hdrs) {
3708                         rss_cfg = r;
3709                         break;
3710                 }
3711         ice_release_lock(&hw->rss_locks);
3712
3713         return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
3714 }