net/ice: fix RSS for GTPU
[dpdk.git] / drivers / net / ice / base / ice_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
/* Size of known protocol header fields, in BYTES.
 * These are fed to the _size_bytes parameter of ICE_FLOW_FLD_INFO(),
 * which converts them to bits for struct ice_flow_field_info.
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
#define ICE_FLOW_FLD_SZ_PFCP_SEID 8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID	4
#define ICE_FLOW_FLD_SZ_ESP_SPI	4
#define ICE_FLOW_FLD_SZ_AH_SPI	4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI	4
30
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;	/* header the field belongs to */
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
	u16 mask;	/* 16-bit mask for field; 0 means "no mask" (see
			 * ICE_FLOW_FLD_INFO vs ICE_FLOW_FLD_INFO_MSK below)
			 */
};
38
/* Initializer for an unmasked field descriptor. Offset and size are given in
 * bytes and converted to bits to match struct ice_flow_field_info.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = 0, \
}

/* Same as ICE_FLOW_FLD_INFO() but with an explicit 16-bit mask applied to
 * the field (used for sub-byte fields such as DSCP or the GTPU QFI).
 */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
52
/* Table containing properties of supported protocol header fields.
 * Indexed by enum ice_flow_field (see the ICE_FLOW_FIELD_IDX_* comments);
 * the entry order must match that enum exactly.
 */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* Ether */
	/* ICE_FLOW_FIELD_IDX_ETH_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ETH_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_S_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_C_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE),
	/* IPv4 / IPv6 */
	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x00fc),
	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x0ff0),
	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* Transport */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
	/* ARP */
	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_OP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
	/* ICMP */
	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
	/* GTP */
	/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
			      ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
	/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* PPPOE */
	/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
			  ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
	/* PFCP */
	/* ICE_FLOW_FIELD_IDX_PFCP_SEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
			  ICE_FLOW_FLD_SZ_PFCP_SEID),
	/* L2TPV3 */
	/* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
			  ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
	/* ESP */
	/* ICE_FLOW_FIELD_IDX_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
			  ICE_FLOW_FLD_SZ_ESP_SPI),
	/* AH */
	/* ICE_FLOW_FIELD_IDX_AH_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
			  ICE_FLOW_FLD_SZ_AH_SPI),
	/* NAT_T_ESP */
	/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
			  ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
};
172
/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Each array below is a PTYPE bitmap (32 x u32 words, LSB-first within each
 * word). They are ANDed into the profile's ptype set; the bit values are
 * hardware-defined — NOTE(review): do not edit by hand, regenerate from the
 * PTYPE table if the hardware definition changes.
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000155, 0x00000000, 0x00000000,
	0x0003000F, 0x000FC000, 0x83E0F800, 0x00000101,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x001FF800, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
	0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
247
/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
	0x00000800, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00410000, 0x90842000, 0x00000007,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00820000, 0x21084000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x01040000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
	0x10000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
	0x00000000, 0x02040408, 0x40000102, 0x08101020,
	0x00000408, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x42108000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
333
/* Packet types for packets with an Innermost/Last MAC header.
 * NOTE(review): currently all-zero — an inner-MAC constraint filters out
 * every PTYPE; presumably a placeholder until inner-MAC PTYPEs exist.
 */
static const u32 ice_ptypes_mac_il[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC */
static const u32 ice_ptypes_gtpc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000180, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC with TEID */
static const u32 ice_ptypes_gtpc_tid[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000060, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
369
/* Packet types for GTPU.
 *
 * The three attribute tables below list the same set of GTPU PTYPEs but tag
 * them with different ice_ptype_attrib_type values, letting one PTYPE be
 * matched as extension-header (PDU session container), downlink, or uplink
 * traffic respectively.
 */
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
};

static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
};

static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
};

/* PTYPE bitmap covering all GTPU packet types */
static const u32 ice_ptypes_gtpu[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
450
/* Packet types for pppoe */
static const u32 ice_ptypes_pppoe[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with PFCP NODE header */
static const u32 ice_ptypes_pfcp_node[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x80000000, 0x00000002,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with PFCP SESSION header */
static const u32 ice_ptypes_pfcp_session[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000005,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for l2tpv3 */
static const u32 ice_ptypes_l2tpv3[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000300,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for esp */
static const u32 ice_ptypes_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000003, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for ah */
static const u32 ice_ptypes_ah[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x0000000C, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with NAT_T ESP header */
static const u32 ice_ptypes_nat_t_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000030, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
534
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk;	/* hardware block the profile targets */
	u16 entry_length;	/* # of bytes formatted entry will require */
	u8 es_cnt;		/* number of extraction-sequence words in use */
	struct ice_flow_prof *prof;	/* profile being built */

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];
	/* attributes can be used to add attributes to a particular PTYPE */
	const struct ice_ptype_attributes *attr;
	u16 attr_cnt;		/* number of entries in @attr */

	u16 mask[ICE_MAX_FV_WORDS];	/* per-word field-vector masks */
	/* set of PTYPEs the profile applies to; narrowed as headers are
	 * processed
	 */
	ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
};
553
/* Header bits that, for RSS, apply to the inner (tunneled) segment */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	ICE_FLOW_SEG_HDR_NAT_T_ESP)

/* All L2 header bits a segment may carry */
#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* All L3 header bits; at most one may be set per segment (see
 * ice_flow_val_hdrs())
 */
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
/* All L4 header bits; at most one may be set per segment */
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
569
570 /**
571  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
572  * @segs: array of one or more packet segments that describe the flow
573  * @segs_cnt: number of packet segments provided
574  */
575 static enum ice_status
576 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
577 {
578         u8 i;
579
580         for (i = 0; i < segs_cnt; i++) {
581                 /* Multiple L3 headers */
582                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
583                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
584                         return ICE_ERR_PARAM;
585
586                 /* Multiple L4 headers */
587                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
588                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
589                         return ICE_ERR_PARAM;
590         }
591
592         return ICE_SUCCESS;
593 }
594
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
605
606 /**
607  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
608  * @params: information about the flow to be processed
609  * @seg: index of packet segment whose header size is to be determined
610  */
611 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
612 {
613         u16 sz;
614
615         /* L2 headers */
616         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
617                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
618
619         /* L3 headers */
620         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
621                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
622         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
623                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
624         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
625                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
626         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
627                 /* A L3 header is required if L4 is specified */
628                 return 0;
629
630         /* L4 headers */
631         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
632                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
633         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
634                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
635         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
636                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
637         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
638                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
639
640         return sz;
641 }
642
/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers being present in packet segments of the specified flow profile.
 * The resulting ptype set is left in params->ptypes; for GTPU segments the
 * matching packet attribute table is recorded in params->attr/attr_cnt.
 */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof;
	u8 i;

	/* Start with every ptype allowed; each requested header below narrows
	 * the set by ANDing in that header's ptype bitmap.
	 */
	ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
		   ICE_NONDMA_MEM);

	prof = params->prof;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		const ice_bitmap_t *src;
		u32 hdrs;

		hdrs = prof->segs[i].hdrs;

		/* Outer (ofos) tables are used for the first segment (i == 0),
		 * inner (il) tables for subsequent segments.
		 */
		if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
				(const ice_bitmap_t *)ice_ptypes_mac_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		}

		/* VLAN ptypes are only tracked for inner segments */
		if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
			src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		}

		/* ARP is only considered for the outer segment */
		if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
			ice_and_bitmap(params->ptypes, params->ptypes,
				       (const ice_bitmap_t *)ice_ptypes_arp_of,
				       ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
			src = (const ice_bitmap_t *)ice_ptypes_pppoe;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		}

		/* L3: IPv4 or IPv6, further narrowed by the requested L4 */
		if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
				(const ice_bitmap_t *)ice_ptypes_ipv4_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
			if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
				src = (const ice_bitmap_t *)ice_ptypes_udp_il;
				ice_and_bitmap(params->ptypes,
						params->ptypes, src,
					       ICE_FLOW_PTYPE_MAX);
			} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
				ice_and_bitmap(params->ptypes, params->ptypes,
					       (const ice_bitmap_t *)
					       ice_ptypes_tcp_il,
					       ICE_FLOW_PTYPE_MAX);
			} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
				src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
				ice_and_bitmap(params->ptypes, params->ptypes,
					       src, ICE_FLOW_PTYPE_MAX);
			}
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
				(const ice_bitmap_t *)ice_ptypes_ipv6_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
			if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
				src = (const ice_bitmap_t *)ice_ptypes_udp_il;
				ice_and_bitmap(params->ptypes,
						params->ptypes, src,
					       ICE_FLOW_PTYPE_MAX);
			} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
				ice_and_bitmap(params->ptypes, params->ptypes,
					       (const ice_bitmap_t *)
					       ice_ptypes_tcp_il,
					       ICE_FLOW_PTYPE_MAX);
			} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
				src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
				ice_and_bitmap(params->ptypes, params->ptypes,
					       src, ICE_FLOW_PTYPE_MAX);
			}
		}

		/* Tunnel / upper protocols are mutually exclusive per segment.
		 * NOTE(review): every GTPU variant below shares the same
		 * ice_ptypes_gtpu bitmap and differs only in the attribute
		 * table selected (down/up/EH); plain GTPU_IP sets none.
		 */
		if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
				(const ice_bitmap_t *)ice_ptypes_icmp_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
			/* GRE ptypes are only applied for the outer segment */
			if (!i) {
				src = (const ice_bitmap_t *)ice_ptypes_gre_of;
				ice_and_bitmap(params->ptypes, params->ptypes,
					       src, ICE_FLOW_PTYPE_MAX);
			}
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpc;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with downlink */
			params->attr = ice_attr_gtpu_down;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with uplink */
			params->attr = ice_attr_gtpu_up;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with Extension Header */
			params->attr = ice_attr_gtpu_eh;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
			src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
			src = (const ice_bitmap_t *)ice_ptypes_esp;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
			src = (const ice_bitmap_t *)ice_ptypes_ah;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
			src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
			if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
				src =
				(const ice_bitmap_t *)ice_ptypes_pfcp_node;
			else
				src =
				(const ice_bitmap_t *)ice_ptypes_pfcp_session;

			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else {
			/* PFCP not requested: explicitly remove the PFCP
			 * ptypes that would otherwise survive from the
			 * initial all-ones bitmap.
			 */
			src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
			ice_andnot_bitmap(params->ptypes, params->ptypes,
					  src, ICE_FLOW_PTYPE_MAX);

			src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
			ice_andnot_bitmap(params->ptypes, params->ptypes,
					  src, ICE_FLOW_PTYPE_MAX);
		}
	}

	return ICE_SUCCESS;
}
822
823 /**
824  * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
825  * @hw: pointer to the HW struct
826  * @params: information about the flow to be processed
827  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
828  *
829  * This function will allocate an extraction sequence entries for a DWORD size
830  * chunk of the packet flags.
831  */
832 static enum ice_status
833 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
834                           struct ice_flow_prof_params *params,
835                           enum ice_flex_mdid_pkt_flags flags)
836 {
837         u8 fv_words = hw->blk[params->blk].es.fvw;
838         u8 idx;
839
840         /* Make sure the number of extraction sequence entries required does not
841          * exceed the block's capacity.
842          */
843         if (params->es_cnt >= fv_words)
844                 return ICE_ERR_MAX_LIMIT;
845
846         /* some blocks require a reversed field vector layout */
847         if (hw->blk[params->blk].es.reverse)
848                 idx = fv_words - params->es_cnt - 1;
849         else
850                 idx = params->es_cnt;
851
852         params->es[idx].prot_id = ICE_PROT_META_ID;
853         params->es[idx].off = flags;
854         params->es_cnt++;
855
856         return ICE_SUCCESS;
857 }
858
/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 * @match: bitfield of all fields
 *
 * This function determines the protocol ID, offset, and size of the given
 * field. It then allocates one or more extraction sequence entries for the
 * given field, and fill the entries with protocol ID and offset information.
 */
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
		    u8 seg, enum ice_flow_field fld, u64 match)
{
	/* Sibling field sharing the same extraction word, if any */
	enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
	u8 fv_words = hw->blk[params->blk].es.fvw;
	struct ice_flow_fld_info *flds;
	u16 cnt, ese_bits, i;
	u16 sib_mask = 0;	/* mask contributed by the sibling field */
	s16 adj = 0;		/* bit adjustment; not modified below */
	u16 mask;
	u16 off;

	flds = params->prof->segs[seg].fields;

	/* Map the field to the protocol ID it is extracted from. Outer-
	 * segment (seg == 0) fields use the OF/OL protocol IDs, inner
	 * fields the IL variants.
	 */
	switch (fld) {
	case ICE_FLOW_FIELD_IDX_ETH_DA:
	case ICE_FLOW_FIELD_IDX_ETH_SA:
	case ICE_FLOW_FIELD_IDX_S_VLAN:
	case ICE_FLOW_FIELD_IDX_C_VLAN:
		prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
		break;
	case ICE_FLOW_FIELD_IDX_ETH_TYPE:
		prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_TTL:
	case ICE_FLOW_FIELD_IDX_IPV4_PROT:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;

		/* TTL and PROT share the same extraction seq. entry.
		 * Each is considered a sibling to the other in terms of sharing
		 * the same extraction sequence entry.
		 */
		if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
			sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
		else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
			sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;

		/* If the sibling field is also included, that field's
		 * mask needs to be included.
		 */
		if (match & BIT(sib))
			sib_mask = ice_flds_info[sib].mask;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_TTL:
	case ICE_FLOW_FIELD_IDX_IPV6_PROT:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;

		/* TTL and PROT share the same extraction seq. entry.
		 * Each is considered a sibling to the other in terms of sharing
		 * the same extraction sequence entry.
		 */
		if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
			sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
		else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
			sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;

		/* If the sibling field is also included, that field's
		 * mask needs to be included.
		 */
		if (match & BIT(sib))
			sib_mask = ice_flds_info[sib].mask;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_SA:
	case ICE_FLOW_FIELD_IDX_IPV4_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
		prot_id = ICE_PROT_TCP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
		prot_id = ICE_PROT_SCTP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_GTPC_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
		/* GTP is accessed through UDP OF protocol */
		prot_id = ICE_PROT_UDP_OF;
		break;
	case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
		prot_id = ICE_PROT_PPPOE;
		break;
	case ICE_FLOW_FIELD_IDX_PFCP_SEID:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
		prot_id = ICE_PROT_L2TPV3;
		break;
	case ICE_FLOW_FIELD_IDX_ESP_SPI:
		prot_id = ICE_PROT_ESP_F;
		break;
	case ICE_FLOW_FIELD_IDX_AH_SPI:
		prot_id = ICE_PROT_ESP_2;
		break;
	case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_ARP_SIP:
	case ICE_FLOW_FIELD_IDX_ARP_DIP:
	case ICE_FLOW_FIELD_IDX_ARP_SHA:
	case ICE_FLOW_FIELD_IDX_ARP_DHA:
	case ICE_FLOW_FIELD_IDX_ARP_OP:
		prot_id = ICE_PROT_ARP_OF;
		break;
	case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
	case ICE_FLOW_FIELD_IDX_ICMP_CODE:
		/* ICMP type and code share the same extraction seq. entry */
		prot_id = (params->prof->segs[seg].hdrs &
			   ICE_FLOW_SEG_HDR_IPV4) ?
			ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
		sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
			ICE_FLOW_FIELD_IDX_ICMP_CODE :
			ICE_FLOW_FIELD_IDX_ICMP_TYPE;
		break;
	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
		prot_id = ICE_PROT_GRE_OF;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	/* Each extraction sequence entry is a word in size, and extracts a
	 * word-aligned offset from a protocol header.
	 */
	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;

	/* Record the word-aligned byte offset, the bit displacement within
	 * that word, and the extraction sequence index for this field.
	 */
	flds[fld].xtrct.prot_id = prot_id;
	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
		ICE_FLOW_FV_EXTRACT_SZ;
	flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
	flds[fld].xtrct.idx = params->es_cnt;
	flds[fld].xtrct.mask = ice_flds_info[fld].mask;

	/* Adjust the next field-entry index after accommodating the number of
	 * entries this field consumes
	 */
	cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
				  ice_flds_info[fld].size, ese_bits);

	/* Fill in the extraction sequence entries needed for this field */
	off = flds[fld].xtrct.off;
	mask = flds[fld].xtrct.mask;
	for (i = 0; i < cnt; i++) {
		/* Only consume an extraction sequence entry if there is no
		 * sibling field associated with this field or the sibling entry
		 * already extracts the word shared with this field.
		 */
		if (sib == ICE_FLOW_FIELD_IDX_MAX ||
		    flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
		    flds[sib].xtrct.off != off) {
			u8 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= fv_words)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			/* The sibling's mask is merged so one entry serves
			 * both fields sharing the word.
			 */
			params->es[idx].prot_id = prot_id;
			params->es[idx].off = off;
			params->mask[idx] = mask | sib_mask;
			params->es_cnt++;
		}

		off += ICE_FLOW_FV_EXTRACT_SZ;
	}

	return ICE_SUCCESS;
}
1067
1068 /**
1069  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1070  * @hw: pointer to the HW struct
1071  * @params: information about the flow to be processed
1072  * @seg: index of packet segment whose raw fields are to be be extracted
1073  */
1074 static enum ice_status
1075 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1076                      u8 seg)
1077 {
1078         u16 fv_words;
1079         u16 hdrs_sz;
1080         u8 i;
1081
1082         if (!params->prof->segs[seg].raws_cnt)
1083                 return ICE_SUCCESS;
1084
1085         if (params->prof->segs[seg].raws_cnt >
1086             ARRAY_SIZE(params->prof->segs[seg].raws))
1087                 return ICE_ERR_MAX_LIMIT;
1088
1089         /* Offsets within the segment headers are not supported */
1090         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1091         if (!hdrs_sz)
1092                 return ICE_ERR_PARAM;
1093
1094         fv_words = hw->blk[params->blk].es.fvw;
1095
1096         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1097                 struct ice_flow_seg_fld_raw *raw;
1098                 u16 off, cnt, j;
1099
1100                 raw = &params->prof->segs[seg].raws[i];
1101
1102                 /* Storing extraction information */
1103                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1104                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1105                         ICE_FLOW_FV_EXTRACT_SZ;
1106                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1107                         BITS_PER_BYTE;
1108                 raw->info.xtrct.idx = params->es_cnt;
1109
1110                 /* Determine the number of field vector entries this raw field
1111                  * consumes.
1112                  */
1113                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1114                                           (raw->info.src.last * BITS_PER_BYTE),
1115                                           (ICE_FLOW_FV_EXTRACT_SZ *
1116                                            BITS_PER_BYTE));
1117                 off = raw->info.xtrct.off;
1118                 for (j = 0; j < cnt; j++) {
1119                         u16 idx;
1120
1121                         /* Make sure the number of extraction sequence required
1122                          * does not exceed the block's capability
1123                          */
1124                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1125                             params->es_cnt >= ICE_MAX_FV_WORDS)
1126                                 return ICE_ERR_MAX_LIMIT;
1127
1128                         /* some blocks require a reversed field vector layout */
1129                         if (hw->blk[params->blk].es.reverse)
1130                                 idx = fv_words - params->es_cnt - 1;
1131                         else
1132                                 idx = params->es_cnt;
1133
1134                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1135                         params->es[idx].off = off;
1136                         params->es_cnt++;
1137                         off += ICE_FLOW_FV_EXTRACT_SZ;
1138                 }
1139         }
1140
1141         return ICE_SUCCESS;
1142 }
1143
1144 /**
1145  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1146  * @hw: pointer to the HW struct
1147  * @params: information about the flow to be processed
1148  *
1149  * This function iterates through all matched fields in the given segments, and
1150  * creates an extraction sequence for the fields.
1151  */
1152 static enum ice_status
1153 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1154                           struct ice_flow_prof_params *params)
1155 {
1156         enum ice_status status = ICE_SUCCESS;
1157         u8 i;
1158
1159         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1160          * packet flags
1161          */
1162         if (params->blk == ICE_BLK_ACL) {
1163                 status = ice_flow_xtract_pkt_flags(hw, params,
1164                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1165                 if (status)
1166                         return status;
1167         }
1168
1169         for (i = 0; i < params->prof->segs_cnt; i++) {
1170                 u64 match = params->prof->segs[i].match;
1171                 enum ice_flow_field j;
1172
1173                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
1174                         const u64 bit = BIT_ULL(j);
1175
1176                         if (match & bit) {
1177                                 status = ice_flow_xtract_fld(hw, params, i, j,
1178                                                              match);
1179                                 if (status)
1180                                         return status;
1181                                 match &= ~bit;
1182                         }
1183                 }
1184
1185                 /* Process raw matching bytes */
1186                 status = ice_flow_xtract_raws(hw, params, i);
1187                 if (status)
1188                         return status;
1189         }
1190
1191         return status;
1192 }
1193
1194 /**
1195  * ice_flow_sel_acl_scen - returns the specific scenario
1196  * @hw: pointer to the hardware structure
1197  * @params: information about the flow to be processed
1198  *
1199  * This function will return the specific scenario based on the
1200  * params passed to it
1201  */
1202 static enum ice_status
1203 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1204 {
1205         /* Find the best-fit scenario for the provided match width */
1206         struct ice_acl_scen *cand_scen = NULL, *scen;
1207
1208         if (!hw->acl_tbl)
1209                 return ICE_ERR_DOES_NOT_EXIST;
1210
1211         /* Loop through each scenario and match against the scenario width
1212          * to select the specific scenario
1213          */
1214         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1215                 if (scen->eff_width >= params->entry_length &&
1216                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1217                         cand_scen = scen;
1218         if (!cand_scen)
1219                 return ICE_ERR_DOES_NOT_EXIST;
1220
1221         params->prof->cfg.scen = cand_scen;
1222
1223         return ICE_SUCCESS;
1224 }
1225
/**
 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
 * @params: information about the flow to be processed
 *
 * Assigns a byte-select index (or a range-checker slot) to every matched
 * field and raw field across all segments, then validates the totals
 * against the hardware limits and stores the resulting entry length.
 */
static enum ice_status
ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
{
	u16 index, i, range_idx = 0;

	/* Byte-select indices start after the reserved leading entries */
	index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		struct ice_flow_seg_info *seg = &params->prof->segs[i];
		u64 match = seg->match;
		u8 j;

		for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
			struct ice_flow_fld_info *fld;
			const u64 bit = BIT_ULL(j);

			if (!(match & bit))
				continue;

			fld = &seg->fields[j];
			fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;

			if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
				fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;

				/* Range checking only supported for single
				 * words
				 */
				if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
							fld->xtrct.disp,
							BITS_PER_BYTE * 2) > 1)
					return ICE_ERR_PARAM;

				/* Ranges must define low and high values */
				if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
				    fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
					return ICE_ERR_PARAM;

				/* Range fields consume a range-checker slot
				 * instead of byte-select entries
				 */
				fld->entry.val = range_idx++;
			} else {
				/* Store adjusted byte-length of field for later
				 * use, taking into account potential
				 * non-byte-aligned displacement
				 */
				fld->entry.last = DIVIDE_AND_ROUND_UP
					(ice_flds_info[j].size +
					 (fld->xtrct.disp % BITS_PER_BYTE),
					 BITS_PER_BYTE);
				fld->entry.val = index;
				index += fld->entry.last;
			}

			match &= ~bit;
		}

		/* Raw fields always consume byte-select entries */
		for (j = 0; j < seg->raws_cnt; j++) {
			struct ice_flow_seg_fld_raw *raw = &seg->raws[j];

			raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
			raw->info.entry.val = index;
			raw->info.entry.last = raw->info.src.last;
			index += raw->info.entry.last;
		}
	}

	/* Currently only support using the byte selection base, which only
	 * allows for an effective entry size of 30 bytes. Reject anything
	 * larger.
	 */
	if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
		return ICE_ERR_PARAM;

	/* Only 8 range checkers per profile, reject anything trying to use
	 * more
	 */
	if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
		return ICE_ERR_PARAM;

	/* Store # bytes required for entry for later use */
	params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	return ICE_SUCCESS;
}
1313
1314 /**
1315  * ice_flow_proc_segs - process all packet segments associated with a profile
1316  * @hw: pointer to the HW struct
1317  * @params: information about the flow to be processed
1318  */
1319 static enum ice_status
1320 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1321 {
1322         enum ice_status status;
1323
1324         status = ice_flow_proc_seg_hdrs(params);
1325         if (status)
1326                 return status;
1327
1328         status = ice_flow_create_xtrct_seq(hw, params);
1329         if (status)
1330                 return status;
1331
1332         switch (params->blk) {
1333         case ICE_BLK_FD:
1334         case ICE_BLK_RSS:
1335                 status = ICE_SUCCESS;
1336                 break;
1337         case ICE_BLK_ACL:
1338                 status = ice_flow_acl_def_entry_frmt(params);
1339                 if (status)
1340                         return status;
1341                 status = ice_flow_sel_acl_scen(hw, params);
1342                 if (status)
1343                         return status;
1344                 break;
1345         case ICE_BLK_SW:
1346         default:
1347                 return ICE_ERR_NOT_IMPL;
1348         }
1349
1350         return status;
1351 }
1352
1353 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1354 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1355 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1356
1357 /**
1358  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1359  * @hw: pointer to the HW struct
1360  * @blk: classification stage
1361  * @dir: flow direction
1362  * @segs: array of one or more packet segments that describe the flow
1363  * @segs_cnt: number of packet segments provided
1364  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1365  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1366  */
1367 static struct ice_flow_prof *
1368 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1369                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1370                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1371 {
1372         struct ice_flow_prof *p, *prof = NULL;
1373
1374         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1375         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1376                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1377                     segs_cnt && segs_cnt == p->segs_cnt) {
1378                         u8 i;
1379
1380                         /* Check for profile-VSI association if specified */
1381                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1382                             ice_is_vsi_valid(hw, vsi_handle) &&
1383                             !ice_is_bit_set(p->vsis, vsi_handle))
1384                                 continue;
1385
1386                         /* Protocol headers must be checked. Matched fields are
1387                          * checked if specified.
1388                          */
1389                         for (i = 0; i < segs_cnt; i++)
1390                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1391                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1392                                      segs[i].match != p->segs[i].match))
1393                                         break;
1394
1395                         /* A match is found if all segments are matched */
1396                         if (i == segs_cnt) {
1397                                 prof = p;
1398                                 break;
1399                         }
1400                 }
1401         }
1402         ice_release_lock(&hw->fl_profs_locks[blk]);
1403
1404         return prof;
1405 }
1406
1407 /**
1408  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1409  * @hw: pointer to the HW struct
1410  * @blk: classification stage
1411  * @dir: flow direction
1412  * @segs: array of one or more packet segments that describe the flow
1413  * @segs_cnt: number of packet segments provided
1414  */
1415 u64
1416 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1417                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1418 {
1419         struct ice_flow_prof *p;
1420
1421         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1422                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1423
1424         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1425 }
1426
1427 /**
1428  * ice_flow_find_prof_id - Look up a profile with given profile ID
1429  * @hw: pointer to the HW struct
1430  * @blk: classification stage
1431  * @prof_id: unique ID to identify this flow profile
1432  */
1433 static struct ice_flow_prof *
1434 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1435 {
1436         struct ice_flow_prof *p;
1437
1438         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1439                 if (p->id == prof_id)
1440                         return p;
1441         }
1442
1443         return NULL;
1444 }
1445
1446 /**
1447  * ice_dealloc_flow_entry - Deallocate flow entry memory
1448  * @hw: pointer to the HW struct
1449  * @entry: flow entry to be removed
1450  */
1451 static void
1452 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1453 {
1454         if (!entry)
1455                 return;
1456
1457         if (entry->entry)
1458                 ice_free(hw, entry->entry);
1459
1460         if (entry->range_buf) {
1461                 ice_free(hw, entry->range_buf);
1462                 entry->range_buf = NULL;
1463         }
1464
1465         if (entry->acts) {
1466                 ice_free(hw, entry->acts);
1467                 entry->acts = NULL;
1468                 entry->acts_cnt = 0;
1469         }
1470
1471         ice_free(hw, entry);
1472 }
1473
1474 #define ICE_ACL_INVALID_SCEN    0x3f
1475
1476 /**
1477  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any pf
1478  * @hw: pointer to the hardware structure
1479  * @prof: pointer to flow profile
1480  * @buf: destination buffer function writes partial xtrct sequence to
1481  *
1482  * returns ICE_SUCCESS if no pf is associated to the given profile
1483  * returns ICE_ERR_IN_USE if at least one pf is associated to the given profile
1484  * returns other error code for real error
1485  */
1486 static enum ice_status
1487 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1488                             struct ice_aqc_acl_prof_generic_frmt *buf)
1489 {
1490         enum ice_status status;
1491         u8 prof_id = 0;
1492
1493         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1494         if (status)
1495                 return status;
1496
1497         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1498         if (status)
1499                 return status;
1500
1501         /* If all pf's associated scenarios are all 0 or all
1502          * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1503          * not been configured yet.
1504          */
1505         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1506             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1507             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1508             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1509                 return ICE_SUCCESS;
1510
1511         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1512             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1513             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1514             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1515             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1516             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1517             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1518             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1519                 return ICE_SUCCESS;
1520         else
1521                 return ICE_ERR_IN_USE;
1522 }
1523
1524 /**
1525  * ice_flow_acl_free_act_cntr - Free the acl rule's actions
1526  * @hw: pointer to the hardware structure
1527  * @acts: array of actions to be performed on a match
1528  * @acts_cnt: number of actions
1529  */
1530 static enum ice_status
1531 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1532                            u8 acts_cnt)
1533 {
1534         int i;
1535
1536         for (i = 0; i < acts_cnt; i++) {
1537                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1538                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1539                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1540                         struct ice_acl_cntrs cntrs;
1541                         enum ice_status status;
1542
1543                         cntrs.bank = 0; /* Only bank0 for the moment */
1544                         cntrs.first_cntr =
1545                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1546                         cntrs.last_cntr =
1547                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1548
1549                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1550                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1551                         else
1552                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1553
1554                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1555                         if (status)
1556                                 return status;
1557                 }
1558         }
1559         return ICE_SUCCESS;
1560 }
1561
1562 /**
1563  * ice_flow_acl_disassoc_scen - Disassociate the scenario to the Profile
1564  * @hw: pointer to the hardware structure
1565  * @prof: pointer to flow profile
1566  *
1567  * Disassociate the scenario to the Profile for the PF of the VSI.
1568  */
1569 static enum ice_status
1570 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1571 {
1572         struct ice_aqc_acl_prof_generic_frmt buf;
1573         enum ice_status status = ICE_SUCCESS;
1574         u8 prof_id = 0;
1575
1576         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1577
1578         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1579         if (status)
1580                 return status;
1581
1582         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1583         if (status)
1584                 return status;
1585
1586         /* Clear scenario for this pf */
1587         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1588         status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);
1589
1590         return status;
1591 }
1592
1593 /**
1594  * ice_flow_rem_entry_sync - Remove a flow entry
1595  * @hw: pointer to the HW struct
1596  * @blk: classification stage
1597  * @entry: flow entry to be removed
1598  */
1599 static enum ice_status
1600 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1601                         struct ice_flow_entry *entry)
1602 {
1603         if (!entry)
1604                 return ICE_ERR_BAD_PTR;
1605
1606         if (blk == ICE_BLK_ACL) {
1607                 enum ice_status status;
1608
1609                 if (!entry->prof)
1610                         return ICE_ERR_BAD_PTR;
1611
1612                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1613                                            entry->scen_entry_idx);
1614                 if (status)
1615                         return status;
1616
1617                 /* Checks if we need to release an ACL counter. */
1618                 if (entry->acts_cnt && entry->acts)
1619                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1620                                                    entry->acts_cnt);
1621         }
1622
1623         LIST_DEL(&entry->l_entry);
1624
1625         ice_dealloc_flow_entry(hw, entry);
1626
1627         return ICE_SUCCESS;
1628 }
1629
/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @acts: array of default actions
 * @acts_cnt: number of default actions
 * @prof: stores the returned flow profile added
 *
 * Allocates a flow profile, copies the caller's segments and actions into
 * it, processes the segments into an extraction sequence, and programs a
 * matching HW profile. On any failure all allocations made here are freed
 * and *prof is left untouched.
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_action *acts, u8 acts_cnt,
		       struct ice_flow_prof **prof)
{
	struct ice_flow_prof_params params;
	enum ice_status status;
	u8 i;

	/* A non-zero action count requires a non-NULL action array */
	if (!prof || (acts_cnt && !acts))
		return ICE_ERR_BAD_PTR;

	/* NOTE(review): params lives on the stack — confirm
	 * ice_flow_prof_params is small enough for all supported targets.
	 */
	ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
	params.prof = (struct ice_flow_prof *)
		ice_malloc(hw, sizeof(*params.prof));
	if (!params.prof)
		return ICE_ERR_NO_MEMORY;

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params.es[i].prot_id = ICE_PROT_INVALID;
		params.es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params.blk = blk;
	params.prof->id = prof_id;
	params.prof->dir = dir;
	params.prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
			   ICE_NONDMA_TO_NONDMA);

	/* Make a copy of the actions that need to be persistent in the flow
	 * profile instance.
	 */
	if (acts_cnt) {
		params.prof->acts = (struct ice_flow_action *)
			ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
				   ICE_NONDMA_TO_NONDMA);

		if (!params.prof->acts) {
			status = ICE_ERR_NO_MEMORY;
			goto out;
		}
	}

	/* Build ptypes/extraction sequence and block-specific state */
	status = ice_flow_proc_segs(hw, &params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW,
			  "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes,
			      params.attr, params.attr_cnt, params.es,
			      params.mask);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params.prof->entries);
	ice_init_lock(&params.prof->entries_lock);
	*prof = params.prof;

out:
	/* On failure, release the profile and its duplicated actions */
	if (status) {
		if (params.prof->acts)
			ice_free(hw, params.prof->acts);
		ice_free(hw, params.prof);
	}

	return status;
}
1725
/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Removes all flow entries belonging to the profile, performs ACL-specific
 * teardown (scenario disassociation, range-checker clearing), then removes
 * the HW profiles and frees the flow profile itself.
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the flow profile */
	if (!LIST_EMPTY(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		ice_acquire_lock(&prof->entries_lock);

		/* NOTE(review): a failure here only breaks out of the loop;
		 * status is later overwritten by ice_rem_prof() below, so the
		 * entry-removal error is not propagated — confirm intended.
		 */
		LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
					 l_entry) {
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		ice_release_lock(&prof->entries_lock);
	}

	if (blk == ICE_BLK_ACL) {
		struct ice_aqc_acl_profile_ranges query_rng_buf;
		struct ice_aqc_acl_prof_generic_frmt buf;
		u8 prof_id = 0;

		/* Deassociate the scenario to the Profile for the PF */
		status = ice_flow_acl_disassoc_scen(hw, prof);
		if (status)
			return status;

		/* Clear the range-checker if the profile ID is no longer
		 * used by any PF
		 */
		status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
		if (status && status != ICE_ERR_IN_USE) {
			return status;
		} else if (!status) {
			/* Clear the range-checker value for profile ID */
			ice_memset(&query_rng_buf, 0,
				   sizeof(struct ice_aqc_acl_profile_ranges),
				   ICE_NONDMA_MEM);

			status = ice_flow_get_hw_prof(hw, blk, prof->id,
						      &prof_id);
			if (status)
				return status;

			/* Programming an all-zero buffer clears every range
			 * checker previously assigned to this HW profile
			 */
			status = ice_prog_acl_prof_ranges(hw, prof_id,
							  &query_rng_buf, NULL);
			if (status)
				return status;
		}
	}

	/* Remove all hardware profiles associated with this flow profile */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		LIST_DEL(&prof->l_entry);
		ice_destroy_lock(&prof->entries_lock);
		if (prof->acts)
			ice_free(hw, prof->acts);
		ice_free(hw, prof);
	}

	return status;
}
1802
1803 /**
1804  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
1805  * @buf: Destination buffer function writes partial xtrct sequence to
1806  * @info: Info about field
1807  */
1808 static void
1809 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
1810                                struct ice_flow_fld_info *info)
1811 {
1812         u16 dst, i;
1813         u8 src;
1814
1815         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
1816                 info->xtrct.disp / BITS_PER_BYTE;
1817         dst = info->entry.val;
1818         for (i = 0; i < info->entry.last; i++)
1819                 /* HW stores field vector words in LE, convert words back to BE
1820                  * so constructed entries will end up in network order
1821                  */
1822                 buf->byte_selection[dst++] = src++ ^ 1;
1823 }
1824
/**
 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
 * @hw: pointer to the hardware structure
 * @prof: pointer to flow profile
 *
 * Programs the profile's byte/word selection tables (once, for the first PF
 * to use the profile) and records this PF's scenario ID in the profile's
 * per-PF scenario table.
 */
static enum ice_status
ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
{
	struct ice_aqc_acl_prof_generic_frmt buf;
	struct ice_flow_fld_info *info;
	enum ice_status status;
	u8 prof_id = 0;
	u16 i;

	ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

	status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
	if (status)
		return status;

	/* ICE_ERR_IN_USE means another PF already did the one-time profile
	 * configuration below; anything else is a real error.
	 */
	status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
	if (status && status != ICE_ERR_IN_USE)
		return status;

	if (!status) {
		/* Program the profile dependent configuration. This is done
		 * only once regardless of the number of PFs using that profile
		 */
		ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

		for (i = 0; i < prof->segs_cnt; i++) {
			struct ice_flow_seg_info *seg = &prof->segs[i];
			u64 match = seg->match;
			u16 j;

			/* Clearing handled bits in 'match' lets the loop
			 * terminate as soon as all fields are processed.
			 */
			for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
				const u64 bit = BIT_ULL(j);

				if (!(match & bit))
					continue;

				info = &seg->fields[j];

				/* Range-checked fields use the word selection
				 * table; everything else uses byte selection.
				 */
				if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
					buf.word_selection[info->entry.val] =
								info->xtrct.idx;
				else
					ice_flow_acl_set_xtrct_seq_fld(&buf,
								       info);

				match &= ~bit;
			}

			/* Raw (user-defined) fields always go through the
			 * byte-selection path.
			 */
			for (j = 0; j < seg->raws_cnt; j++) {
				info = &seg->raws[j].info;
				ice_flow_acl_set_xtrct_seq_fld(&buf, info);
			}
		}

		/* Mark all PF scenario slots invalid until a PF attaches */
		ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
			   ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
			   ICE_NONDMA_MEM);
	}

	/* Update the current PF */
	buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
	status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL);

	return status;
}
1895
1896 /**
1897  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1898  * @hw: pointer to the hardware structure
1899  * @blk: classification stage
1900  * @vsi_handle: software VSI handle
1901  * @vsig: target VSI group
1902  *
1903  * Assumption: the caller has already verified that the VSI to
1904  * be added has the same characteristics as the VSIG and will
1905  * thereby have access to all resources added to that VSIG.
1906  */
1907 enum ice_status
1908 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1909                         u16 vsig)
1910 {
1911         enum ice_status status;
1912
1913         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1914                 return ICE_ERR_PARAM;
1915
1916         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1917         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1918                                   vsig);
1919         ice_release_lock(&hw->fl_profs_locks[blk]);
1920
1921         return status;
1922 }
1923
1924 /**
1925  * ice_flow_assoc_prof - associate a VSI with a flow profile
1926  * @hw: pointer to the hardware structure
1927  * @blk: classification stage
1928  * @prof: pointer to flow profile
1929  * @vsi_handle: software VSI handle
1930  *
1931  * Assumption: the caller has acquired the lock to the profile list
1932  * and the software VSI handle has been validated
1933  */
1934 static enum ice_status
1935 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1936                     struct ice_flow_prof *prof, u16 vsi_handle)
1937 {
1938         enum ice_status status = ICE_SUCCESS;
1939
1940         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1941                 if (blk == ICE_BLK_ACL) {
1942                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
1943                         if (status)
1944                                 return status;
1945                 }
1946                 status = ice_add_prof_id_flow(hw, blk,
1947                                               ice_get_hw_vsi_num(hw,
1948                                                                  vsi_handle),
1949                                               prof->id);
1950                 if (!status)
1951                         ice_set_bit(vsi_handle, prof->vsis);
1952                 else
1953                         ice_debug(hw, ICE_DBG_FLOW,
1954                                   "HW profile add failed, %d\n",
1955                                   status);
1956         }
1957
1958         return status;
1959 }
1960
1961 /**
1962  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1963  * @hw: pointer to the hardware structure
1964  * @blk: classification stage
1965  * @prof: pointer to flow profile
1966  * @vsi_handle: software VSI handle
1967  *
1968  * Assumption: the caller has acquired the lock to the profile list
1969  * and the software VSI handle has been validated
1970  */
1971 static enum ice_status
1972 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1973                        struct ice_flow_prof *prof, u16 vsi_handle)
1974 {
1975         enum ice_status status = ICE_SUCCESS;
1976
1977         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
1978                 status = ice_rem_prof_id_flow(hw, blk,
1979                                               ice_get_hw_vsi_num(hw,
1980                                                                  vsi_handle),
1981                                               prof->id);
1982                 if (!status)
1983                         ice_clear_bit(vsi_handle, prof->vsis);
1984                 else
1985                         ice_debug(hw, ICE_DBG_FLOW,
1986                                   "HW profile remove failed, %d\n",
1987                                   status);
1988         }
1989
1990         return status;
1991 }
1992
1993 /**
1994  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
1995  * @hw: pointer to the HW struct
1996  * @blk: classification stage
1997  * @dir: flow direction
1998  * @prof_id: unique ID to identify this flow profile
1999  * @segs: array of one or more packet segments that describe the flow
2000  * @segs_cnt: number of packet segments provided
2001  * @acts: array of default actions
2002  * @acts_cnt: number of default actions
2003  * @prof: stores the returned flow profile added
2004  */
2005 enum ice_status
2006 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2007                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2008                   struct ice_flow_action *acts, u8 acts_cnt,
2009                   struct ice_flow_prof **prof)
2010 {
2011         enum ice_status status;
2012
2013         if (segs_cnt > ICE_FLOW_SEG_MAX)
2014                 return ICE_ERR_MAX_LIMIT;
2015
2016         if (!segs_cnt)
2017                 return ICE_ERR_PARAM;
2018
2019         if (!segs)
2020                 return ICE_ERR_BAD_PTR;
2021
2022         status = ice_flow_val_hdrs(segs, segs_cnt);
2023         if (status)
2024                 return status;
2025
2026         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2027
2028         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2029                                         acts, acts_cnt, prof);
2030         if (!status)
2031                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2032
2033         ice_release_lock(&hw->fl_profs_locks[blk]);
2034
2035         return status;
2036 }
2037
2038 /**
2039  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2040  * @hw: pointer to the HW struct
2041  * @blk: the block for which the flow profile is to be removed
2042  * @prof_id: unique ID of the flow profile to be removed
2043  */
2044 enum ice_status
2045 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2046 {
2047         struct ice_flow_prof *prof;
2048         enum ice_status status;
2049
2050         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2051
2052         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2053         if (!prof) {
2054                 status = ICE_ERR_DOES_NOT_EXIST;
2055                 goto out;
2056         }
2057
2058         /* prof becomes invalid after the call */
2059         status = ice_flow_rem_prof_sync(hw, blk, prof);
2060
2061 out:
2062         ice_release_lock(&hw->fl_profs_locks[blk]);
2063
2064         return status;
2065 }
2066
2067 /**
2068  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
2069  * @hw: pointer to the HW struct
2070  * @blk: classification stage
2071  * @prof_id: the profile ID handle
2072  * @hw_prof_id: pointer to variable to receive the HW profile ID
2073  */
2074 enum ice_status
2075 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2076                      u8 *hw_prof_id)
2077 {
2078         struct ice_prof_map *map;
2079
2080         map = ice_search_prof_id(hw, blk, prof_id);
2081         if (map) {
2082                 *hw_prof_id = map->prof_id;
2083                 return ICE_SUCCESS;
2084         }
2085
2086         return ICE_ERR_DOES_NOT_EXIST;
2087 }
2088
2089 /**
2090  * ice_flow_find_entry - look for a flow entry using its unique ID
2091  * @hw: pointer to the HW struct
2092  * @blk: classification stage
2093  * @entry_id: unique ID to identify this flow entry
2094  *
2095  * This function looks for the flow entry with the specified unique ID in all
2096  * flow profiles of the specified classification stage. If the entry is found,
2097  * and it returns the handle to the flow entry. Otherwise, it returns
2098  * ICE_FLOW_ENTRY_ID_INVAL.
2099  */
2100 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2101 {
2102         struct ice_flow_entry *found = NULL;
2103         struct ice_flow_prof *p;
2104
2105         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2106
2107         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2108                 struct ice_flow_entry *e;
2109
2110                 ice_acquire_lock(&p->entries_lock);
2111                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2112                         if (e->id == entry_id) {
2113                                 found = e;
2114                                 break;
2115                         }
2116                 ice_release_lock(&p->entries_lock);
2117
2118                 if (found)
2119                         break;
2120         }
2121
2122         ice_release_lock(&hw->fl_profs_locks[blk]);
2123
2124         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2125 }
2126
2127 /**
2128  * ice_flow_acl_check_actions - Checks the acl rule's actions
2129  * @hw: pointer to the hardware structure
2130  * @acts: array of actions to be performed on a match
2131  * @acts_cnt: number of actions
2132  * @cnt_alloc: indicates if a ACL counter has been allocated.
2133  */
2134 static enum ice_status
2135 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2136                            u8 acts_cnt, bool *cnt_alloc)
2137 {
2138         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2139         int i;
2140
2141         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2142         *cnt_alloc = false;
2143
2144         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2145                 return ICE_ERR_OUT_OF_RANGE;
2146
2147         for (i = 0; i < acts_cnt; i++) {
2148                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2149                     acts[i].type != ICE_FLOW_ACT_DROP &&
2150                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2151                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2152                         return ICE_ERR_CFG;
2153
2154                 /* If the caller want to add two actions of the same type, then
2155                  * it is considered invalid configuration.
2156                  */
2157                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2158                         return ICE_ERR_PARAM;
2159         }
2160
2161         /* Checks if ACL counters are needed. */
2162         for (i = 0; i < acts_cnt; i++) {
2163                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2164                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2165                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2166                         struct ice_acl_cntrs cntrs;
2167                         enum ice_status status;
2168
2169                         cntrs.amount = 1;
2170                         cntrs.bank = 0; /* Only bank0 for the moment */
2171
2172                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2173                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2174                         else
2175                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2176
2177                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2178                         if (status)
2179                                 return status;
2180                         /* Counter index within the bank */
2181                         acts[i].data.acl_act.value =
2182                                                 CPU_TO_LE16(cntrs.first_cntr);
2183                         *cnt_alloc = true;
2184                 }
2185         }
2186
2187         return ICE_SUCCESS;
2188 }
2189
/**
 * ice_flow_acl_frmt_entry_range - Format an acl range checker for a given field
 * @fld: number of the given field
 * @info: info about field
 * @range_buf: range checker configuration buffer
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @range: Input/output param indicating which range checkers are being used
 *
 * Programs one range-checker slot (low/high boundary plus mask) from the
 * caller's match buffer and flags the used slot in @range. A zero mask
 * leaves the slot untouched.
 */
static void
ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
			      struct ice_aqc_acl_profile_ranges *range_buf,
			      u8 *data, u8 *range)
{
	u16 new_mask;

	/* If not specified, default mask is all bits in field */
	new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
		    BIT(ice_flds_info[fld].size) - 1 :
		    (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;

	/* If the mask is 0, then we don't need to worry about this input
	 * range checker value.
	 */
	if (new_mask) {
		/* Low/high boundaries come from the 'val' and 'last' offsets
		 * into the caller's data buffer, shifted by the extraction
		 * displacement just like the mask.
		 * NOTE(review): the u16 loads assume the offsets recorded in
		 * 'info' lie within the caller's buffer — callers are
		 * expected to have validated them.
		 */
		u16 new_high =
			(*(u16 *)(data + info->src.last)) << info->xtrct.disp;
		u16 new_low =
			(*(u16 *)(data + info->src.val)) << info->xtrct.disp;
		u8 range_idx = info->entry.val;

		/* Boundaries and mask are stored big-endian */
		range_buf->checker_cfg[range_idx].low_boundary =
			CPU_TO_BE16(new_low);
		range_buf->checker_cfg[range_idx].high_boundary =
			CPU_TO_BE16(new_high);
		range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);

		/* Indicate which range checker is being used */
		*range |= BIT(range_idx);
	}
}
2230
/**
 * ice_flow_acl_frmt_entry_fld - Partially format acl entry for a given field
 * @fld: number of the given field
 * @info: info about the field
 * @buf: buffer containing the entry
 * @dontcare: buffer containing don't care mask for entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 *
 * Copies the field's match value (and optional inverted mask) from @data into
 * the scenario entry buffer @buf, bit-shifted by the extraction displacement,
 * and records ignored bits in @dontcare (a set bit means "don't care").
 */
static void
ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
			    u8 *dontcare, u8 *data)
{
	u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
	bool use_mask = false;
	u8 disp;

	src = info->src.val;
	mask = info->src.mask;
	dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
	/* Sub-byte displacement of the extracted field */
	disp = info->xtrct.disp % BITS_PER_BYTE;

	if (mask != ICE_FLOW_FLD_OFF_INVAL)
		use_mask = true;

	/* Each source byte is shifted left by 'disp'; the low 8 bits land in
	 * the current destination byte and the overflow (bits 8-15 of
	 * tmp_s/tmp_m) carries into the next iteration's destination byte.
	 */
	for (k = 0; k < info->entry.last; k++, dst++) {
		/* Add overflow bits from previous byte */
		buf[dst] = (tmp_s & 0xff00) >> 8;

		/* If mask is not valid, tmp_m is always zero, so just setting
		 * dontcare to 0 (no masked bits). If mask is valid, pulls in
		 * overflow bits of mask from prev byte
		 */
		dontcare[dst] = (tmp_m & 0xff00) >> 8;

		/* If there is displacement, last byte will only contain
		 * displaced data, but there is no more data to read from user
		 * buffer, so skip so as not to potentially read beyond end of
		 * user buffer
		 */
		if (!disp || k < info->entry.last - 1) {
			/* Store shifted data to use in next byte */
			tmp_s = data[src++] << disp;

			/* Add current (shifted) byte */
			buf[dst] |= tmp_s & 0xff;

			/* Handle mask if valid */
			if (use_mask) {
				/* User mask is inverted here: a 1 in the
				 * user's mask means "match this bit", a 1 in
				 * dontcare means "ignore this bit".
				 */
				tmp_m = (~data[mask++] & 0xff) << disp;
				dontcare[dst] |= tmp_m & 0xff;
			}
		}
	}

	/* Fill in don't care bits at beginning of field */
	if (disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
		for (k = 0; k < disp; k++)
			dontcare[dst] |= BIT(k);
	}

	/* Trailing bit position where the field ends inside its last byte */
	end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;

	/* Fill in don't care bits at end of field */
	if (end_disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
		      info->entry.last - 1;
		for (k = end_disp; k < BITS_PER_BYTE; k++)
			dontcare[dst] |= BIT(k);
	}
}
2302
2303 /**
2304  * ice_flow_acl_frmt_entry - Format acl entry
2305  * @hw: pointer to the hardware structure
2306  * @prof: pointer to flow profile
2307  * @e: pointer to the flow entry
2308  * @data: pointer to a data buffer containing flow entry's match values/masks
2309  * @acts: array of actions to be performed on a match
2310  * @acts_cnt: number of actions
2311  *
2312  * Formats the key (and key_inverse) to be matched from the data passed in,
2313  * along with data from the flow profile. This key/key_inverse pair makes up
2314  * the 'entry' for an acl flow entry.
2315  */
2316 static enum ice_status
2317 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2318                         struct ice_flow_entry *e, u8 *data,
2319                         struct ice_flow_action *acts, u8 acts_cnt)
2320 {
2321         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2322         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2323         enum ice_status status;
2324         bool cnt_alloc;
2325         u8 prof_id = 0;
2326         u16 i, buf_sz;
2327
2328         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2329         if (status)
2330                 return status;
2331
2332         /* Format the result action */
2333
2334         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2335         if (status)
2336                 return status;
2337
2338         status = ICE_ERR_NO_MEMORY;
2339
2340         e->acts = (struct ice_flow_action *)
2341                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2342                            ICE_NONDMA_TO_NONDMA);
2343
2344         if (!e->acts)
2345                 goto out;
2346
2347         e->acts_cnt = acts_cnt;
2348
2349         /* Format the matching data */
2350         buf_sz = prof->cfg.scen->width;
2351         buf = (u8 *)ice_malloc(hw, buf_sz);
2352         if (!buf)
2353                 goto out;
2354
2355         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2356         if (!dontcare)
2357                 goto out;
2358
2359         /* 'key' buffer will store both key and key_inverse, so must be twice
2360          * size of buf
2361          */
2362         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2363         if (!key)
2364                 goto out;
2365
2366         range_buf = (struct ice_aqc_acl_profile_ranges *)
2367                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2368         if (!range_buf)
2369                 goto out;
2370
2371         /* Set don't care mask to all 1's to start, will zero out used bytes */
2372         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2373
2374         for (i = 0; i < prof->segs_cnt; i++) {
2375                 struct ice_flow_seg_info *seg = &prof->segs[i];
2376                 u64 match = seg->match;
2377                 u16 j;
2378
2379                 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
2380                         struct ice_flow_fld_info *info;
2381                         const u64 bit = BIT_ULL(j);
2382
2383                         if (!(match & bit))
2384                                 continue;
2385
2386                         info = &seg->fields[j];
2387
2388                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2389                                 ice_flow_acl_frmt_entry_range(j, info,
2390                                                               range_buf, data,
2391                                                               &range);
2392                         else
2393                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2394                                                             dontcare, data);
2395
2396                         match &= ~bit;
2397                 }
2398
2399                 for (j = 0; j < seg->raws_cnt; j++) {
2400                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2401                         u16 dst, src, mask, k;
2402                         bool use_mask = false;
2403
2404                         src = info->src.val;
2405                         dst = info->entry.val -
2406                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2407                         mask = info->src.mask;
2408
2409                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2410                                 use_mask = true;
2411
2412                         for (k = 0; k < info->entry.last; k++, dst++) {
2413                                 buf[dst] = data[src++];
2414                                 if (use_mask)
2415                                         dontcare[dst] = ~data[mask++];
2416                                 else
2417                                         dontcare[dst] = 0;
2418                         }
2419                 }
2420         }
2421
2422         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2423         dontcare[prof->cfg.scen->pid_idx] = 0;
2424
2425         /* Format the buffer for direction flags */
2426         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2427
2428         if (prof->dir == ICE_FLOW_RX)
2429                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2430
2431         if (range) {
2432                 buf[prof->cfg.scen->rng_chk_idx] = range;
2433                 /* Mark any unused range checkers as don't care */
2434                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2435                 e->range_buf = range_buf;
2436         } else {
2437                 ice_free(hw, range_buf);
2438         }
2439
2440         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2441                              buf_sz);
2442         if (status)
2443                 goto out;
2444
2445         e->entry = key;
2446         e->entry_sz = buf_sz * 2;
2447
2448 out:
2449         if (buf)
2450                 ice_free(hw, buf);
2451
2452         if (dontcare)
2453                 ice_free(hw, dontcare);
2454
2455         if (status && key)
2456                 ice_free(hw, key);
2457
2458         if (status && range_buf) {
2459                 ice_free(hw, range_buf);
2460                 e->range_buf = NULL;
2461         }
2462
2463         if (status && e->acts) {
2464                 ice_free(hw, e->acts);
2465                 e->acts = NULL;
2466                 e->acts_cnt = 0;
2467         }
2468
2469         if (status && cnt_alloc)
2470                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2471
2472         return status;
2473 }
2474
/**
 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
 *                                     the compared data.
 * @prof: pointer to flow profile
 * @e: pointer to the comparing flow entry
 * @do_chg_action: decide if we want to change the ACL action
 * @do_add_entry: decide if we want to add the new ACL entry
 * @do_rem_entry: decide if we want to remove the current ACL entry
 *
 * Find an ACL scenario entry that matches the compared data. In the same time,
 * this function also figure out:
 * a/ If we want to change the ACL action
 * b/ If we want to add the new ACL entry
 * c/ If we want to remove the current ACL entry
 *
 * Returns the matching entry from prof->entries, or NULL when no existing
 * entry has the same matching data as @e.
 */
static struct ice_flow_entry *
ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
				  struct ice_flow_entry *e, bool *do_chg_action,
				  bool *do_add_entry, bool *do_rem_entry)
{
	struct ice_flow_entry *p, *return_entry = NULL;
	u8 i, j;

	/* Check if:
	 * a/ There exists an entry with same matching data, but different
	 *    priority, then we remove this existing ACL entry. Then, we
	 *    will add the new entry to the ACL scenario.
	 * b/ There exists an entry with same matching data, priority, and
	 *    result action, then we do nothing
	 * c/ There exists an entry with same matching data, priority, but
	 *    different, action, then do only change the action's entry.
	 * d/ Else, we add this new entry to the ACL scenario.
	 */
	*do_chg_action = false;
	*do_add_entry = true;
	*do_rem_entry = false;
	LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
		/* Entries with different matching data are irrelevant */
		if (memcmp(p->entry, e->entry, p->entry_sz))
			continue;

		/* From this point, we have the same matching_data. */
		*do_add_entry = false;
		return_entry = p;

		if (p->priority != e->priority) {
			/* matching data && !priority */
			*do_add_entry = true;
			*do_rem_entry = true;
			break;
		}

		/* From this point, we will have matching_data && priority */
		if (p->acts_cnt != e->acts_cnt)
			*do_chg_action = true;
		/* NOTE(review): found_not_match is set when p->acts[i]
		 * differs from ANY action of e (it stays false only if it
		 * matches all of them), so together with the acts_cnt check
		 * this effectively requires the action lists to be fully
		 * identical for do_chg_action to remain false — confirm this
		 * is the intended comparison.
		 */
		for (i = 0; i < p->acts_cnt; i++) {
			bool found_not_match = false;

			for (j = 0; j < e->acts_cnt; j++)
				if (memcmp(&p->acts[i], &e->acts[j],
					   sizeof(struct ice_flow_action))) {
					found_not_match = true;
					break;
				}

			if (found_not_match) {
				*do_chg_action = true;
				break;
			}
		}

		/* (do_chg_action = true) means :
		 *    matching_data && priority && !result_action
		 * (do_chg_action = false) means :
		 *    matching_data && priority && result_action
		 */
		break;
	}

	return return_entry;
}
2555
2556 /**
2557  * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority
2558  * @p: flow priority
2559  */
2560 static enum ice_acl_entry_prior
2561 ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p)
2562 {
2563         enum ice_acl_entry_prior acl_prior;
2564
2565         switch (p) {
2566         case ICE_FLOW_PRIO_LOW:
2567                 acl_prior = ICE_LOW;
2568                 break;
2569         case ICE_FLOW_PRIO_NORMAL:
2570                 acl_prior = ICE_NORMAL;
2571                 break;
2572         case ICE_FLOW_PRIO_HIGH:
2573                 acl_prior = ICE_HIGH;
2574                 break;
2575         default:
2576                 acl_prior = ICE_NORMAL;
2577                 break;
2578         }
2579
2580         return acl_prior;
2581 }
2582
2583 /**
2584  * ice_flow_acl_union_rng_chk - Perform union operation between two
2585  *                              range-range checker buffers
2586  * @dst_buf: pointer to destination range checker buffer
2587  * @src_buf: pointer to source range checker buffer
2588  *
2589  * For this function, we do the union between dst_buf and src_buf
2590  * range checker buffer, and we will save the result back to dst_buf
2591  */
2592 static enum ice_status
2593 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2594                            struct ice_aqc_acl_profile_ranges *src_buf)
2595 {
2596         u8 i, j;
2597
2598         if (!dst_buf || !src_buf)
2599                 return ICE_ERR_BAD_PTR;
2600
2601         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2602                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2603                 bool will_populate = false;
2604
2605                 in_data = &src_buf->checker_cfg[i];
2606
2607                 if (!in_data->mask)
2608                         break;
2609
2610                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2611                         cfg_data = &dst_buf->checker_cfg[j];
2612
2613                         if (!cfg_data->mask ||
2614                             !memcmp(cfg_data, in_data,
2615                                     sizeof(struct ice_acl_rng_data))) {
2616                                 will_populate = true;
2617                                 break;
2618                         }
2619                 }
2620
2621                 if (will_populate) {
2622                         ice_memcpy(cfg_data, in_data,
2623                                    sizeof(struct ice_acl_rng_data),
2624                                    ICE_NONDMA_TO_NONDMA);
2625                 } else {
2626                         /* No available slot left to program range checker */
2627                         return ICE_ERR_MAX_LIMIT;
2628                 }
2629         }
2630
2631         return ICE_SUCCESS;
2632 }
2633
2634 /**
2635  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2636  * @hw: pointer to the hardware structure
2637  * @prof: pointer to flow profile
2638  * @entry: double pointer to the flow entry
2639  *
2640  * For this function, we will look at the current added entries in the
2641  * corresponding ACL scenario. Then, we will perform matching logic to
2642  * see if we want to add/modify/do nothing with this new entry.
2643  */
2644 static enum ice_status
2645 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2646                                  struct ice_flow_entry **entry)
2647 {
2648         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2649         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2650         struct ice_acl_act_entry *acts = NULL;
2651         struct ice_flow_entry *exist;
2652         enum ice_status status = ICE_SUCCESS;
2653         struct ice_flow_entry *e;
2654         u8 i;
2655
2656         if (!entry || !(*entry) || !prof)
2657                 return ICE_ERR_BAD_PTR;
2658
2659         e = *(entry);
2660
2661         do_chg_rng_chk = false;
2662         if (e->range_buf) {
2663                 u8 prof_id = 0;
2664
2665                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2666                                               &prof_id);
2667                 if (status)
2668                         return status;
2669
2670                 /* Query the current range-checker value in FW */
2671                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2672                                                    NULL);
2673                 if (status)
2674                         return status;
2675                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2676                            sizeof(struct ice_aqc_acl_profile_ranges),
2677                            ICE_NONDMA_TO_NONDMA);
2678
2679                 /* Generate the new range-checker value */
2680                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2681                 if (status)
2682                         return status;
2683
2684                 /* Reconfigure the range check if the buffer is changed. */
2685                 do_chg_rng_chk = false;
2686                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2687                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2688                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2689                                                           &cfg_rng_buf, NULL);
2690                         if (status)
2691                                 return status;
2692
2693                         do_chg_rng_chk = true;
2694                 }
2695         }
2696
2697         /* Figure out if we want to (change the ACL action) and/or
2698          * (Add the new ACL entry) and/or (Remove the current ACL entry)
2699          */
2700         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2701                                                   &do_add_entry, &do_rem_entry);
2702
2703         if (do_rem_entry) {
2704                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2705                 if (status)
2706                         return status;
2707         }
2708
2709         /* Prepare the result action buffer */
2710         acts = (struct ice_acl_act_entry *)ice_calloc
2711                 (hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2712         for (i = 0; i < e->acts_cnt; i++)
2713                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2714                            sizeof(struct ice_acl_act_entry),
2715                            ICE_NONDMA_TO_NONDMA);
2716
2717         if (do_add_entry) {
2718                 enum ice_acl_entry_prior prior;
2719                 u8 *keys, *inverts;
2720                 u16 entry_idx;
2721
2722                 keys = (u8 *)e->entry;
2723                 inverts = keys + (e->entry_sz / 2);
2724                 prior = ice_flow_acl_convert_to_acl_prior(e->priority);
2725
2726                 status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys,
2727                                            inverts, acts, e->acts_cnt,
2728                                            &entry_idx);
2729                 if (status)
2730                         goto out;
2731
2732                 e->scen_entry_idx = entry_idx;
2733                 LIST_ADD(&e->l_entry, &prof->entries);
2734         } else {
2735                 if (do_chg_action) {
2736                         /* For the action memory info, update the SW's copy of
2737                          * exist entry with e's action memory info
2738                          */
2739                         ice_free(hw, exist->acts);
2740                         exist->acts_cnt = e->acts_cnt;
2741                         exist->acts = (struct ice_flow_action *)
2742                                 ice_calloc(hw, exist->acts_cnt,
2743                                            sizeof(struct ice_flow_action));
2744
2745                         if (!exist->acts) {
2746                                 status = ICE_ERR_NO_MEMORY;
2747                                 goto out;
2748                         }
2749
2750                         ice_memcpy(exist->acts, e->acts,
2751                                    sizeof(struct ice_flow_action) * e->acts_cnt,
2752                                    ICE_NONDMA_TO_NONDMA);
2753
2754                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2755                                                   e->acts_cnt,
2756                                                   exist->scen_entry_idx);
2757                         if (status)
2758                                 goto out;
2759                 }
2760
2761                 if (do_chg_rng_chk) {
2762                         /* In this case, we want to update the range checker
2763                          * information of the exist entry
2764                          */
2765                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
2766                                                             e->range_buf);
2767                         if (status)
2768                                 goto out;
2769                 }
2770
2771                 /* As we don't add the new entry to our SW DB, deallocate its
2772                  * memories, and return the exist entry to the caller
2773                  */
2774                 ice_dealloc_flow_entry(hw, e);
2775                 *(entry) = exist;
2776         }
2777 out:
2778         if (acts)
2779                 ice_free(hw, acts);
2780
2781         return status;
2782 }
2783
2784 /**
2785  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2786  * @hw: pointer to the hardware structure
2787  * @prof: pointer to flow profile
2788  * @e: double pointer to the flow entry
2789  */
2790 static enum ice_status
2791 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2792                             struct ice_flow_entry **e)
2793 {
2794         enum ice_status status;
2795
2796         ice_acquire_lock(&prof->entries_lock);
2797         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2798         ice_release_lock(&prof->entries_lock);
2799
2800         return status;
2801 }
2802
/**
 * ice_flow_add_entry - Add a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: ID of the profile to add a new flow entry to
 * @entry_id: unique ID to identify this flow entry
 * @vsi_handle: software VSI handle for the flow entry
 * @prio: priority of the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @acts: arrays of actions to be performed on a match
 * @acts_cnt: number of actions
 * @entry_h: pointer to buffer that receives the new flow entry's handle
 *
 * Looks up the profile, associates the VSI with it, and records the entry.
 * FD and RSS entries carry no entry buffer here; for the ACL block the
 * entry formatting and list management is delegated to the ACL helpers.
 */
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, struct ice_flow_action *acts, u8 acts_cnt,
		   u64 *entry_h)
{
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status = ICE_SUCCESS;

	/* ACL entries must indicate an action */
	if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
		return ICE_ERR_PARAM;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
	} else {
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile
		 */
		e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
		if (!e)
			status = ICE_ERR_NO_MEMORY;
		else
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	}

	ice_release_lock(&hw->fl_profs_locks[blk]);
	if (status)
		goto out;

	/* NOTE(review): the cleanup at 'out' tests e->entry, so this relies
	 * on ice_malloc returning zeroed memory — TODO confirm.
	 */
	e->id = entry_id;
	e->vsi_handle = vsi_handle;
	e->prof = prof;
	e->priority = prio;

	switch (blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		break;
	case ICE_BLK_ACL:
		/* ACL will handle the entry management */
		status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
						 acts_cnt);
		if (status)
			goto out;

		/* May replace e with an already-existing equivalent entry */
		status = ice_flow_acl_add_scen_entry(hw, prof, &e);
		if (status)
			goto out;

		break;
	case ICE_BLK_SW:
	case ICE_BLK_PE:
	default:
		status = ICE_ERR_NOT_IMPL;
		goto out;
	}

	if (blk != ICE_BLK_ACL) {
		/* ACL will handle the entry management */
		ice_acquire_lock(&prof->entries_lock);
		LIST_ADD(&e->l_entry, &prof->entries);
		ice_release_lock(&prof->entries_lock);
	}

	*entry_h = ICE_FLOW_ENTRY_HNDL(e);

out:
	/* NOTE(review): only e->entry and e are released here; e->acts is
	 * freed by ice_flow_acl_frmt_entry on its own error paths, but it
	 * appears to leak if ice_flow_acl_add_scen_entry fails after a
	 * successful format — TODO confirm.
	 */
	if (status && e) {
		if (e->entry)
			ice_free(hw, e->entry);
		ice_free(hw, e);
	}

	return status;
}
2903
2904 /**
2905  * ice_flow_rem_entry - Remove a flow entry
2906  * @hw: pointer to the HW struct
2907  * @blk: classification stage
2908  * @entry_h: handle to the flow entry to be removed
2909  */
2910 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
2911                                    u64 entry_h)
2912 {
2913         struct ice_flow_entry *entry;
2914         struct ice_flow_prof *prof;
2915         enum ice_status status = ICE_SUCCESS;
2916
2917         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
2918                 return ICE_ERR_PARAM;
2919
2920         entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
2921
2922         /* Retain the pointer to the flow profile as the entry will be freed */
2923         prof = entry->prof;
2924
2925         if (prof) {
2926                 ice_acquire_lock(&prof->entries_lock);
2927                 status = ice_flow_rem_entry_sync(hw, blk, entry);
2928                 ice_release_lock(&prof->entries_lock);
2929         }
2930
2931         return status;
2932 }
2933
2934 /**
2935  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
2936  * @seg: packet segment the field being set belongs to
2937  * @fld: field to be set
2938  * @field_type: type of the field
2939  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2940  *           entry's input buffer
2941  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2942  *            input buffer
2943  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2944  *            entry's input buffer
2945  *
2946  * This helper function stores information of a field being matched, including
2947  * the type of the field and the locations of the value to match, the mask, and
2948  * and the upper-bound value in the start of the input buffer for a flow entry.
2949  * This function should only be used for fixed-size data structures.
2950  *
2951  * This function also opportunistically determines the protocol headers to be
2952  * present based on the fields being set. Some fields cannot be used alone to
2953  * determine the protocol headers present. Sometimes, fields for particular
2954  * protocol headers are not matched. In those cases, the protocol headers
2955  * must be explicitly set.
2956  */
2957 static void
2958 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2959                      enum ice_flow_fld_match_type field_type, u16 val_loc,
2960                      u16 mask_loc, u16 last_loc)
2961 {
2962         u64 bit = BIT_ULL(fld);
2963
2964         seg->match |= bit;
2965         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
2966                 seg->range |= bit;
2967
2968         seg->fields[fld].type = field_type;
2969         seg->fields[fld].src.val = val_loc;
2970         seg->fields[fld].src.mask = mask_loc;
2971         seg->fields[fld].src.last = last_loc;
2972
2973         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
2974 }
2975
2976 /**
2977  * ice_flow_set_fld - specifies locations of field from entry's input buffer
2978  * @seg: packet segment the field being set belongs to
2979  * @fld: field to be set
2980  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
2981  *           entry's input buffer
2982  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
2983  *            input buffer
2984  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
2985  *            entry's input buffer
2986  * @range: indicate if field being matched is to be in a range
2987  *
2988  * This function specifies the locations, in the form of byte offsets from the
2989  * start of the input buffer for a flow entry, from where the value to match,
2990  * the mask value, and upper value can be extracted. These locations are then
2991  * stored in the flow profile. When adding a flow entry associated with the
2992  * flow profile, these locations will be used to quickly extract the values and
2993  * create the content of a match entry. This function should only be used for
2994  * fixed-size data structures.
2995  */
2996 void
2997 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
2998                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
2999 {
3000         enum ice_flow_fld_match_type t = range ?
3001                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3002
3003         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3004 }
3005
3006 /**
3007  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3008  * @seg: packet segment the field being set belongs to
3009  * @fld: field to be set
3010  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3011  *           entry's input buffer
3012  * @pref_loc: location of prefix value from entry's input buffer
3013  * @pref_sz: size of the location holding the prefix value
3014  *
3015  * This function specifies the locations, in the form of byte offsets from the
3016  * start of the input buffer for a flow entry, from where the value to match
3017  * and the IPv4 prefix value can be extracted. These locations are then stored
3018  * in the flow profile. When adding flow entries to the associated flow profile,
3019  * these locations can be used to quickly extract the values to create the
3020  * content of a match entry. This function should only be used for fixed-size
3021  * data structures.
3022  */
3023 void
3024 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3025                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3026 {
3027         /* For this type of field, the "mask" location is for the prefix value's
3028          * location and the "last" location is for the size of the location of
3029          * the prefix value.
3030          */
3031         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3032                              pref_loc, (u16)pref_sz);
3033 }
3034
3035 /**
3036  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3037  * @seg: packet segment the field being set belongs to
3038  * @off: offset of the raw field from the beginning of the segment in bytes
3039  * @len: length of the raw pattern to be matched
3040  * @val_loc: location of the value to match from entry's input buffer
3041  * @mask_loc: location of mask value from entry's input buffer
3042  *
3043  * This function specifies the offset of the raw field to be match from the
3044  * beginning of the specified packet segment, and the locations, in the form of
3045  * byte offsets from the start of the input buffer for a flow entry, from where
3046  * the value to match and the mask value to be extracted. These locations are
3047  * then stored in the flow profile. When adding flow entries to the associated
3048  * flow profile, these locations can be used to quickly extract the values to
3049  * create the content of a match entry. This function should only be used for
3050  * fixed-size data structures.
3051  */
3052 void
3053 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3054                      u16 val_loc, u16 mask_loc)
3055 {
3056         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3057                 seg->raws[seg->raws_cnt].off = off;
3058                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3059                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3060                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3061                 /* The "last" field is used to store the length of the field */
3062                 seg->raws[seg->raws_cnt].info.src.last = len;
3063         }
3064
3065         /* Overflows of "raws" will be handled as an error condition later in
3066          * the flow when this information is processed.
3067          */
3068         seg->raws_cnt++;
3069 }
3070
/* Protocol header bits accepted in an RSS packet segment, grouped by layer.
 * (Continuation lines indented consistently across the group.)
 */
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
        (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)

#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
        (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
        (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
         ICE_FLOW_SEG_HDR_SCTP)

/* Union of all header bits an RSS segment may carry directly */
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
        (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
         ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
         ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3085
3086 /**
3087  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3088  * @segs: pointer to the flow field segment(s)
3089  * @hash_fields: fields to be hashed on for the segment(s)
3090  * @flow_hdr: protocol header fields within a packet segment
3091  *
3092  * Helper function to extract fields from hash bitmap and use flow
3093  * header value to set flow field segment for further use in flow
3094  * profile entry or removal.
3095  */
3096 static enum ice_status
3097 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
3098                           u32 flow_hdr)
3099 {
3100         u64 val = hash_fields;
3101         u8 i;
3102
3103         for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
3104                 u64 bit = BIT_ULL(i);
3105
3106                 if (val & bit) {
3107                         ice_flow_set_fld(segs, (enum ice_flow_field)i,
3108                                          ICE_FLOW_FLD_OFF_INVAL,
3109                                          ICE_FLOW_FLD_OFF_INVAL,
3110                                          ICE_FLOW_FLD_OFF_INVAL, false);
3111                         val &= ~bit;
3112                 }
3113         }
3114         ICE_FLOW_SET_HDRS(segs, flow_hdr);
3115
3116         if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3117             ~ICE_FLOW_RSS_HDRS_INNER_MASK)
3118                 return ICE_ERR_PARAM;
3119
3120         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3121         if (val && !ice_is_pow2(val))
3122                 return ICE_ERR_CFG;
3123
3124         val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3125         if (val && !ice_is_pow2(val))
3126                 return ICE_ERR_CFG;
3127
3128         return ICE_SUCCESS;
3129 }
3130
3131 /**
3132  * ice_rem_vsi_rss_list - remove VSI from RSS list
3133  * @hw: pointer to the hardware structure
3134  * @vsi_handle: software VSI handle
3135  *
3136  * Remove the VSI from all RSS configurations in the list.
3137  */
3138 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3139 {
3140         struct ice_rss_cfg *r, *tmp;
3141
3142         if (LIST_EMPTY(&hw->rss_list_head))
3143                 return;
3144
3145         ice_acquire_lock(&hw->rss_locks);
3146         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3147                                  ice_rss_cfg, l_entry) {
3148                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3149                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3150                                 LIST_DEL(&r->l_entry);
3151                                 ice_free(hw, r);
3152                         }
3153         }
3154         ice_release_lock(&hw->rss_locks);
3155 }
3156
3157 /**
3158  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3159  * @hw: pointer to the hardware structure
3160  * @vsi_handle: software VSI handle
3161  *
3162  * This function will iterate through all flow profiles and disassociate
3163  * the VSI from that profile. If the flow profile has no VSIs it will
3164  * be removed.
3165  */
3166 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3167 {
3168         const enum ice_block blk = ICE_BLK_RSS;
3169         struct ice_flow_prof *p, *t;
3170         enum ice_status status = ICE_SUCCESS;
3171
3172         if (!ice_is_vsi_valid(hw, vsi_handle))
3173                 return ICE_ERR_PARAM;
3174
3175         if (LIST_EMPTY(&hw->fl_profs[blk]))
3176                 return ICE_SUCCESS;
3177
3178         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3179         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3180                                  l_entry) {
3181                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3182                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3183                         if (status)
3184                                 break;
3185
3186                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3187                                 status = ice_flow_rem_prof_sync(hw, blk, p);
3188                                 if (status)
3189                                         break;
3190                         }
3191                 }
3192         }
3193         ice_release_lock(&hw->fl_profs_locks[blk]);
3194
3195         return status;
3196 }
3197
3198 /**
3199  * ice_rem_rss_list - remove RSS configuration from list
3200  * @hw: pointer to the hardware structure
3201  * @vsi_handle: software VSI handle
3202  * @prof: pointer to flow profile
3203  *
3204  * Assumption: lock has already been acquired for RSS list
3205  */
3206 static void
3207 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3208 {
3209         struct ice_rss_cfg *r, *tmp;
3210
3211         /* Search for RSS hash fields associated to the VSI that match the
3212          * hash configurations associated to the flow profile. If found
3213          * remove from the RSS entry list of the VSI context and delete entry.
3214          */
3215         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3216                                  ice_rss_cfg, l_entry) {
3217                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3218                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3219                         ice_clear_bit(vsi_handle, r->vsis);
3220                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3221                                 LIST_DEL(&r->l_entry);
3222                                 ice_free(hw, r);
3223                         }
3224                         return;
3225                 }
3226         }
3227 }
3228
3229 /**
3230  * ice_add_rss_list - add RSS configuration to list
3231  * @hw: pointer to the hardware structure
3232  * @vsi_handle: software VSI handle
3233  * @prof: pointer to flow profile
3234  *
3235  * Assumption: lock has already been acquired for RSS list
3236  */
3237 static enum ice_status
3238 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3239 {
3240         struct ice_rss_cfg *r, *rss_cfg;
3241
3242         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3243                             ice_rss_cfg, l_entry)
3244                 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
3245                     r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
3246                         ice_set_bit(vsi_handle, r->vsis);
3247                         return ICE_SUCCESS;
3248                 }
3249
3250         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3251         if (!rss_cfg)
3252                 return ICE_ERR_NO_MEMORY;
3253
3254         rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
3255         rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
3256         rss_cfg->symm = prof->cfg.symm;
3257         ice_set_bit(vsi_handle, rss_cfg->vsis);
3258
3259         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3260
3261         return ICE_SUCCESS;
3262 }
3263
/* Shifts/masks selecting the three fields of the 64-bit RSS profile ID */
#define ICE_FLOW_PROF_HASH_S    0
#define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S     32
#define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S   63
#define ICE_FLOW_PROF_ENCAP_M   (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

/* Segment counts for non-tunneled (outer only) vs tunneled RSS configs */
#define ICE_RSS_OUTER_HEADERS   1
#define ICE_RSS_INNER_HEADERS   2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 * (segs_cnt > 1 implies a tunneled/inner-header configuration)
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
        (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
              (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
              ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
3283
3284 static void
3285 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3286 {
3287         u32 s = ((src % 4) << 3); /* byte shift */
3288         u32 v = dst | 0x80; /* value to program */
3289         u8 i = src / 4; /* register index */
3290         u32 reg;
3291
3292         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3293         reg = (reg & ~(0xff << s)) | (v << s);
3294         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3295 }
3296
3297 static void
3298 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3299 {
3300         int fv_last_word =
3301                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3302         int i;
3303
3304         for (i = 0; i < len; i++) {
3305                 ice_rss_config_xor_word(hw, prof_id,
3306                                         /* Yes, field vector in GLQF_HSYMM and
3307                                          * GLQF_HINSET is inversed!
3308                                          */
3309                                         fv_last_word - (src + i),
3310                                         fv_last_word - (dst + i));
3311                 ice_rss_config_xor_word(hw, prof_id,
3312                                         fv_last_word - (dst + i),
3313                                         fv_last_word - (src + i));
3314         }
3315 }
3316
3317 static void
3318 ice_rss_update_symm(struct ice_hw *hw,
3319                     struct ice_flow_prof *prof)
3320 {
3321         struct ice_prof_map *map;
3322         u8 prof_id, m;
3323
3324         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3325         prof_id = map->prof_id;
3326
3327         /* clear to default */
3328         for (m = 0; m < 6; m++)
3329                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3330         if (prof->cfg.symm) {
3331                 struct ice_flow_seg_info *seg =
3332                         &prof->segs[prof->segs_cnt - 1];
3333
3334                 struct ice_flow_seg_xtrct *ipv4_src =
3335                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3336                 struct ice_flow_seg_xtrct *ipv4_dst =
3337                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3338                 struct ice_flow_seg_xtrct *ipv6_src =
3339                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3340                 struct ice_flow_seg_xtrct *ipv6_dst =
3341                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3342
3343                 struct ice_flow_seg_xtrct *tcp_src =
3344                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3345                 struct ice_flow_seg_xtrct *tcp_dst =
3346                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3347
3348                 struct ice_flow_seg_xtrct *udp_src =
3349                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3350                 struct ice_flow_seg_xtrct *udp_dst =
3351                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3352
3353                 struct ice_flow_seg_xtrct *sctp_src =
3354                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3355                 struct ice_flow_seg_xtrct *sctp_dst =
3356                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3357
3358                 /* xor IPv4 */
3359                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3360                         ice_rss_config_xor(hw, prof_id,
3361                                            ipv4_src->idx, ipv4_dst->idx, 2);
3362
3363                 /* xor IPv6 */
3364                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3365                         ice_rss_config_xor(hw, prof_id,
3366                                            ipv6_src->idx, ipv6_dst->idx, 8);
3367
3368                 /* xor TCP */
3369                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3370                         ice_rss_config_xor(hw, prof_id,
3371                                            tcp_src->idx, tcp_dst->idx, 1);
3372
3373                 /* xor UDP */
3374                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3375                         ice_rss_config_xor(hw, prof_id,
3376                                            udp_src->idx, udp_dst->idx, 1);
3377
3378                 /* xor SCTP */
3379                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3380                         ice_rss_config_xor(hw, prof_id,
3381                                            sctp_src->idx, sctp_dst->idx, 1);
3382         }
3383 }
3384
/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 * @segs_cnt: packet segment count
 * @symm: symmetric hash enable/disable
 *
 * Builds the packet segment info for @hashed_flds/@addl_hdrs, then either
 * reuses an existing flow profile or creates a new one, associating the VSI
 * with it and tracking the configuration in the RSS list.
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
                     u32 addl_hdrs, u8 segs_cnt, bool symm)
{
        const enum ice_block blk = ICE_BLK_RSS;
        struct ice_flow_prof *prof = NULL;
        struct ice_flow_seg_info *segs;
        enum ice_status status;

        if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
                return ICE_ERR_PARAM;

        segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
                                                      sizeof(*segs));
        if (!segs)
                return ICE_ERR_NO_MEMORY;

        /* Construct the packet segment info from the hashed fields */
        status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
                                           addl_hdrs);
        if (status)
                goto exit;

        /* Search for a flow profile that has matching headers, hash fields
         * and has the input VSI associated to it. If found, no further
         * operations required and exit.
         */
        prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
                                        vsi_handle,
                                        ICE_FLOW_FIND_PROF_CHK_FLDS |
                                        ICE_FLOW_FIND_PROF_CHK_VSI);
        if (prof) {
                /* Same profile; only the symmetric setting may need a
                 * hardware update.
                 */
                if (prof->cfg.symm == symm)
                        goto exit;
                prof->cfg.symm = symm;
                goto update_symm;
        }

        /* Check if a flow profile exists with the same protocol headers and
         * associated with the input VSI. If so disassociate the VSI from
         * this profile. The VSI will be added to a new profile created with
         * the protocol header and new hash field configuration.
         */
        prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
                                        vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
        if (prof) {
                status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
                if (!status)
                        ice_rem_rss_list(hw, vsi_handle, prof);
                else
                        goto exit;

                /* Remove profile if it has no VSIs associated */
                if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
                        status = ice_flow_rem_prof(hw, blk, prof->id);
                        if (status)
                                goto exit;
                }
        }

        /* Search for a profile that has same match fields only. If this
         * exists then associate the VSI to this profile.
         */
        prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
                                        vsi_handle,
                                        ICE_FLOW_FIND_PROF_CHK_FLDS);
        if (prof) {
                if (prof->cfg.symm == symm) {
                        status = ice_flow_assoc_prof(hw, blk, prof,
                                                     vsi_handle);
                        if (!status)
                                status = ice_add_rss_list(hw, vsi_handle,
                                                          prof);
                } else {
                        /* if a profile exists but with a different symmetric
                         * requirement, just return error.
                         */
                        status = ICE_ERR_NOT_SUPPORTED;
                }
                goto exit;
        }

        /* Create a new flow profile with generated profile and packet
         * segment information.
         */
        status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
                                   ICE_FLOW_GEN_PROFID(hashed_flds,
                                                       segs[segs_cnt - 1].hdrs,
                                                       segs_cnt),
                                   segs, segs_cnt, NULL, 0, &prof);
        if (status)
                goto exit;

        status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
        /* If association to a new flow profile failed then this profile can
         * be removed.
         */
        if (status) {
                ice_flow_rem_prof(hw, blk, prof->id);
                goto exit;
        }

        status = ice_add_rss_list(hw, vsi_handle, prof);

        prof->cfg.symm = symm;

update_symm:
        ice_rss_update_symm(hw, prof);

exit:
        ice_free(hw, segs);
        return status;
}
3509
3510 /**
3511  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3512  * @hw: pointer to the hardware structure
3513  * @vsi_handle: software VSI handle
3514  * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
3515  * @addl_hdrs: protocol header fields
3516  * @symm: symmetric hash enable/disable
3517  *
3518  * This function will generate a flow profile based on fields associated with
3519  * the input fields to hash on, the flow type and use the VSI number to add
3520  * a flow entry to the profile.
3521  */
3522 enum ice_status
3523 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3524                 u32 addl_hdrs, bool symm)
3525 {
3526         enum ice_status status;
3527
3528         if (hashed_flds == ICE_HASH_INVALID ||
3529             !ice_is_vsi_valid(hw, vsi_handle))
3530                 return ICE_ERR_PARAM;
3531
3532         ice_acquire_lock(&hw->rss_locks);
3533         status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3534                                       ICE_RSS_OUTER_HEADERS, symm);
3535         if (!status)
3536                 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3537                                               addl_hdrs, ICE_RSS_INNER_HEADERS,
3538                                               symm);
3539         ice_release_lock(&hw->rss_locks);
3540
3541         return status;
3542 }
3543
3544 /**
3545  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3546  * @hw: pointer to the hardware structure
3547  * @vsi_handle: software VSI handle
3548  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3549  * @addl_hdrs: Protocol header fields within a packet segment
3550  * @segs_cnt: packet segment count
3551  *
3552  * Assumption: lock has already been acquired for RSS list
3553  */
3554 static enum ice_status
3555 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3556                      u32 addl_hdrs, u8 segs_cnt)
3557 {
3558         const enum ice_block blk = ICE_BLK_RSS;
3559         struct ice_flow_seg_info *segs;
3560         struct ice_flow_prof *prof;
3561         enum ice_status status;
3562
3563         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3564                                                       sizeof(*segs));
3565         if (!segs)
3566                 return ICE_ERR_NO_MEMORY;
3567
3568         /* Construct the packet segment info from the hashed fields */
3569         status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
3570                                            addl_hdrs);
3571         if (status)
3572                 goto out;
3573
3574         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3575                                         vsi_handle,
3576                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3577         if (!prof) {
3578                 status = ICE_ERR_DOES_NOT_EXIST;
3579                 goto out;
3580         }
3581
3582         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3583         if (status)
3584                 goto out;
3585
3586         /* Remove RSS configuration from VSI context before deleting
3587          * the flow profile.
3588          */
3589         ice_rem_rss_list(hw, vsi_handle, prof);
3590
3591         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3592                 status = ice_flow_rem_prof(hw, blk, prof->id);
3593
3594 out:
3595         ice_free(hw, segs);
3596         return status;
3597 }
3598
3599 /**
3600  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3601  * @hw: pointer to the hardware structure
3602  * @vsi_handle: software VSI handle
3603  * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
3604  * @addl_hdrs: Protocol header fields within a packet segment
3605  *
3606  * This function will lookup the flow profile based on the input
3607  * hash field bitmap, iterate through the profile entry list of
3608  * that profile and find entry associated with input VSI to be
3609  * removed. Calls are made to underlying flow apis which will in
3610  * turn build or update buffers for RSS XLT1 section.
3611  */
3612 enum ice_status
3613 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
3614                 u32 addl_hdrs)
3615 {
3616         enum ice_status status;
3617
3618         if (hashed_flds == ICE_HASH_INVALID ||
3619             !ice_is_vsi_valid(hw, vsi_handle))
3620                 return ICE_ERR_PARAM;
3621
3622         ice_acquire_lock(&hw->rss_locks);
3623         status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
3624                                       ICE_RSS_OUTER_HEADERS);
3625         if (!status)
3626                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
3627                                               addl_hdrs, ICE_RSS_INNER_HEADERS);
3628         ice_release_lock(&hw->rss_locks);
3629
3630         return status;
3631 }
3632
3633 /**
3634  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3635  * @hw: pointer to the hardware structure
3636  * @vsi_handle: software VSI handle
3637  */
3638 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3639 {
3640         enum ice_status status = ICE_SUCCESS;
3641         struct ice_rss_cfg *r;
3642
3643         if (!ice_is_vsi_valid(hw, vsi_handle))
3644                 return ICE_ERR_PARAM;
3645
3646         ice_acquire_lock(&hw->rss_locks);
3647         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3648                             ice_rss_cfg, l_entry) {
3649                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3650                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3651                                                       r->hashed_flds,
3652                                                       r->packet_hdr,
3653                                                       ICE_RSS_OUTER_HEADERS,
3654                                                       r->symm);
3655                         if (status)
3656                                 break;
3657                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3658                                                       r->hashed_flds,
3659                                                       r->packet_hdr,
3660                                                       ICE_RSS_INNER_HEADERS,
3661                                                       r->symm);
3662                         if (status)
3663                                 break;
3664                 }
3665         }
3666         ice_release_lock(&hw->rss_locks);
3667
3668         return status;
3669 }
3670
3671 /**
3672  * ice_get_rss_cfg - returns hashed fields for the given header types
3673  * @hw: pointer to the hardware structure
3674  * @vsi_handle: software VSI handle
3675  * @hdrs: protocol header type
3676  *
3677  * This function will return the match fields of the first instance of flow
3678  * profile having the given header types and containing input VSI
3679  */
3680 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3681 {
3682         struct ice_rss_cfg *r, *rss_cfg = NULL;
3683
3684         /* verify if the protocol header is non zero and VSI is valid */
3685         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3686                 return ICE_HASH_INVALID;
3687
3688         ice_acquire_lock(&hw->rss_locks);
3689         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3690                             ice_rss_cfg, l_entry)
3691                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3692                     r->packet_hdr == hdrs) {
3693                         rss_cfg = r;
3694                         break;
3695                 }
3696         ice_release_lock(&hw->rss_locks);
3697
3698         return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
3699 }