/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2021 Intel Corporation
 */

#include "ice_common.h"
#include "ice_flow.h"

/* Size of known protocol header fields */
#define ICE_FLOW_FLD_SZ_ETH_TYPE        2
#define ICE_FLOW_FLD_SZ_VLAN            2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
#define ICE_FLOW_FLD_SZ_IPV4_ID         2
#define ICE_FLOW_FLD_SZ_IPV6_ID         4
#define ICE_FLOW_FLD_SZ_IP_DSCP         1
#define ICE_FLOW_FLD_SZ_IP_TTL          1
#define ICE_FLOW_FLD_SZ_IP_PROT         1
#define ICE_FLOW_FLD_SZ_PORT            2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
#define ICE_FLOW_FLD_SZ_ICMP_CODE       1
#define ICE_FLOW_FLD_SZ_ARP_OPER        2
#define ICE_FLOW_FLD_SZ_GRE_KEYID       4
#define ICE_FLOW_FLD_SZ_GTP_TEID        4
#define ICE_FLOW_FLD_SZ_GTP_QFI         2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
#define ICE_FLOW_FLD_SZ_PFCP_SEID       8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
#define ICE_FLOW_FLD_SZ_ESP_SPI         4
#define ICE_FLOW_FLD_SZ_AH_SPI          4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
#define ICE_FLOW_FLD_SZ_VXLAN_VNI       4
#define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2

/* Describe properties of a protocol header field */
struct ice_flow_field_info {
        enum ice_flow_seg_hdr hdr;
        s16 off;        /* Offset from start of a protocol header, in bits */
        u16 size;       /* Size of field in bits */
        u16 mask;       /* 16-bit mask for field */
};

#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
        .hdr = _hdr, \
        .off = (_offset_bytes) * BITS_PER_BYTE, \
        .size = (_size_bytes) * BITS_PER_BYTE, \
        .mask = 0, \
}

#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
        .hdr = _hdr, \
        .off = (_offset_bytes) * BITS_PER_BYTE, \
        .size = (_size_bytes) * BITS_PER_BYTE, \
        .mask = _mask, \
}

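/*
 * Example: the IPv4 source address entry in the table below is declared as
 * ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
 * i.e. a 4-byte field starting 12 bytes into the IPv4 header; the macros
 * convert both the offset and the size from bytes to bits.
 */
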
/* Table containing properties of supported protocol header fields */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
        /* Ether */
        /* ICE_FLOW_FIELD_IDX_ETH_DA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
        /* ICE_FLOW_FIELD_IDX_ETH_SA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
        /* ICE_FLOW_FIELD_IDX_S_VLAN */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
        /* ICE_FLOW_FIELD_IDX_C_VLAN */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
        /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
        /* IPv4 / IPv6 */
        /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
                              0x00fc),
        /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
                              0x0ff0),
        /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
                              ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
        /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
                              ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
        /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
                              ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
        /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
                              ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
        /* ICE_FLOW_FIELD_IDX_IPV4_SA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV4_DA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV6_SA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV6_DA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
                          ICE_FLOW_FLD_SZ_IPV4_ID),
        /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
                          ICE_FLOW_FLD_SZ_IPV6_ID),
        /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
                          ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
                          ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
                          ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
                          ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
                          ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
        /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
                          ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
        /* Transport */
        /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
        /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
        /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
        /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
        /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
        /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
        /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
        /* ARP */
        /* ICE_FLOW_FIELD_IDX_ARP_SIP */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
        /* ICE_FLOW_FIELD_IDX_ARP_DIP */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
        /* ICE_FLOW_FIELD_IDX_ARP_SHA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
        /* ICE_FLOW_FIELD_IDX_ARP_DHA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
        /* ICE_FLOW_FIELD_IDX_ARP_OP */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
        /* ICMP */
        /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
        /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
        /* GRE */
        /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
        /* GTP */
        /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
                          ICE_FLOW_FLD_SZ_GTP_TEID),
        /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
                          ICE_FLOW_FLD_SZ_GTP_TEID),
        /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
                          ICE_FLOW_FLD_SZ_GTP_TEID),
        /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
                              ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
        /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
                          ICE_FLOW_FLD_SZ_GTP_TEID),
        /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
                          ICE_FLOW_FLD_SZ_GTP_TEID),
        /* PPPOE */
        /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
                          ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
        /* PFCP */
        /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
                          ICE_FLOW_FLD_SZ_PFCP_SEID),
        /* L2TPV3 */
        /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
                          ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
        /* ESP */
        /* ICE_FLOW_FIELD_IDX_ESP_SPI */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
                          ICE_FLOW_FLD_SZ_ESP_SPI),
        /* AH */
        /* ICE_FLOW_FIELD_IDX_AH_SPI */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
                          ICE_FLOW_FLD_SZ_AH_SPI),
        /* NAT_T_ESP */
        /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
                          ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
        /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
                          ICE_FLOW_FLD_SZ_VXLAN_VNI),
        /* ECPRI_TP0 */
        /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
                          ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
        /* UDP_ECPRI_TP0 */
        /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
                          ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
};
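
/*
 * Note: ice_flds_info is indexed by the ICE_FLOW_FIELD_IDX_* enum values
 * named in the comments above, so the order of the entries must stay in
 * sync with that enum.
 */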

/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
        0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
        0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
        0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
        0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
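
/*
 * Each ice_ptypes_* table here is an array of 32 32-bit words covering the
 * ICE_FLOW_PTYPE_MAX packet types: bit b of word w marks packet type
 * (32 * w + b) as relevant for the corresponding header (assuming the usual
 * little-endian word layout of ice_bitmap_t). The tables are applied with
 * ice_and_bitmap()/ice_andnot_bitmap() in ice_flow_proc_seg_hdrs() below.
 */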

/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
        0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
        0x0000077E, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
 * include IPV4 other PTYPEs
 */
static const u32 ice_ptypes_ipv4_ofos[] = {
        0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
        0x00000000, 0x00000155, 0x00000000, 0x00000000,
        0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
        0x00001500, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header, includes
 * IPV4 other PTYPEs
 */
static const u32 ice_ptypes_ipv4_ofos_all[] = {
        0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
        0x00000000, 0x00000155, 0x00000000, 0x00000000,
        0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
        0x03FFD500, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
        0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
        0x0000000E, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x001FF800, 0x00100000,
        0xFC0FC000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
 * include IPV6 other PTYPEs
 */
static const u32 ice_ptypes_ipv6_ofos[] = {
        0x00000000, 0x00000000, 0x77000000, 0x10002000,
        0x00000000, 0x000002AA, 0x00000000, 0x00000000,
        0x00000000, 0x03F00000, 0x00000540, 0x00000000,
        0x00002A00, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header, includes
 * IPV6 other PTYPEs
 */
static const u32 ice_ptypes_ipv6_ofos_all[] = {
        0x00000000, 0x00000000, 0x77000000, 0x10002000,
        0x00000000, 0x000002AA, 0x00000000, 0x00000000,
        0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
        0xFC002A00, 0x0000003F, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
        0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
        0x00000770, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
        0x03F00000, 0x0000003F, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
        0x10C00000, 0x04000800, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
        0x00001500, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
static const u32 ice_ptypes_ipv4_il_no_l4[] = {
        0x60000000, 0x18043008, 0x80000002, 0x6010c021,
        0x00000008, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00139800, 0x00000000,
        0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
        0x00000000, 0x00000000, 0x43000000, 0x10002000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x02300000, 0x00000540, 0x00000000,
        0x00002A00, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
static const u32 ice_ptypes_ipv6_il_no_l4[] = {
        0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
        0x00000430, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x4e600000, 0x00000000,
        0x02300000, 0x00000023, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
        0x00000800, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
        0x81000000, 0x20204040, 0x04000010, 0x80810102,
        0x00000040, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00410000, 0x908427E0, 0x00100007,
        0x10410000, 0x00000004, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
        0x04000000, 0x80810102, 0x10000040, 0x02040408,
        0x00000102, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00820000, 0x21084000, 0x00000000,
        0x20820000, 0x00000008, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
        0x08000000, 0x01020204, 0x20000081, 0x04080810,
        0x00000204, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x01040000, 0x00000000, 0x00000000,
        0x41040000, 0x00000010, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
        0x10000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
        0x00000000, 0x02040408, 0x40000102, 0x08101020,
        0x00000408, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x42108000, 0x00000000,
        0x82080000, 0x00000020, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
        0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
        0x0000017E, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC header */
static const u32 ice_ptypes_mac_il[] = {
        0x00000000, 0x20000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC */
static const u32 ice_ptypes_gtpc[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x000001E0, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for VXLAN with VNI */
static const u32 ice_ptypes_vxlan_vni[] = {
        0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
        0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPC with TEID */
static const u32 ice_ptypes_gtpc_tid[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000060, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for GTPU */
static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
        { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
        { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
};

static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
        { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
        { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
};

static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
        { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
};

static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
        { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
        { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
};
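
/*
 * The ice_attr_gtpu_* tables above do not narrow the PTYPE bitmap by
 * themselves; ice_flow_proc_seg_hdrs() selects one of them for
 * params->attr/params->attr_cnt based on which ICE_FLOW_SEG_HDR_GTPU_* flag
 * is set, so that the matching GTPU packet types can later be programmed
 * with the session, PDU EH, downlink or uplink attribute.
 */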

static const u32 ice_ptypes_gtpu[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for pppoe */
static const u32 ice_ptypes_pppoe[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with PFCP NODE header */
static const u32 ice_ptypes_pfcp_node[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x80000000, 0x00000002,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with PFCP SESSION header */
static const u32 ice_ptypes_pfcp_session[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000005,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for l2tpv3 */
static const u32 ice_ptypes_l2tpv3[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000300,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for esp */
static const u32 ice_ptypes_esp[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000003, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for ah */
static const u32 ice_ptypes_ah[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x0000000C, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with NAT_T ESP header */
static const u32 ice_ptypes_nat_t_esp[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000030, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_mac_non_ip_ofos[] = {
        0x00000846, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_gtpu_no_ip[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000600, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_ecpri_tp0[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000400,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_udp_ecpri_tp0[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00100000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_l2tpv2[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_ppp[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_ipv4_frag[] = {
        0x00400000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

static const u32 ice_ptypes_ipv6_frag[] = {
        0x00000000, 0x00000000, 0x01000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Manage parameters and info used during the creation of a flow profile */
struct ice_flow_prof_params {
        enum ice_block blk;
        u16 entry_length; /* # of bytes formatted entry will require */
        u8 es_cnt;
        struct ice_flow_prof *prof;

        /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
         * This will give us the direction flags.
         */
        struct ice_fv_word es[ICE_MAX_FV_WORDS];
        /* attributes can be used to add attributes to a particular PTYPE */
        const struct ice_ptype_attributes *attr;
        u16 attr_cnt;

        u16 mask[ICE_MAX_FV_WORDS];
        ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
};
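
/*
 * ptypes is initialized to all ones by ice_flow_proc_seg_hdrs() and then
 * progressively AND-ed with the per-header ice_ptypes_* tables, so on
 * return it holds only the packet types compatible with every header of
 * every segment in the profile.
 */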

#define ICE_FLOW_RSS_HDRS_INNER_MASK \
        (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
        ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
        ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
        ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
        ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
        ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
        ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP)

#define ICE_FLOW_SEG_HDRS_L2_MASK       \
        (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK       \
        (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
         ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK       \
        (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
         ICE_FLOW_SEG_HDR_SCTP)
/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
        (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
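
/*
 * ICMP is part of ICE_FLOW_SEG_HDRS_L4_MASK but intentionally not of
 * ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER: per the comment above, ICMP packet
 * types are counted with the IPV4/6 OTHER PTYPE groups, so an IPv4/IPv6 +
 * ICMP segment still selects the *_no_l4 tables in ice_flow_proc_seg_hdrs()
 * before the ICMP bitmap is applied.
 */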

/**
 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 */
static enum ice_status
ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
{
        u8 i;

        for (i = 0; i < segs_cnt; i++) {
                /* Multiple L3 headers */
                if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
                    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
                        return ICE_ERR_PARAM;

                /* Multiple L4 headers */
                if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
                    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
                        return ICE_ERR_PARAM;
        }

        return ICE_SUCCESS;
}
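
/*
 * For example, a segment with both ICE_FLOW_SEG_HDR_IPV4 and
 * ICE_FLOW_SEG_HDR_IPV6 set fails the power-of-two check on
 * ICE_FLOW_SEG_HDRS_L3_MASK above and is rejected with ICE_ERR_PARAM.
 */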

/* Sizes of fixed known protocol headers without header options */
#define ICE_FLOW_PROT_HDR_SZ_MAC        14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4       20
#define ICE_FLOW_PROT_HDR_SZ_IPV6       40
#define ICE_FLOW_PROT_HDR_SZ_ARP        28
#define ICE_FLOW_PROT_HDR_SZ_ICMP       8
#define ICE_FLOW_PROT_HDR_SZ_TCP        20
#define ICE_FLOW_PROT_HDR_SZ_UDP        8
#define ICE_FLOW_PROT_HDR_SZ_SCTP       12

/**
 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose header size is to be determined
 */
static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
{
        u16 sz;

        /* L2 headers */
        sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
                ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;

        /* L3 headers */
        if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
                sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
                sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
                sz += ICE_FLOW_PROT_HDR_SZ_ARP;
        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
                /* An L3 header is required if L4 is specified */
                return 0;

        /* L4 headers */
        if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
                sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
                sz += ICE_FLOW_PROT_HDR_SZ_TCP;
        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
                sz += ICE_FLOW_PROT_HDR_SZ_UDP;
        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
                sz += ICE_FLOW_PROT_HDR_SZ_SCTP;

        return sz;
}
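
/*
 * Example: a segment with ICE_FLOW_SEG_HDR_VLAN, ICE_FLOW_SEG_HDR_IPV4 and
 * ICE_FLOW_SEG_HDR_UDP yields 16 + 20 + 8 = 44 bytes of fixed header.
 */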

/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers present in the packet segments of the specified flow profile.
 */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
        struct ice_flow_prof *prof;
        u8 i;

        ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
                   ICE_NONDMA_MEM);

        prof = params->prof;

        for (i = 0; i < params->prof->segs_cnt; i++) {
                const ice_bitmap_t *src;
                u32 hdrs;

                hdrs = prof->segs[i].hdrs;

                if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
                        src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
                                (const ice_bitmap_t *)ice_ptypes_mac_il;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                }

                if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
                        src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                }

                if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       (const ice_bitmap_t *)ice_ptypes_arp_of,
                                       ICE_FLOW_PTYPE_MAX);
                }

                if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
                        src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                }
                if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
                    (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
                        src = i ?
                                (const ice_bitmap_t *)ice_ptypes_ipv4_il :
                                (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
                           (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
                        src = i ?
                                (const ice_bitmap_t *)ice_ptypes_ipv6_il :
                                (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
                                (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
                        src = (const ice_bitmap_t *)ice_ptypes_ipv4_frag;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
                                (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
                        src = (const ice_bitmap_t *)ice_ptypes_ipv6_frag;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
                           !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
                        src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
                                (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
                        src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
                                (const ice_bitmap_t *)ice_ptypes_ipv4_il;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
                           !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
                        src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
                                (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
                        src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
                                (const ice_bitmap_t *)ice_ptypes_ipv6_il;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                }

                if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
                        src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
                        src = (const ice_bitmap_t *)ice_ptypes_pppoe;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else {
                        src = (const ice_bitmap_t *)ice_ptypes_pppoe;
                        ice_andnot_bitmap(params->ptypes, params->ptypes, src,
                                          ICE_FLOW_PTYPE_MAX);
                }

                if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
                        src = (const ice_bitmap_t *)ice_ptypes_udp_il;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       (const ice_bitmap_t *)ice_ptypes_tcp_il,
                                       ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
                        src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                }

                if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
                        src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
                                (const ice_bitmap_t *)ice_ptypes_icmp_il;
                        ice_and_bitmap(params->ptypes, params->ptypes, src,
                                       ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
                        if (!i) {
                                src = (const ice_bitmap_t *)ice_ptypes_gre_of;
                                ice_and_bitmap(params->ptypes, params->ptypes,
                                               src, ICE_FLOW_PTYPE_MAX);
                        }
                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
                        src = (const ice_bitmap_t *)ice_ptypes_gtpc;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
                        src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
                        src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
                        src = (const ice_bitmap_t *)ice_ptypes_gtpu;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);

                        /* Attributes for GTP packet with downlink */
                        params->attr = ice_attr_gtpu_down;
                        params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
                        src = (const ice_bitmap_t *)ice_ptypes_gtpu;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);

                        /* Attributes for GTP packet with uplink */
                        params->attr = ice_attr_gtpu_up;
                        params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
                        src = (const ice_bitmap_t *)ice_ptypes_gtpu;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);

                        /* Attributes for GTP packet with Extension Header */
                        params->attr = ice_attr_gtpu_eh;
                        params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
                        src = (const ice_bitmap_t *)ice_ptypes_gtpu;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);

                        /* Attributes for GTP packet without Extension Header */
                        params->attr = ice_attr_gtpu_session;
                        params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
                } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
                        src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
                        src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
                        src = (const ice_bitmap_t *)ice_ptypes_esp;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
                        src = (const ice_bitmap_t *)ice_ptypes_ah;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
                        src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
                        src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
                        src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                }

                if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
                        src = (const ice_bitmap_t *)ice_ptypes_ppp;
                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                }

                if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
                        if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
                                src =
                                (const ice_bitmap_t *)ice_ptypes_pfcp_node;
                        else
                                src =
                                (const ice_bitmap_t *)ice_ptypes_pfcp_session;

                        ice_and_bitmap(params->ptypes, params->ptypes,
                                       src, ICE_FLOW_PTYPE_MAX);
                } else {
                        src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
                        ice_andnot_bitmap(params->ptypes, params->ptypes,
                                          src, ICE_FLOW_PTYPE_MAX);

                        src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
                        ice_andnot_bitmap(params->ptypes, params->ptypes,
                                          src, ICE_FLOW_PTYPE_MAX);
                }
        }

        return ICE_SUCCESS;
}
1131
1132 /**
1133  * ice_flow_xtract_pkt_flags - Create an extraction sequence entry for packet flags
1134  * @hw: pointer to the HW struct
1135  * @params: information about the flow to be processed
1136  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1137  *
1138  * This function allocates an extraction sequence entry for a DWORD-sized
1139  * chunk of the packet flags.
1140  */
1141 static enum ice_status
1142 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1143                           struct ice_flow_prof_params *params,
1144                           enum ice_flex_mdid_pkt_flags flags)
1145 {
1146         u8 fv_words = hw->blk[params->blk].es.fvw;
1147         u8 idx;
1148
1149         /* Make sure the number of extraction sequence entries required does not
1150          * exceed the block's capacity.
1151          */
1152         if (params->es_cnt >= fv_words)
1153                 return ICE_ERR_MAX_LIMIT;
1154
1155         /* some blocks require a reversed field vector layout */
1156         if (hw->blk[params->blk].es.reverse)
1157                 idx = fv_words - params->es_cnt - 1;
1158         else
1159                 idx = params->es_cnt;
1160
1161         params->es[idx].prot_id = ICE_PROT_META_ID;
1162         params->es[idx].off = flags;
1163         params->es_cnt++;
1164
1165         return ICE_SUCCESS;
1166 }
1167
1168 /**
1169  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1170  * @hw: pointer to the HW struct
1171  * @params: information about the flow to be processed
1172  * @seg: packet segment index of the field to be extracted
1173  * @fld: ID of field to be extracted
1174  * @match: bitmap of all fields being matched in this segment
1175  *
1176  * This function determines the protocol ID, offset, and size of the given
1177  * field. It then allocates one or more extraction sequence entries for the
1178  * given field, and fills the entries with protocol ID and offset information.
1179  */
1180 static enum ice_status
1181 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1182                     u8 seg, enum ice_flow_field fld, u64 match)
1183 {
1184         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1185         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1186         u8 fv_words = hw->blk[params->blk].es.fvw;
1187         struct ice_flow_fld_info *flds;
1188         u16 cnt, ese_bits, i;
1189         u16 sib_mask = 0;
1190         u16 mask;
1191         u16 off;
1192
1193         flds = params->prof->segs[seg].fields;
1194
1195         switch (fld) {
1196         case ICE_FLOW_FIELD_IDX_ETH_DA:
1197         case ICE_FLOW_FIELD_IDX_ETH_SA:
1198         case ICE_FLOW_FIELD_IDX_S_VLAN:
1199         case ICE_FLOW_FIELD_IDX_C_VLAN:
1200                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1201                 break;
1202         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1203                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1204                 break;
1205         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1206                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1207                 break;
1208         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1209                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1210                 break;
1211         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1212         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1213                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1214
1215                 /* TTL and PROT share the same extraction sequence entry,
1216                  * so each is treated as the other's sibling when filling
1217                  * in that entry.
1218                  */
1219                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1220                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1221                 else
1222                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1223
1224                 /* If the sibling field is also included, that field's
1225                  * mask needs to be included.
1226                  */
1227                 if (match & BIT(sib))
1228                         sib_mask = ice_flds_info[sib].mask;
1229                 break;
1230         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1231         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1232                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1233
1234                 /* TTL and PROT share the same extraction sequence entry,
1235                  * so each is treated as the other's sibling when filling
1236                  * in that entry.
1237                  */
1238                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1239                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1240                 else
1241                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1242
1243                 /* If the sibling field is also included, that field's
1244                  * mask needs to be included.
1245                  */
1246                 if (match & BIT(sib))
1247                         sib_mask = ice_flds_info[sib].mask;
1248                 break;
1249         case ICE_FLOW_FIELD_IDX_IPV4_SA:
1250         case ICE_FLOW_FIELD_IDX_IPV4_DA:
1251                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1252                 break;
1253         case ICE_FLOW_FIELD_IDX_IPV4_ID:
1254                 prot_id = ICE_PROT_IPV4_OF_OR_S;
1255                 break;
1256         case ICE_FLOW_FIELD_IDX_IPV6_SA:
1257         case ICE_FLOW_FIELD_IDX_IPV6_DA:
1258         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1259         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1260         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1261         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1262         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1263         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1264                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1265                 break;
1266         case ICE_FLOW_FIELD_IDX_IPV6_ID:
1267                 prot_id = ICE_PROT_IPV6_FRAG;
1268                 break;
1269         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1270         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1271         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1272                 prot_id = ICE_PROT_TCP_IL;
1273                 break;
1274         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1275         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1276                 prot_id = ICE_PROT_UDP_IL_OR_S;
1277                 break;
1278         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1279         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1280                 prot_id = ICE_PROT_SCTP_IL;
1281                 break;
1282         case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1283         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1284         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1285         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1286         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1287         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1288         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1289                 /* GTP is accessed through UDP OF protocol */
1290                 prot_id = ICE_PROT_UDP_OF;
1291                 break;
1292         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1293                 prot_id = ICE_PROT_PPPOE;
1294                 break;
1295         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1296                 prot_id = ICE_PROT_UDP_IL_OR_S;
1297                 break;
1298         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1299                 prot_id = ICE_PROT_L2TPV3;
1300                 break;
1301         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1302                 prot_id = ICE_PROT_ESP_F;
1303                 break;
1304         case ICE_FLOW_FIELD_IDX_AH_SPI:
1305                 prot_id = ICE_PROT_ESP_2;
1306                 break;
1307         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1308                 prot_id = ICE_PROT_UDP_IL_OR_S;
1309                 break;
1310         case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1311                 prot_id = ICE_PROT_ECPRI;
1312                 break;
1313         case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1314                 prot_id = ICE_PROT_UDP_IL_OR_S;
1315                 break;
1316         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1317         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1318         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1319         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1320         case ICE_FLOW_FIELD_IDX_ARP_OP:
1321                 prot_id = ICE_PROT_ARP_OF;
1322                 break;
1323         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1324         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1325                 /* ICMP type and code share the same extraction seq. entry */
1326                 prot_id = (params->prof->segs[seg].hdrs &
1327                            ICE_FLOW_SEG_HDR_IPV4) ?
1328                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1329                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1330                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1331                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1332                 break;
1333         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1334                 prot_id = ICE_PROT_GRE_OF;
1335                 break;
1336         default:
1337                 return ICE_ERR_NOT_IMPL;
1338         }
1339
1340         /* Each extraction sequence entry is a word in size, and extracts a word
1341          * starting at a word-aligned offset within a protocol header.
1342          */
1343         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1344
1345         flds[fld].xtrct.prot_id = prot_id;
1346         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1347                 ICE_FLOW_FV_EXTRACT_SZ;
1348         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1349         flds[fld].xtrct.idx = params->es_cnt;
1350         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1351
1352         /* Adjust the next field-entry index after accommodating the number of
1353          * entries this field consumes
1354          */
1355         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1356                                   ice_flds_info[fld].size, ese_bits);
1357
1358         /* Fill in the extraction sequence entries needed for this field */
1359         off = flds[fld].xtrct.off;
1360         mask = flds[fld].xtrct.mask;
1361         for (i = 0; i < cnt; i++) {
1362                 /* Only consume an extraction sequence entry if there is no
1363                  * sibling field associated with this field, or if the sibling
1364                  * entry does not already extract the word shared with this field.
1365                  */
1366                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1367                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1368                     flds[sib].xtrct.off != off) {
1369                         u8 idx;
1370
1371                         /* Make sure the number of extraction sequence entries
1372                          * required does not exceed the block's capacity
1373                          */
1374                         if (params->es_cnt >= fv_words)
1375                                 return ICE_ERR_MAX_LIMIT;
1376
1377                         /* some blocks require a reversed field vector layout */
1378                         if (hw->blk[params->blk].es.reverse)
1379                                 idx = fv_words - params->es_cnt - 1;
1380                         else
1381                                 idx = params->es_cnt;
1382
1383                         params->es[idx].prot_id = prot_id;
1384                         params->es[idx].off = off;
1385                         params->mask[idx] = mask | sib_mask;
1386                         params->es_cnt++;
1387                 }
1388
1389                 off += ICE_FLOW_FV_EXTRACT_SZ;
1390         }
1391
1392         return ICE_SUCCESS;
1393 }
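
/* Worked example (illustrative only, assuming the 2-byte extraction word size
 * ICE_FLOW_FV_EXTRACT_SZ used by this driver): ICE_FLOW_FIELD_IDX_IPV4_TTL
 * sits at byte offset 8 of the IPv4 header and is one byte wide, so with
 * ese_bits = 16:
 *
 *      xtrct.off  = (64 / 16) * 2 = 8     byte offset of the extracted word
 *      xtrct.disp = 64 % 16 = 0           bit displacement within that word
 *      cnt = DIVIDE_AND_ROUND_UP(0 + 8, 16) = 1 field vector word consumed
 *
 * Since IPv4 PROT lives in the same word (byte offset 9), a sibling that has
 * already claimed this word lets the second field reuse the entry instead of
 * consuming another field vector slot.
 */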
1394
1395 /**
1396  * ice_flow_xtract_raws - Create extraction sequence entries for raw bytes
1397  * @hw: pointer to the HW struct
1398  * @params: information about the flow to be processed
1399  * @seg: index of packet segment whose raw fields are to be extracted
1400  */
1401 static enum ice_status
1402 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1403                      u8 seg)
1404 {
1405         u16 fv_words;
1406         u16 hdrs_sz;
1407         u8 i;
1408
1409         if (!params->prof->segs[seg].raws_cnt)
1410                 return ICE_SUCCESS;
1411
1412         if (params->prof->segs[seg].raws_cnt >
1413             ARRAY_SIZE(params->prof->segs[seg].raws))
1414                 return ICE_ERR_MAX_LIMIT;
1415
1416         /* Offsets within the segment headers are not supported */
1417         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1418         if (!hdrs_sz)
1419                 return ICE_ERR_PARAM;
1420
1421         fv_words = hw->blk[params->blk].es.fvw;
1422
1423         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1424                 struct ice_flow_seg_fld_raw *raw;
1425                 u16 off, cnt, j;
1426
1427                 raw = &params->prof->segs[seg].raws[i];
1428
1429                 /* Storing extraction information */
1430                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1431                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1432                         ICE_FLOW_FV_EXTRACT_SZ;
1433                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1434                         BITS_PER_BYTE;
1435                 raw->info.xtrct.idx = params->es_cnt;
1436
1437                 /* Determine the number of field vector entries this raw field
1438                  * consumes.
1439                  */
1440                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1441                                           (raw->info.src.last * BITS_PER_BYTE),
1442                                           (ICE_FLOW_FV_EXTRACT_SZ *
1443                                            BITS_PER_BYTE));
1444                 off = raw->info.xtrct.off;
1445                 for (j = 0; j < cnt; j++) {
1446                         u16 idx;
1447
1448                         /* Make sure the number of extraction sequence entries
1449                          * required does not exceed the block's capacity
1450                          */
1451                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1452                             params->es_cnt >= ICE_MAX_FV_WORDS)
1453                                 return ICE_ERR_MAX_LIMIT;
1454
1455                         /* some blocks require a reversed field vector layout */
1456                         if (hw->blk[params->blk].es.reverse)
1457                                 idx = fv_words - params->es_cnt - 1;
1458                         else
1459                                 idx = params->es_cnt;
1460
1461                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1462                         params->es[idx].off = off;
1463                         params->es_cnt++;
1464                         off += ICE_FLOW_FV_EXTRACT_SZ;
1465                 }
1466         }
1467
1468         return ICE_SUCCESS;
1469 }
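
/* Worked example (illustrative only): a raw match starting at segment byte
 * offset 3 with src.last = 4 bytes, again assuming a 2-byte extraction word:
 *
 *      xtrct.off  = (3 / 2) * 2 = 2
 *      xtrct.disp = (3 % 2) * BITS_PER_BYTE = 8
 *      cnt = DIVIDE_AND_ROUND_UP(8 + 32, 16) = 3
 *
 * i.e. three field vector words are programmed at offsets 2, 4, and 6,
 * covering segment bytes 2 through 7.
 */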
1470
1471 /**
1472  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1473  * @hw: pointer to the HW struct
1474  * @params: information about the flow to be processed
1475  *
1476  * This function iterates through all matched fields in the given segments, and
1477  * creates an extraction sequence for the fields.
1478  */
1479 static enum ice_status
1480 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1481                           struct ice_flow_prof_params *params)
1482 {
1483         enum ice_status status = ICE_SUCCESS;
1484         u8 i;
1485
1486         /* For ACL, we also need to extract the direction (Rx/Tx) bit from the
1487          * packet flags
1488          */
1489         if (params->blk == ICE_BLK_ACL) {
1490                 status = ice_flow_xtract_pkt_flags(hw, params,
1491                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1492                 if (status)
1493                         return status;
1494         }
1495
1496         for (i = 0; i < params->prof->segs_cnt; i++) {
1497                 u64 match = params->prof->segs[i].match;
1498                 enum ice_flow_field j;
1499
1500                 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1501                                      ICE_FLOW_FIELD_IDX_MAX) {
1502                         status = ice_flow_xtract_fld(hw, params, i, j, match);
1503                         if (status)
1504                                 return status;
1505                         ice_clear_bit(j, (ice_bitmap_t *)&match);
1506                 }
1507
1508                 /* Process raw matching bytes */
1509                 status = ice_flow_xtract_raws(hw, params, i);
1510                 if (status)
1511                         return status;
1512         }
1513
1514         return status;
1515 }
1516
1517 /**
1518  * ice_flow_sel_acl_scen - Select the best-fit ACL scenario for a profile
1519  * @hw: pointer to the hardware structure
1520  * @params: information about the flow to be processed
1521  *
1522  * This function selects the narrowest configured ACL scenario whose
1523  * effective width can accommodate the profile's entry length.
1524  */
1525 static enum ice_status
1526 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1527 {
1528         /* Find the best-fit scenario for the provided match width */
1529         struct ice_acl_scen *cand_scen = NULL, *scen;
1530
1531         if (!hw->acl_tbl)
1532                 return ICE_ERR_DOES_NOT_EXIST;
1533
1534         /* Loop through the scenarios and select the narrowest one whose
1535          * effective width can hold the entry
1536          */
1537         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1538                 if (scen->eff_width >= params->entry_length &&
1539                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1540                         cand_scen = scen;
1541         if (!cand_scen)
1542                 return ICE_ERR_DOES_NOT_EXIST;
1543
1544         params->prof->cfg.scen = cand_scen;
1545
1546         return ICE_SUCCESS;
1547 }
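
/* Selection example (illustrative values only): with configured scenarios of
 * effective widths 30 and 60 bytes, an entry_length of 28 selects the 30-byte
 * scenario (the tightest fit), an entry_length of 40 selects the 60-byte one,
 * and an entry_length of 70 fails with ICE_ERR_DOES_NOT_EXIST.
 */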
1548
1549 /**
1550  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1551  * @params: information about the flow to be processed
1552  */
1553 static enum ice_status
1554 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1555 {
1556         u16 index, i, range_idx = 0;
1557
1558         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1559
1560         for (i = 0; i < params->prof->segs_cnt; i++) {
1561                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1562                 u8 j;
1563
1564                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1565                                      ICE_FLOW_FIELD_IDX_MAX) {
1566                         struct ice_flow_fld_info *fld = &seg->fields[j];
1567
1568                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1569
1570                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1571                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1572
1573                                 /* Range checking only supported for single
1574                                  * words
1575                                  */
1576                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1577                                                         fld->xtrct.disp,
1578                                                         BITS_PER_BYTE * 2) > 1)
1579                                         return ICE_ERR_PARAM;
1580
1581                                 /* Ranges must define low and high values */
1582                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1583                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1584                                         return ICE_ERR_PARAM;
1585
1586                                 fld->entry.val = range_idx++;
1587                         } else {
1588                                 /* Store adjusted byte-length of field for later
1589                                  * use, taking into account potential
1590                                  * non-byte-aligned displacement
1591                                  */
1592                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1593                                         (ice_flds_info[j].size +
1594                                          (fld->xtrct.disp % BITS_PER_BYTE),
1595                                          BITS_PER_BYTE);
1596                                 fld->entry.val = index;
1597                                 index += fld->entry.last;
1598                         }
1599                 }
1600
1601                 for (j = 0; j < seg->raws_cnt; j++) {
1602                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1603
1604                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1605                         raw->info.entry.val = index;
1606                         raw->info.entry.last = raw->info.src.last;
1607                         index += raw->info.entry.last;
1608                 }
1609         }
1610
1611         /* We currently only support using the byte selection base, which
1612          * allows for an effective entry size of 30 bytes. Reject anything
1613          * larger.
1614          */
1615         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1616                 return ICE_ERR_PARAM;
1617
1618         /* Only 8 range checkers per profile, reject anything trying to use
1619          * more
1620          */
1621         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1622                 return ICE_ERR_PARAM;
1623
1624         /* Store # bytes required for entry for later use */
1625         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1626
1627         return ICE_SUCCESS;
1628 }
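
/* Layout sketch (illustrative only): for a segment matching IPv4 SA, IPv4 DA
 * and TCP destination port as plain (non-range) fields, the bytes are packed
 * back to back starting at base = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX:
 *
 *      IPV4_SA:      entry.val = base + 0, entry.last = 4
 *      IPV4_DA:      entry.val = base + 4, entry.last = 4
 *      TCP_DST_PORT: entry.val = base + 8, entry.last = 2
 *
 * giving params->entry_length = 10. Had the port been declared as
 * ICE_FLOW_FLD_TYPE_RANGE, it would instead consume range checker 0 and
 * contribute no bytes to the byte selection area.
 */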
1629
1630 /**
1631  * ice_flow_proc_segs - process all packet segments associated with a profile
1632  * @hw: pointer to the HW struct
1633  * @params: information about the flow to be processed
1634  */
1635 static enum ice_status
1636 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1637 {
1638         enum ice_status status;
1639
1640         status = ice_flow_proc_seg_hdrs(params);
1641         if (status)
1642                 return status;
1643
1644         status = ice_flow_create_xtrct_seq(hw, params);
1645         if (status)
1646                 return status;
1647
1648         switch (params->blk) {
1649         case ICE_BLK_FD:
1650         case ICE_BLK_RSS:
1651                 status = ICE_SUCCESS;
1652                 break;
1653         case ICE_BLK_ACL:
1654                 status = ice_flow_acl_def_entry_frmt(params);
1655                 if (status)
1656                         return status;
1657                 status = ice_flow_sel_acl_scen(hw, params);
1658                 if (status)
1659                         return status;
1660                 break;
1661         default:
1662                 return ICE_ERR_NOT_IMPL;
1663         }
1664
1665         return status;
1666 }
1667
1668 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1669 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1670 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1671
1672 /**
1673  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1674  * @hw: pointer to the HW struct
1675  * @blk: classification stage
1676  * @dir: flow direction
1677  * @segs: array of one or more packet segments that describe the flow
1678  * @segs_cnt: number of packet segments provided
1679  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1680  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1681  */
1682 static struct ice_flow_prof *
1683 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1684                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1685                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1686 {
1687         struct ice_flow_prof *p, *prof = NULL;
1688
1689         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1690         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1691                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1692                     segs_cnt && segs_cnt == p->segs_cnt) {
1693                         u8 i;
1694
1695                         /* Check for profile-VSI association if specified */
1696                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1697                             ice_is_vsi_valid(hw, vsi_handle) &&
1698                             !ice_is_bit_set(p->vsis, vsi_handle))
1699                                 continue;
1700
1701                         /* Protocol headers must be checked. Matched fields are
1702                          * checked if specified.
1703                          */
1704                         for (i = 0; i < segs_cnt; i++)
1705                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1706                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1707                                      segs[i].match != p->segs[i].match))
1708                                         break;
1709
1710                         /* A match is found if all segments are matched */
1711                         if (i == segs_cnt) {
1712                                 prof = p;
1713                                 break;
1714                         }
1715                 }
1716         ice_release_lock(&hw->fl_profs_locks[blk]);
1717
1718         return prof;
1719 }
1720
1721 /**
1722  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1723  * @hw: pointer to the HW struct
1724  * @blk: classification stage
1725  * @dir: flow direction
1726  * @segs: array of one or more packet segments that describe the flow
1727  * @segs_cnt: number of packet segments provided
1728  */
1729 u64
1730 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1731                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1732 {
1733         struct ice_flow_prof *p;
1734
1735         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1736                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1737
1738         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1739 }
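
/* Usage sketch (illustrative only; "seg" is a hypothetical, already populated
 * struct ice_flow_seg_info describing the flow):
 *
 *      u64 id = ice_flow_find_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, &seg, 1);
 *
 * A return value of ICE_FLOW_PROF_ID_INVAL means that no profile with matching
 * headers and matched fields exists yet.
 */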
1740
1741 /**
1742  * ice_flow_find_prof_id - Look up a profile with given profile ID
1743  * @hw: pointer to the HW struct
1744  * @blk: classification stage
1745  * @prof_id: unique ID to identify this flow profile
1746  */
1747 static struct ice_flow_prof *
1748 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1749 {
1750         struct ice_flow_prof *p;
1751
1752         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1753                 if (p->id == prof_id)
1754                         return p;
1755
1756         return NULL;
1757 }
1758
1759 /**
1760  * ice_dealloc_flow_entry - Deallocate flow entry memory
1761  * @hw: pointer to the HW struct
1762  * @entry: flow entry to be removed
1763  */
1764 static void
1765 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1766 {
1767         if (!entry)
1768                 return;
1769
1770         if (entry->entry)
1771                 ice_free(hw, entry->entry);
1772
1773         if (entry->range_buf) {
1774                 ice_free(hw, entry->range_buf);
1775                 entry->range_buf = NULL;
1776         }
1777
1778         if (entry->acts) {
1779                 ice_free(hw, entry->acts);
1780                 entry->acts = NULL;
1781                 entry->acts_cnt = 0;
1782         }
1783
1784         ice_free(hw, entry);
1785 }
1786
1787 /**
1788  * ice_flow_get_hw_prof - return the HW profile ID for a specific profile ID handle
1789  * @hw: pointer to the HW struct
1790  * @blk: classification stage
1791  * @prof_id: the profile ID handle
1792  * @hw_prof_id: pointer to variable to receive the HW profile ID
1793  */
1794 enum ice_status
1795 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1796                      u8 *hw_prof_id)
1797 {
1798         enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1799         struct ice_prof_map *map;
1800
1801         ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1802         map = ice_search_prof_id(hw, blk, prof_id);
1803         if (map) {
1804                 *hw_prof_id = map->prof_id;
1805                 status = ICE_SUCCESS;
1806         }
1807         ice_release_lock(&hw->blk[blk].es.prof_map_lock);
1808         return status;
1809 }
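
/* Usage sketch (illustrative only; "prof_id" is the caller's 64-bit software
 * profile ID):
 *
 *      u8 hw_prof = 0;
 *      enum ice_status err;
 *
 *      err = ice_flow_get_hw_prof(hw, ICE_BLK_FD, prof_id, &hw_prof);
 *
 * On ICE_SUCCESS, hw_prof holds the hardware profile ID; otherwise the profile
 * is unknown to the block and ICE_ERR_DOES_NOT_EXIST is returned.
 */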
1810
1811 #define ICE_ACL_INVALID_SCEN    0x3f
1812
1813 /**
1814  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
1815  * @hw: pointer to the hardware structure
1816  * @prof: pointer to flow profile
1817  * @buf: destination buffer the function writes the partial extraction sequence to
1818  *
1819  * Returns ICE_SUCCESS if no PF is associated with the given profile,
1820  * ICE_ERR_IN_USE if at least one PF is associated with it, and another error
1821  * code on any other failure.
1822  */
1823 static enum ice_status
1824 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1825                             struct ice_aqc_acl_prof_generic_frmt *buf)
1826 {
1827         enum ice_status status;
1828         u8 prof_id = 0;
1829
1830         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1831         if (status)
1832                 return status;
1833
1834         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1835         if (status)
1836                 return status;
1837
1838         /* If the scenarios associated with all PFs are all 0 or all
1839          * ICE_ACL_INVALID_SCEN (63) for the given profile, then the profile has
1840          * not been configured yet.
1841          */
1842         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1843             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1844             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1845             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1846                 return ICE_SUCCESS;
1847
1848         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1849             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1850             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1851             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1852             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1853             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1854             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1855             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1856                 return ICE_SUCCESS;
1857
1858         return ICE_ERR_IN_USE;
1859 }
1860
1861 /**
1862  * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1863  * @hw: pointer to the hardware structure
1864  * @acts: array of actions to be performed on a match
1865  * @acts_cnt: number of actions
1866  */
1867 static enum ice_status
1868 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1869                            u8 acts_cnt)
1870 {
1871         int i;
1872
1873         for (i = 0; i < acts_cnt; i++) {
1874                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1875                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1876                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1877                         struct ice_acl_cntrs cntrs = { 0 };
1878                         enum ice_status status;
1879
1880                         /* amount is unused in the dealloc path but the common
1881                          * parameter check routine wants a value set, as zero
1882                          * is invalid for the check. Just set it.
1883                          */
1884                         cntrs.amount = 1;
1885                         cntrs.bank = 0; /* Only bank0 for the moment */
1886                         cntrs.first_cntr =
1887                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1888                         cntrs.last_cntr =
1889                                         LE16_TO_CPU(acts[i].data.acl_act.value);
1890
1891                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1892                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1893                         else
1894                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1895
1896                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1897                         if (status)
1898                                 return status;
1899                 }
1900         }
1901         return ICE_SUCCESS;
1902 }
1903
1904 /**
1905  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1906  * @hw: pointer to the hardware structure
1907  * @prof: pointer to flow profile
1908  *
1909  * Disassociate the scenario from the profile for the current PF (hw->pf_id).
1910  */
1911 static enum ice_status
1912 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1913 {
1914         struct ice_aqc_acl_prof_generic_frmt buf;
1915         enum ice_status status = ICE_SUCCESS;
1916         u8 prof_id = 0;
1917
1918         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1919
1920         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1921         if (status)
1922                 return status;
1923
1924         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1925         if (status)
1926                 return status;
1927
1928         /* Clear scenario for this PF */
1929         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1930         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
1931
1932         return status;
1933 }
1934
1935 /**
1936  * ice_flow_rem_entry_sync - Remove a flow entry
1937  * @hw: pointer to the HW struct
1938  * @blk: classification stage
1939  * @entry: flow entry to be removed
1940  */
1941 static enum ice_status
1942 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1943                         struct ice_flow_entry *entry)
1944 {
1945         if (!entry)
1946                 return ICE_ERR_BAD_PTR;
1947
1948         if (blk == ICE_BLK_ACL) {
1949                 enum ice_status status;
1950
1951                 if (!entry->prof)
1952                         return ICE_ERR_BAD_PTR;
1953
1954                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1955                                            entry->scen_entry_idx);
1956                 if (status)
1957                         return status;
1958
1959                 /* Checks if we need to release an ACL counter. */
1960                 if (entry->acts_cnt && entry->acts)
1961                         ice_flow_acl_free_act_cntr(hw, entry->acts,
1962                                                    entry->acts_cnt);
1963         }
1964
1965         LIST_DEL(&entry->l_entry);
1966
1967         ice_dealloc_flow_entry(hw, entry);
1968
1969         return ICE_SUCCESS;
1970 }
1971
1972 /**
1973  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1974  * @hw: pointer to the HW struct
1975  * @blk: classification stage
1976  * @dir: flow direction
1977  * @prof_id: unique ID to identify this flow profile
1978  * @segs: array of one or more packet segments that describe the flow
1979  * @segs_cnt: number of packet segments provided
1980  * @acts: array of default actions
1981  * @acts_cnt: number of default actions
1982  * @prof: stores the returned flow profile added
1983  *
1984  * Assumption: the caller has acquired the lock to the profile list
1985  */
1986 static enum ice_status
1987 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1988                        enum ice_flow_dir dir, u64 prof_id,
1989                        struct ice_flow_seg_info *segs, u8 segs_cnt,
1990                        struct ice_flow_action *acts, u8 acts_cnt,
1991                        struct ice_flow_prof **prof)
1992 {
1993         struct ice_flow_prof_params *params;
1994         enum ice_status status;
1995         u8 i;
1996
1997         if (!prof || (acts_cnt && !acts))
1998                 return ICE_ERR_BAD_PTR;
1999
2000         params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2001         if (!params)
2002                 return ICE_ERR_NO_MEMORY;
2003
2004         params->prof = (struct ice_flow_prof *)
2005                 ice_malloc(hw, sizeof(*params->prof));
2006         if (!params->prof) {
2007                 status = ICE_ERR_NO_MEMORY;
2008                 goto free_params;
2009         }
2010
2011         /* initialize extraction sequence to all invalid (0xff) */
2012         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2013                 params->es[i].prot_id = ICE_PROT_INVALID;
2014                 params->es[i].off = ICE_FV_OFFSET_INVAL;
2015         }
2016
2017         params->blk = blk;
2018         params->prof->id = prof_id;
2019         params->prof->dir = dir;
2020         params->prof->segs_cnt = segs_cnt;
2021
2022         /* Make a copy of the segments that need to be persistent in the flow
2023          * profile instance
2024          */
2025         for (i = 0; i < segs_cnt; i++)
2026                 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
2027                            ICE_NONDMA_TO_NONDMA);
2028
2029         /* Make a copy of the actions that need to be persistent in the flow
2030          * profile instance.
2031          */
2032         if (acts_cnt) {
2033                 params->prof->acts = (struct ice_flow_action *)
2034                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2035                                    ICE_NONDMA_TO_NONDMA);
2036
2037                 if (!params->prof->acts) {
2038                         status = ICE_ERR_NO_MEMORY;
2039                         goto out;
2040                 }
2041         }
2042
2043         status = ice_flow_proc_segs(hw, params);
2044         if (status) {
2045                 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2046                 goto out;
2047         }
2048
2049         /* Add a HW profile for this flow profile */
2050         status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2051                               params->attr, params->attr_cnt, params->es,
2052                               params->mask);
2053         if (status) {
2054                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
2055                 goto out;
2056         }
2057
2058         INIT_LIST_HEAD(&params->prof->entries);
2059         ice_init_lock(&params->prof->entries_lock);
2060         *prof = params->prof;
2061
2062 out:
2063         if (status) {
2064                 if (params->prof->acts)
2065                         ice_free(hw, params->prof->acts);
2066                 ice_free(hw, params->prof);
2067         }
2068 free_params:
2069         ice_free(hw, params);
2070
2071         return status;
2072 }
2073
2074 /**
2075  * ice_flow_rem_prof_sync - remove a flow profile
2076  * @hw: pointer to the hardware structure
2077  * @blk: classification stage
2078  * @prof: pointer to flow profile to remove
2079  *
2080  * Assumption: the caller has acquired the lock to the profile list
2081  */
2082 static enum ice_status
2083 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2084                        struct ice_flow_prof *prof)
2085 {
2086         enum ice_status status;
2087
2088         /* Remove all remaining flow entries before removing the flow profile */
2089         if (!LIST_EMPTY(&prof->entries)) {
2090                 struct ice_flow_entry *e, *t;
2091
2092                 ice_acquire_lock(&prof->entries_lock);
2093
2094                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2095                                          l_entry) {
2096                         status = ice_flow_rem_entry_sync(hw, blk, e);
2097                         if (status)
2098                                 break;
2099                 }
2100
2101                 ice_release_lock(&prof->entries_lock);
2102         }
2103
2104         if (blk == ICE_BLK_ACL) {
2105                 struct ice_aqc_acl_profile_ranges query_rng_buf;
2106                 struct ice_aqc_acl_prof_generic_frmt buf;
2107                 u8 prof_id = 0;
2108
2109                 /* Disassociate the scenario from the profile for the PF */
2110                 status = ice_flow_acl_disassoc_scen(hw, prof);
2111                 if (status)
2112                         return status;
2113
2114                 /* Clear the range-checker if the profile ID is no longer
2115                  * used by any PF
2116                  */
2117                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2118                 if (status && status != ICE_ERR_IN_USE) {
2119                         return status;
2120                 } else if (!status) {
2121                         /* Clear the range-checker value for profile ID */
2122                         ice_memset(&query_rng_buf, 0,
2123                                    sizeof(struct ice_aqc_acl_profile_ranges),
2124                                    ICE_NONDMA_MEM);
2125
2126                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
2127                                                       &prof_id);
2128                         if (status)
2129                                 return status;
2130
2131                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2132                                                           &query_rng_buf, NULL);
2133                         if (status)
2134                                 return status;
2135                 }
2136         }
2137
2138         /* Remove all hardware profiles associated with this flow profile */
2139         status = ice_rem_prof(hw, blk, prof->id);
2140         if (!status) {
2141                 LIST_DEL(&prof->l_entry);
2142                 ice_destroy_lock(&prof->entries_lock);
2143                 if (prof->acts)
2144                         ice_free(hw, prof->acts);
2145                 ice_free(hw, prof);
2146         }
2147
2148         return status;
2149 }
2150
2151 /**
2152  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2153  * @buf: Destination buffer the function writes the partial extraction sequence to
2154  * @info: Info about field
2155  */
2156 static void
2157 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2158                                struct ice_flow_fld_info *info)
2159 {
2160         u16 dst, i;
2161         u8 src;
2162
2163         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2164                 info->xtrct.disp / BITS_PER_BYTE;
2165         dst = info->entry.val;
2166         for (i = 0; i < info->entry.last; i++)
2167                 /* HW stores field vector words in LE, convert words back to BE
2168                  * so constructed entries will end up in network order
2169                  */
2170                 buf->byte_selection[dst++] = src++ ^ 1;
2171 }
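
/* Worked example (illustrative only): a 4-byte field whose extraction starts
 * at field vector byte 4 (src = 4) and whose entry.val is 10 produces:
 *
 *      byte_selection[10] = 4 ^ 1 = 5
 *      byte_selection[11] = 5 ^ 1 = 4
 *      byte_selection[12] = 6 ^ 1 = 7
 *      byte_selection[13] = 7 ^ 1 = 6
 *
 * i.e. the bytes of each little-endian field vector word are swapped back into
 * network byte order.
 */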
2172
2173 /**
2174  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2175  * @hw: pointer to the hardware structure
2176  * @prof: pointer to flow profile
2177  */
2178 static enum ice_status
2179 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2180 {
2181         struct ice_aqc_acl_prof_generic_frmt buf;
2182         struct ice_flow_fld_info *info;
2183         enum ice_status status;
2184         u8 prof_id = 0;
2185         u16 i;
2186
2187         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2188
2189         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2190         if (status)
2191                 return status;
2192
2193         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2194         if (status && status != ICE_ERR_IN_USE)
2195                 return status;
2196
2197         if (!status) {
2198                 /* Program the profile-dependent configuration. This is done
2199                  * only once regardless of the number of PFs using that profile
2200                  */
2201                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2202
2203                 for (i = 0; i < prof->segs_cnt; i++) {
2204                         struct ice_flow_seg_info *seg = &prof->segs[i];
2205                         u16 j;
2206
2207                         ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2208                                              ICE_FLOW_FIELD_IDX_MAX) {
2209                                 info = &seg->fields[j];
2210
2211                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2212                                         buf.word_selection[info->entry.val] =
2213                                                 info->xtrct.idx;
2214                                 else
2215                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
2216                                                                        info);
2217                         }
2218
2219                         for (j = 0; j < seg->raws_cnt; j++) {
2220                                 info = &seg->raws[j].info;
2221                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2222                         }
2223                 }
2224
2225                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2226                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2227                            ICE_NONDMA_MEM);
2228         }
2229
2230         /* Update the current PF */
2231         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2232         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2233
2234         return status;
2235 }
2236
2237 /**
2238  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2239  * @hw: pointer to the hardware structure
2240  * @blk: classification stage
2241  * @vsi_handle: software VSI handle
2242  * @vsig: target VSI group
2243  *
2244  * Assumption: the caller has already verified that the VSI to
2245  * be added has the same characteristics as the VSIG and will
2246  * thereby have access to all resources added to that VSIG.
2247  */
2248 enum ice_status
2249 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2250                         u16 vsig)
2251 {
2252         enum ice_status status;
2253
2254         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2255                 return ICE_ERR_PARAM;
2256
2257         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2258         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2259                                   vsig);
2260         ice_release_lock(&hw->fl_profs_locks[blk]);
2261
2262         return status;
2263 }
2264
2265 /**
2266  * ice_flow_assoc_prof - associate a VSI with a flow profile
2267  * @hw: pointer to the hardware structure
2268  * @blk: classification stage
2269  * @prof: pointer to flow profile
2270  * @vsi_handle: software VSI handle
2271  *
2272  * Assumption: the caller has acquired the lock to the profile list
2273  * and the software VSI handle has been validated
2274  */
2275 enum ice_status
2276 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2277                     struct ice_flow_prof *prof, u16 vsi_handle)
2278 {
2279         enum ice_status status = ICE_SUCCESS;
2280
2281         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2282                 if (blk == ICE_BLK_ACL) {
2283                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2284                         if (status)
2285                                 return status;
2286                 }
2287                 status = ice_add_prof_id_flow(hw, blk,
2288                                               ice_get_hw_vsi_num(hw,
2289                                                                  vsi_handle),
2290                                               prof->id);
2291                 if (!status)
2292                         ice_set_bit(vsi_handle, prof->vsis);
2293                 else
2294                         ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2295                                   status);
2296         }
2297
2298         return status;
2299 }
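
/* Usage sketch (illustrative only; "prof" and "vsi_handle" come from the
 * caller). As documented above, the profile-list lock must be held and the
 * VSI handle already validated:
 *
 *      enum ice_status err;
 *
 *      ice_acquire_lock(&hw->fl_profs_locks[ICE_BLK_FD]);
 *      err = ice_flow_assoc_prof(hw, ICE_BLK_FD, prof, vsi_handle);
 *      ice_release_lock(&hw->fl_profs_locks[ICE_BLK_FD]);
 */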
2300
2301 /**
2302  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2303  * @hw: pointer to the hardware structure
2304  * @blk: classification stage
2305  * @prof: pointer to flow profile
2306  * @vsi_handle: software VSI handle
2307  *
2308  * Assumption: the caller has acquired the lock to the profile list
2309  * and the software VSI handle has been validated
2310  */
2311 static enum ice_status
2312 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2313                        struct ice_flow_prof *prof, u16 vsi_handle)
2314 {
2315         enum ice_status status = ICE_SUCCESS;
2316
2317         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2318                 status = ice_rem_prof_id_flow(hw, blk,
2319                                               ice_get_hw_vsi_num(hw,
2320                                                                  vsi_handle),
2321                                               prof->id);
2322                 if (!status)
2323                         ice_clear_bit(vsi_handle, prof->vsis);
2324                 else
2325                         ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2326                                   status);
2327         }
2328
2329         return status;
2330 }
2331
2332 /**
2333  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2334  * @hw: pointer to the HW struct
2335  * @blk: classification stage
2336  * @dir: flow direction
2337  * @prof_id: unique ID to identify this flow profile
2338  * @segs: array of one or more packet segments that describe the flow
2339  * @segs_cnt: number of packet segments provided
2340  * @acts: array of default actions
2341  * @acts_cnt: number of default actions
2342  * @prof: stores the returned flow profile added
2343  */
2344 enum ice_status
2345 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2346                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2347                   struct ice_flow_action *acts, u8 acts_cnt,
2348                   struct ice_flow_prof **prof)
2349 {
2350         enum ice_status status;
2351
2352         if (segs_cnt > ICE_FLOW_SEG_MAX)
2353                 return ICE_ERR_MAX_LIMIT;
2354
2355         if (!segs_cnt)
2356                 return ICE_ERR_PARAM;
2357
2358         if (!segs)
2359                 return ICE_ERR_BAD_PTR;
2360
2361         status = ice_flow_val_hdrs(segs, segs_cnt);
2362         if (status)
2363                 return status;
2364
2365         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2366
2367         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2368                                         acts, acts_cnt, prof);
2369         if (!status)
2370                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2371
2372         ice_release_lock(&hw->fl_profs_locks[blk]);
2373
2374         return status;
2375 }
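
/* Usage sketch (illustrative only; the segment helpers ice_flow_set_fld() and
 * ICE_FLOW_SET_HDRS() are assumed from elsewhere in this file/header, and the
 * profile ID is an arbitrary example value):
 *
 *      struct ice_flow_seg_info seg = { 0 };
 *      struct ice_flow_prof *prof;
 *      enum ice_status err;
 *
 *      ICE_FLOW_SET_HDRS(&seg, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP);
 *      ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *                       ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *                       ICE_FLOW_FLD_OFF_INVAL, false);
 *
 *      err = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, 0x1234ULL,
 *                              &seg, 1, NULL, 0, &prof);
 *
 * On success, *prof points at the newly added profile, which can then be bound
 * to a VSI with ice_flow_assoc_prof().
 */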
2376
2377 /**
2378  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2379  * @hw: pointer to the HW struct
2380  * @blk: the block for which the flow profile is to be removed
2381  * @prof_id: unique ID of the flow profile to be removed
2382  */
2383 enum ice_status
2384 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2385 {
2386         struct ice_flow_prof *prof;
2387         enum ice_status status;
2388
2389         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2390
2391         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2392         if (!prof) {
2393                 status = ICE_ERR_DOES_NOT_EXIST;
2394                 goto out;
2395         }
2396
2397         /* prof becomes invalid after the call */
2398         status = ice_flow_rem_prof_sync(hw, blk, prof);
2399
2400 out:
2401         ice_release_lock(&hw->fl_profs_locks[blk]);
2402
2403         return status;
2404 }
2405
2406 /**
2407  * ice_flow_find_entry - look for a flow entry using its unique ID
2408  * @hw: pointer to the HW struct
2409  * @blk: classification stage
2410  * @entry_id: unique ID to identify this flow entry
2411  *
2412  * This function looks for the flow entry with the specified unique ID in all
2413  * flow profiles of the specified classification stage. If the entry is found,
2414  * it returns the handle to the flow entry. Otherwise, it returns
2415  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2416  */
2417 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2418 {
2419         struct ice_flow_entry *found = NULL;
2420         struct ice_flow_prof *p;
2421
2422         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2423
2424         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2425                 struct ice_flow_entry *e;
2426
2427                 ice_acquire_lock(&p->entries_lock);
2428                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2429                         if (e->id == entry_id) {
2430                                 found = e;
2431                                 break;
2432                         }
2433                 ice_release_lock(&p->entries_lock);
2434
2435                 if (found)
2436                         break;
2437         }
2438
2439         ice_release_lock(&hw->fl_profs_locks[blk]);
2440
2441         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2442 }
2443
2444 /**
2445  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2446  * @hw: pointer to the hardware structure
2447  * @acts: array of actions to be performed on a match
2448  * @acts_cnt: number of actions
2449  * @cnt_alloc: indicates if an ACL counter has been allocated.
2450  */
2451 static enum ice_status
2452 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2453                            u8 acts_cnt, bool *cnt_alloc)
2454 {
2455         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2456         int i;
2457
2458         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2459         *cnt_alloc = false;
2460
2461         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2462                 return ICE_ERR_OUT_OF_RANGE;
2463
2464         for (i = 0; i < acts_cnt; i++) {
2465                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2466                     acts[i].type != ICE_FLOW_ACT_DROP &&
2467                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2468                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2469                         return ICE_ERR_CFG;
2470
2471                 /* If the caller wants to add two actions of the same type,
2472                  * it is considered an invalid configuration.
2473                  */
2474                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2475                         return ICE_ERR_PARAM;
2476         }
2477
2478         /* Checks if ACL counters are needed. */
2479         for (i = 0; i < acts_cnt; i++) {
2480                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2481                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2482                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2483                         struct ice_acl_cntrs cntrs = { 0 };
2484                         enum ice_status status;
2485
2486                         cntrs.amount = 1;
2487                         cntrs.bank = 0; /* Only bank0 for the moment */
2488
2489                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2490                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2491                         else
2492                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2493
2494                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2495                         if (status)
2496                                 return status;
2497                         /* Counter index within the bank */
2498                         acts[i].data.acl_act.value =
2499                                                 CPU_TO_LE16(cntrs.first_cntr);
2500                         *cnt_alloc = true;
2501                 }
2502         }
2503
2504         return ICE_SUCCESS;
2505 }
2506
2507 /**
2508  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2509  * @fld: number of the given field
2510  * @info: info about field
2511  * @range_buf: range checker configuration buffer
2512  * @data: pointer to a data buffer containing flow entry's match values/masks
2513  * @range: Input/output param indicating which range checkers are being used
2514  */
2515 static void
2516 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2517                               struct ice_aqc_acl_profile_ranges *range_buf,
2518                               u8 *data, u8 *range)
2519 {
2520         u16 new_mask;
2521
2522         /* If not specified, default mask is all bits in field */
2523         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2524                     BIT(ice_flds_info[fld].size) - 1 :
2525                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2526
2527         /* If the mask is 0, then we don't need to worry about this input
2528          * range checker value.
2529          */
2530         if (new_mask) {
2531                 u16 new_high =
2532                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2533                 u16 new_low =
2534                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2535                 u8 range_idx = info->entry.val;
2536
2537                 range_buf->checker_cfg[range_idx].low_boundary =
2538                         CPU_TO_BE16(new_low);
2539                 range_buf->checker_cfg[range_idx].high_boundary =
2540                         CPU_TO_BE16(new_high);
2541                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2542
2543                 /* Indicate which range checker is being used */
2544                 *range |= BIT(range_idx);
2545         }
2546 }
2547
2548 /**
2549  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2550  * @fld: number of the given field
2551  * @info: info about the field
2552  * @buf: buffer containing the entry
2553  * @dontcare: buffer containing don't care mask for entry
2554  * @data: pointer to a data buffer containing flow entry's match values/masks
2555  */
2556 static void
2557 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2558                             u8 *dontcare, u8 *data)
2559 {
2560         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2561         bool use_mask = false;
2562         u8 disp;
2563
2564         src = info->src.val;
2565         mask = info->src.mask;
2566         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2567         disp = info->xtrct.disp % BITS_PER_BYTE;
2568
2569         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2570                 use_mask = true;
2571
2572         for (k = 0; k < info->entry.last; k++, dst++) {
2573                 /* Add overflow bits from previous byte */
2574                 buf[dst] = (tmp_s & 0xff00) >> 8;
2575
2576                 /* If the mask is not valid, tmp_m is always zero, so this just
2577                  * sets dontcare to 0 (no masked bits). If the mask is valid, this
2578                  * pulls in the overflow bits of the mask from the previous byte
2579                  */
2580                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2581
2582                 /* If there is displacement, the last byte will only contain
2583                  * displaced data, but there is no more data to read from the
2584                  * user buffer, so skip it to avoid reading beyond the end of
2585                  * the user buffer
2586                  */
2587                 if (!disp || k < info->entry.last - 1) {
2588                         /* Store shifted data to use in next byte */
2589                         tmp_s = data[src++] << disp;
2590
2591                         /* Add current (shifted) byte */
2592                         buf[dst] |= tmp_s & 0xff;
2593
2594                         /* Handle mask if valid */
2595                         if (use_mask) {
2596                                 tmp_m = (~data[mask++] & 0xff) << disp;
2597                                 dontcare[dst] |= tmp_m & 0xff;
2598                         }
2599                 }
2600         }
2601
2602         /* Fill in don't care bits at beginning of field */
2603         if (disp) {
2604                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2605                 for (k = 0; k < disp; k++)
2606                         dontcare[dst] |= BIT(k);
2607         }
2608
2609         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2610
2611         /* Fill in don't care bits at end of field */
2612         if (end_disp) {
2613                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2614                       info->entry.last - 1;
2615                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2616                         dontcare[dst] |= BIT(k);
2617         }
2618 }
2619
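/* Worked example (illustrative): for a one-byte field with a bit displacement
 * of 3 (disp = 3, info->entry.last = 2) and user data byte 0xAB, the loop
 * produces buf[0] = (0xAB << 3) & 0xff = 0x58 and buf[1] = (0xAB << 3) >> 8 =
 * 0x05. The trailing fix-ups then mark bits 0-2 of the first byte and bits
 * 3-7 of the last byte as don't care, since they fall outside the field.
 */
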
2620 /**
2621  * ice_flow_acl_frmt_entry - Format ACL entry
2622  * @hw: pointer to the hardware structure
2623  * @prof: pointer to flow profile
2624  * @e: pointer to the flow entry
2625  * @data: pointer to a data buffer containing flow entry's match values/masks
2626  * @acts: array of actions to be performed on a match
2627  * @acts_cnt: number of actions
2628  *
2629  * Formats the key (and key_inverse) to be matched from the data passed in,
2630  * along with data from the flow profile. This key/key_inverse pair makes up
2631  * the 'entry' for an ACL flow entry.
2632  */
2633 static enum ice_status
2634 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2635                         struct ice_flow_entry *e, u8 *data,
2636                         struct ice_flow_action *acts, u8 acts_cnt)
2637 {
2638         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2639         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2640         enum ice_status status;
2641         bool cnt_alloc;
2642         u8 prof_id = 0;
2643         u16 i, buf_sz;
2644
2645         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2646         if (status)
2647                 return status;
2648
2649         /* Format the result action */
2650
2651         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2652         if (status)
2653                 return status;
2654
2655         status = ICE_ERR_NO_MEMORY;
2656
2657         e->acts = (struct ice_flow_action *)
2658                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2659                            ICE_NONDMA_TO_NONDMA);
2660         if (!e->acts)
2661                 goto out;
2662
2663         e->acts_cnt = acts_cnt;
2664
2665         /* Format the matching data */
2666         buf_sz = prof->cfg.scen->width;
2667         buf = (u8 *)ice_malloc(hw, buf_sz);
2668         if (!buf)
2669                 goto out;
2670
2671         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2672         if (!dontcare)
2673                 goto out;
2674
2675         /* 'key' buffer will store both key and key_inverse, so must be twice
2676          * size of buf
2677          */
2678         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2679         if (!key)
2680                 goto out;
2681
2682         range_buf = (struct ice_aqc_acl_profile_ranges *)
2683                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2684         if (!range_buf)
2685                 goto out;
2686
2687         /* Set don't care mask to all 1's to start, will zero out used bytes */
2688         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2689
2690         for (i = 0; i < prof->segs_cnt; i++) {
2691                 struct ice_flow_seg_info *seg = &prof->segs[i];
2692                 u8 j;
2693
2694                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2695                                      ICE_FLOW_FIELD_IDX_MAX) {
2696                         struct ice_flow_fld_info *info = &seg->fields[j];
2697
2698                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2699                                 ice_flow_acl_frmt_entry_range(j, info,
2700                                                               range_buf, data,
2701                                                               &range);
2702                         else
2703                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2704                                                             dontcare, data);
2705                 }
2706
2707                 for (j = 0; j < seg->raws_cnt; j++) {
2708                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2709                         u16 dst, src, mask, k;
2710                         bool use_mask = false;
2711
2712                         src = info->src.val;
2713                         dst = info->entry.val -
2714                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2715                         mask = info->src.mask;
2716
2717                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2718                                 use_mask = true;
2719
2720                         for (k = 0; k < info->entry.last; k++, dst++) {
2721                                 buf[dst] = data[src++];
2722                                 if (use_mask)
2723                                         dontcare[dst] = ~data[mask++];
2724                                 else
2725                                         dontcare[dst] = 0;
2726                         }
2727                 }
2728         }
2729
2730         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2731         dontcare[prof->cfg.scen->pid_idx] = 0;
2732
2733         /* Format the buffer for direction flags */
2734         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2735
2736         if (prof->dir == ICE_FLOW_RX)
2737                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2738
2739         if (range) {
2740                 buf[prof->cfg.scen->rng_chk_idx] = range;
2741                 /* Mark any unused range checkers as don't care */
2742                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2743                 e->range_buf = range_buf;
2744         } else {
2745                 ice_free(hw, range_buf);
2746         }
2747
2748         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2749                              buf_sz);
2750         if (status)
2751                 goto out;
2752
2753         e->entry = key;
2754         e->entry_sz = buf_sz * 2;
2755
2756 out:
2757         if (buf)
2758                 ice_free(hw, buf);
2759
2760         if (dontcare)
2761                 ice_free(hw, dontcare);
2762
2763         if (status && key)
2764                 ice_free(hw, key);
2765
2766         if (status && range_buf) {
2767                 ice_free(hw, range_buf);
2768                 e->range_buf = NULL;
2769         }
2770
2771         if (status && e->acts) {
2772                 ice_free(hw, e->acts);
2773                 e->acts = NULL;
2774                 e->acts_cnt = 0;
2775         }
2776
2777         if (status && cnt_alloc)
2778                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2779
2780         return status;
2781 }
2782
2783 /**
2784  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2785  *                                     the compared data.
2786  * @prof: pointer to flow profile
2787  * @e: pointer to the comparing flow entry
2788  * @do_chg_action: decide if we want to change the ACL action
2789  * @do_add_entry: decide if we want to add the new ACL entry
2790  * @do_rem_entry: decide if we want to remove the current ACL entry
2791  *
2792  * Find an ACL scenario entry that matches the compared data. At the same time,
2793  * this function also figures out:
2794  * a/ If we want to change the ACL action
2795  * b/ If we want to add the new ACL entry
2796  * c/ If we want to remove the current ACL entry
2797  */
2798 static struct ice_flow_entry *
2799 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2800                                   struct ice_flow_entry *e, bool *do_chg_action,
2801                                   bool *do_add_entry, bool *do_rem_entry)
2802 {
2803         struct ice_flow_entry *p, *return_entry = NULL;
2804         u8 i, j;
2805
2806         /* Check if:
2807          * a/ There exists an entry with same matching data, but different
2808          *    priority, then we remove this existing ACL entry. Then, we
2809          *    will add the new entry to the ACL scenario.
2810          * b/ There exists an entry with same matching data, priority, and
2811          *    result action, then we do nothing.
2812          * c/ There exists an entry with same matching data and priority, but
2813          *    a different action, then we only change the entry's action.
2814          * d/ Else, we add this new entry to the ACL scenario.
2815          */
2816         *do_chg_action = false;
2817         *do_add_entry = true;
2818         *do_rem_entry = false;
2819         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2820                 if (memcmp(p->entry, e->entry, p->entry_sz))
2821                         continue;
2822
2823                 /* From this point, we have the same matching_data. */
2824                 *do_add_entry = false;
2825                 return_entry = p;
2826
2827                 if (p->priority != e->priority) {
2828                         /* matching data && !priority */
2829                         *do_add_entry = true;
2830                         *do_rem_entry = true;
2831                         break;
2832                 }
2833
2834                 /* From this point, we will have matching_data && priority */
2835                 if (p->acts_cnt != e->acts_cnt)
2836                         *do_chg_action = true;
2837                 for (i = 0; i < p->acts_cnt; i++) {
2838                         bool found_not_match = false;
2839
2840                         for (j = 0; j < e->acts_cnt; j++)
2841                                 if (memcmp(&p->acts[i], &e->acts[j],
2842                                            sizeof(struct ice_flow_action))) {
2843                                         found_not_match = true;
2844                                         break;
2845                                 }
2846
2847                         if (found_not_match) {
2848                                 *do_chg_action = true;
2849                                 break;
2850                         }
2851                 }
2852
2853                 /* (do_chg_action = true) means :
2854                  *    matching_data && priority && !result_action
2855                  * (do_chg_action = false) means :
2856                  *    matching_data && priority && result_action
2857                  */
2858                 break;
2859         }
2860
2861         return return_entry;
2862 }
2863
2864 /**
2865  * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
2866  * @p: flow priority
2867  */
2868 static enum ice_acl_entry_prio
2869 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
2870 {
2871         enum ice_acl_entry_prio acl_prio;
2872
2873         switch (p) {
2874         case ICE_FLOW_PRIO_LOW:
2875                 acl_prio = ICE_ACL_PRIO_LOW;
2876                 break;
2877         case ICE_FLOW_PRIO_NORMAL:
2878                 acl_prio = ICE_ACL_PRIO_NORMAL;
2879                 break;
2880         case ICE_FLOW_PRIO_HIGH:
2881                 acl_prio = ICE_ACL_PRIO_HIGH;
2882                 break;
2883         default:
2884                 acl_prio = ICE_ACL_PRIO_NORMAL;
2885                 break;
2886         }
2887
2888         return acl_prio;
2889 }
2890
2891 /**
2892  * ice_flow_acl_union_rng_chk - Perform union operation between two
2893  *                              range checker buffers
2894  * @dst_buf: pointer to destination range checker buffer
2895  * @src_buf: pointer to source range checker buffer
2896  *
2897  * This function performs the union of the dst_buf and src_buf range
2898  * checker buffers and saves the result back into dst_buf.
2899  */
2900 static enum ice_status
2901 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2902                            struct ice_aqc_acl_profile_ranges *src_buf)
2903 {
2904         u8 i, j;
2905
2906         if (!dst_buf || !src_buf)
2907                 return ICE_ERR_BAD_PTR;
2908
2909         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2910                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2911                 bool will_populate = false;
2912
2913                 in_data = &src_buf->checker_cfg[i];
2914
2915                 if (!in_data->mask)
2916                         break;
2917
2918                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2919                         cfg_data = &dst_buf->checker_cfg[j];
2920
2921                         if (!cfg_data->mask ||
2922                             !memcmp(cfg_data, in_data,
2923                                     sizeof(struct ice_acl_rng_data))) {
2924                                 will_populate = true;
2925                                 break;
2926                         }
2927                 }
2928
2929                 if (will_populate) {
2930                         ice_memcpy(cfg_data, in_data,
2931                                    sizeof(struct ice_acl_rng_data),
2932                                    ICE_NONDMA_TO_NONDMA);
2933                 } else {
2934                         /* No available slot left to program range checker */
2935                         return ICE_ERR_MAX_LIMIT;
2936                 }
2937         }
2938
2939         return ICE_SUCCESS;
2940 }
2941
2942 /**
2943  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2944  * @hw: pointer to the hardware structure
2945  * @prof: pointer to flow profile
2946  * @entry: double pointer to the flow entry
2947  *
2948  * This function looks at the entries currently added to the corresponding
2949  * ACL scenario, then performs matching logic to decide whether to add,
2950  * modify, or do nothing with this new entry.
2951  */
2952 static enum ice_status
2953 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2954                                  struct ice_flow_entry **entry)
2955 {
2956         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2957         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2958         struct ice_acl_act_entry *acts = NULL;
2959         struct ice_flow_entry *exist;
2960         enum ice_status status = ICE_SUCCESS;
2961         struct ice_flow_entry *e;
2962         u8 i;
2963
2964         if (!entry || !(*entry) || !prof)
2965                 return ICE_ERR_BAD_PTR;
2966
2967         e = *entry;
2968
2969         do_chg_rng_chk = false;
2970         if (e->range_buf) {
2971                 u8 prof_id = 0;
2972
2973                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2974                                               &prof_id);
2975                 if (status)
2976                         return status;
2977
2978                 /* Query the current range-checker value in FW */
2979                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2980                                                    NULL);
2981                 if (status)
2982                         return status;
2983                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2984                            sizeof(struct ice_aqc_acl_profile_ranges),
2985                            ICE_NONDMA_TO_NONDMA);
2986
2987                 /* Generate the new range-checker value */
2988                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2989                 if (status)
2990                         return status;
2991
2992                 /* Reconfigure the range check if the buffer is changed. */
2993                 do_chg_rng_chk = false;
2994                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2995                            sizeof(struct ice_aqc_acl_profile_ranges))) {
2996                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2997                                                           &cfg_rng_buf, NULL);
2998                         if (status)
2999                                 return status;
3000
3001                         do_chg_rng_chk = true;
3002                 }
3003         }
3004
3005         /* Figure out if we want to (change the ACL action) and/or
3006          * (Add the new ACL entry) and/or (Remove the current ACL entry)
3007          */
3008         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
3009                                                   &do_add_entry, &do_rem_entry);
3010         if (do_rem_entry) {
3011                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
3012                 if (status)
3013                         return status;
3014         }
3015
3016         /* Prepare the result action buffer */
3017         acts = (struct ice_acl_act_entry *)
3018                 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
3019         if (!acts)
3020                 return ICE_ERR_NO_MEMORY;
3021
3022         for (i = 0; i < e->acts_cnt; i++)
3023                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
3024                            sizeof(struct ice_acl_act_entry),
3025                            ICE_NONDMA_TO_NONDMA);
3026
3027         if (do_add_entry) {
3028                 enum ice_acl_entry_prio prio;
3029                 u8 *keys, *inverts;
3030                 u16 entry_idx;
3031
3032                 keys = (u8 *)e->entry;
3033                 inverts = keys + (e->entry_sz / 2);
3034                 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
3035
3036                 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
3037                                            inverts, acts, e->acts_cnt,
3038                                            &entry_idx);
3039                 if (status)
3040                         goto out;
3041
3042                 e->scen_entry_idx = entry_idx;
3043                 LIST_ADD(&e->l_entry, &prof->entries);
3044         } else {
3045                 if (do_chg_action) {
3046                         /* For the action memory info, update the SW's copy of
3047                          * the existing entry with e's action memory info
3048                          */
3049                         ice_free(hw, exist->acts);
3050                         exist->acts_cnt = e->acts_cnt;
3051                         exist->acts = (struct ice_flow_action *)
3052                                 ice_calloc(hw, exist->acts_cnt,
3053                                            sizeof(struct ice_flow_action));
3054                         if (!exist->acts) {
3055                                 status = ICE_ERR_NO_MEMORY;
3056                                 goto out;
3057                         }
3058
3059                         ice_memcpy(exist->acts, e->acts,
3060                                    sizeof(struct ice_flow_action) * e->acts_cnt,
3061                                    ICE_NONDMA_TO_NONDMA);
3062
3063                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3064                                                   e->acts_cnt,
3065                                                   exist->scen_entry_idx);
3066                         if (status)
3067                                 goto out;
3068                 }
3069
3070                 if (do_chg_rng_chk) {
3071                         /* In this case, we want to update the range checker
3072                          * information of the existing entry
3073                          */
3074                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
3075                                                             e->range_buf);
3076                         if (status)
3077                                 goto out;
3078                 }
3079
3080                 /* As we don't add the new entry to our SW DB, deallocate its
3081                  * memory and return the existing entry to the caller
3082                  */
3083                 ice_dealloc_flow_entry(hw, e);
3084                 *(entry) = exist;
3085         }
3086 out:
3087         ice_free(hw, acts);
3088
3089         return status;
3090 }
3091
3092 /**
3093  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3094  * @hw: pointer to the hardware structure
3095  * @prof: pointer to flow profile
3096  * @e: double pointer to the flow entry
3097  */
3098 static enum ice_status
3099 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3100                             struct ice_flow_entry **e)
3101 {
3102         enum ice_status status;
3103
3104         ice_acquire_lock(&prof->entries_lock);
3105         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3106         ice_release_lock(&prof->entries_lock);
3107
3108         return status;
3109 }
3110
3111 /**
3112  * ice_flow_add_entry - Add a flow entry
3113  * @hw: pointer to the HW struct
3114  * @blk: classification stage
3115  * @prof_id: ID of the profile to add a new flow entry to
3116  * @entry_id: unique ID to identify this flow entry
3117  * @vsi_handle: software VSI handle for the flow entry
3118  * @prio: priority of the flow entry
3119  * @data: pointer to a data buffer containing flow entry's match values/masks
3120  * @acts: arrays of actions to be performed on a match
3121  * @acts_cnt: number of actions
3122  * @entry_h: pointer to buffer that receives the new flow entry's handle
3123  */
3124 enum ice_status
3125 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3126                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3127                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
3128                    u64 *entry_h)
3129 {
3130         struct ice_flow_entry *e = NULL;
3131         struct ice_flow_prof *prof;
3132         enum ice_status status = ICE_SUCCESS;
3133
3134         /* ACL entries must indicate an action */
3135         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3136                 return ICE_ERR_PARAM;
3137
3138         /* No flow entry data is expected for RSS */
3139         if (!entry_h || (!data && blk != ICE_BLK_RSS))
3140                 return ICE_ERR_BAD_PTR;
3141
3142         if (!ice_is_vsi_valid(hw, vsi_handle))
3143                 return ICE_ERR_PARAM;
3144
3145         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3146
3147         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3148         if (!prof) {
3149                 status = ICE_ERR_DOES_NOT_EXIST;
3150         } else {
3151                 /* Allocate memory for the entry being added and associate
3152                  * the VSI to the found flow profile
3153                  */
3154                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3155                 if (!e)
3156                         status = ICE_ERR_NO_MEMORY;
3157                 else
3158                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3159         }
3160
3161         ice_release_lock(&hw->fl_profs_locks[blk]);
3162         if (status)
3163                 goto out;
3164
3165         e->id = entry_id;
3166         e->vsi_handle = vsi_handle;
3167         e->prof = prof;
3168         e->priority = prio;
3169
3170         switch (blk) {
3171         case ICE_BLK_FD:
3172         case ICE_BLK_RSS:
3173                 break;
3174         case ICE_BLK_ACL:
3175                 /* ACL will handle the entry management */
3176                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3177                                                  acts_cnt);
3178                 if (status)
3179                         goto out;
3180
3181                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3182                 if (status)
3183                         goto out;
3184
3185                 break;
3186         default:
3187                 status = ICE_ERR_NOT_IMPL;
3188                 goto out;
3189         }
3190
3191         if (blk != ICE_BLK_ACL) {
3192                 /* ACL will handle the entry management */
3193                 ice_acquire_lock(&prof->entries_lock);
3194                 LIST_ADD(&e->l_entry, &prof->entries);
3195                 ice_release_lock(&prof->entries_lock);
3196         }
3197
3198         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3199
3200 out:
3201         if (status && e) {
3202                 if (e->entry)
3203                         ice_free(hw, e->entry);
3204                 ice_free(hw, e);
3205         }
3206
3207         return status;
3208 }
3209
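/* Usage sketch (illustrative only): once a profile has been added, each entry
 * supplies the actual match data at the byte offsets that were registered via
 * ice_flow_set_fld(). The IDs, offsets and buffer layout below are example
 * values; "vsi_handle" is assumed to be a valid software VSI handle and
 * "src_ip_value"/"src_ip_mask" are hypothetical caller variables.
 *
 *	u8 match_data[8] = { 0 };
 *	u64 entry_h;
 *
 *	*(u32 *)&match_data[0] = src_ip_value;	(value at offset 0)
 *	*(u32 *)&match_data[4] = src_ip_mask;	(mask at offset 4)
 *
 *	status = ice_flow_add_entry(hw, ICE_BLK_FD, 0x1234, 1, vsi_handle,
 *				    ICE_FLOW_PRIO_NORMAL, match_data, NULL, 0,
 *				    &entry_h);
 */
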
3210 /**
3211  * ice_flow_rem_entry - Remove a flow entry
3212  * @hw: pointer to the HW struct
3213  * @blk: classification stage
3214  * @entry_h: handle to the flow entry to be removed
3215  */
3216 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3217                                    u64 entry_h)
3218 {
3219         struct ice_flow_entry *entry;
3220         struct ice_flow_prof *prof;
3221         enum ice_status status = ICE_SUCCESS;
3222
3223         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3224                 return ICE_ERR_PARAM;
3225
3226         entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
3227
3228         /* Retain the pointer to the flow profile as the entry will be freed */
3229         prof = entry->prof;
3230
3231         if (prof) {
3232                 ice_acquire_lock(&prof->entries_lock);
3233                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3234                 ice_release_lock(&prof->entries_lock);
3235         }
3236
3237         return status;
3238 }
3239
3240 /**
3241  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3242  * @seg: packet segment the field being set belongs to
3243  * @fld: field to be set
3244  * @field_type: type of the field
3245  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3246  *           entry's input buffer
3247  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3248  *            input buffer
3249  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3250  *            entry's input buffer
3251  *
3252  * This helper function stores information of a field being matched, including
3253  * the type of the field and the locations, relative to the start of the input
3254  * buffer for a flow entry, of the value to match, the mask, and the upper-bound
3255  * value. This function should only be used for fixed-size data structures.
3256  *
3257  * This function also opportunistically determines the protocol headers to be
3258  * present based on the fields being set. Some fields cannot be used alone to
3259  * determine the protocol headers present. Sometimes, fields for particular
3260  * protocol headers are not matched. In those cases, the protocol headers
3261  * must be explicitly set.
3262  */
3263 static void
3264 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3265                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3266                      u16 mask_loc, u16 last_loc)
3267 {
3268         u64 bit = BIT_ULL(fld);
3269
3270         seg->match |= bit;
3271         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3272                 seg->range |= bit;
3273
3274         seg->fields[fld].type = field_type;
3275         seg->fields[fld].src.val = val_loc;
3276         seg->fields[fld].src.mask = mask_loc;
3277         seg->fields[fld].src.last = last_loc;
3278
3279         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3280 }
3281
3282 /**
3283  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3284  * @seg: packet segment the field being set belongs to
3285  * @fld: field to be set
3286  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3287  *           entry's input buffer
3288  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3289  *            input buffer
3290  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3291  *            entry's input buffer
3292  * @range: indicate if field being matched is to be in a range
3293  *
3294  * This function specifies the locations, in the form of byte offsets from the
3295  * start of the input buffer for a flow entry, from where the value to match,
3296  * the mask value, and upper value can be extracted. These locations are then
3297  * stored in the flow profile. When adding a flow entry associated with the
3298  * flow profile, these locations will be used to quickly extract the values and
3299  * create the content of a match entry. This function should only be used for
3300  * fixed-size data structures.
3301  */
3302 void
3303 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3304                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3305 {
3306         enum ice_flow_fld_match_type t = range ?
3307                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3308
3309         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3310 }
3311
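/* Usage sketch (illustrative only): to match a range of TCP destination
 * ports, the caller registers where the lower bound (val_loc), the mask
 * (mask_loc) and the upper bound (last_loc) live in its entry input buffer
 * and sets range to true. The byte offsets below are example values.
 *
 *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
 *			 0, ICE_FLOW_FLD_OFF_INVAL, 2, true);
 */
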
3312 /**
3313  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3314  * @seg: packet segment the field being set belongs to
3315  * @fld: field to be set
3316  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3317  *           entry's input buffer
3318  * @pref_loc: location of prefix value from entry's input buffer
3319  * @pref_sz: size of the location holding the prefix value
3320  *
3321  * This function specifies the locations, in the form of byte offsets from the
3322  * start of the input buffer for a flow entry, from where the value to match
3323  * and the IPv4 prefix value can be extracted. These locations are then stored
3324  * in the flow profile. When adding flow entries to the associated flow profile,
3325  * these locations can be used to quickly extract the values to create the
3326  * content of a match entry. This function should only be used for fixed-size
3327  * data structures.
3328  */
3329 void
3330 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3331                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3332 {
3333         /* For this type of field, the "mask" location is for the prefix value's
3334          * location and the "last" location is for the size of the location of
3335          * the prefix value.
3336          */
3337         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3338                              pref_loc, (u16)pref_sz);
3339 }
3340
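/* Usage sketch (illustrative only): to match an IPv4 destination address by
 * prefix, the caller stores the address and the prefix length in its entry
 * input buffer and registers their locations. The offsets (address at byte 0,
 * prefix length at byte 4) and the one-byte prefix size are example values.
 *
 *	ice_flow_set_fld_prefix(&seg, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 4, 1);
 */
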
3341 /**
3342  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3343  * @seg: packet segment the field being set belongs to
3344  * @off: offset of the raw field from the beginning of the segment in bytes
3345  * @len: length of the raw pattern to be matched
3346  * @val_loc: location of the value to match from entry's input buffer
3347  * @mask_loc: location of mask value from entry's input buffer
3348  *
3349  * This function specifies the offset of the raw field to be matched from the
3350  * beginning of the specified packet segment, and the locations, in the form of
3351  * byte offsets from the start of the input buffer for a flow entry, from where
3352  * the value to match and the mask value are to be extracted. These locations are
3353  * then stored in the flow profile. When adding flow entries to the associated
3354  * flow profile, these locations can be used to quickly extract the values to
3355  * create the content of a match entry. This function should only be used for
3356  * fixed-size data structures.
3357  */
3358 void
3359 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3360                      u16 val_loc, u16 mask_loc)
3361 {
3362         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3363                 seg->raws[seg->raws_cnt].off = off;
3364                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3365                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3366                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3367                 /* The "last" field is used to store the length of the field */
3368                 seg->raws[seg->raws_cnt].info.src.last = len;
3369         }
3370
3371         /* Overflows of "raws" will be handled as an error condition later in
3372          * the flow when this information is processed.
3373          */
3374         seg->raws_cnt++;
3375 }
3376
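/* Usage sketch (illustrative only): to match 4 raw bytes located 16 bytes
 * into the segment, with the value and mask taken from offsets 0 and 4 of
 * the entry's input buffer (all example values):
 *
 *	ice_flow_add_fld_raw(&seg, 16, 4, 0, 4);
 */
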
3377 /**
3378  * ice_flow_rem_vsi_prof - remove VSI from flow profile
3379  * @hw: pointer to the hardware structure
3380  * @blk: classification stage
3381  * @vsi_handle: software VSI handle
3382  * @prof_id: unique ID to identify this flow profile
3383  *
3384  * This function removes the flow entries associated with the input
3385  * VSI handle and disassociates the VSI from the flow profile.
3386  */
3387 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3388                                       u64 prof_id)
3389 {
3390         struct ice_flow_prof *prof = NULL;
3391         enum ice_status status = ICE_SUCCESS;
3392
3393         if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3394                 return ICE_ERR_PARAM;
3395
3396         /* find flow profile pointer with input package block and profile id */
3397         prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
3398         if (!prof) {
3399                 ice_debug(hw, ICE_DBG_PKG,
3400                           "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3401                 return ICE_ERR_DOES_NOT_EXIST;
3402         }
3403
3404         /* Remove all remaining flow entries before removing the flow profile */
3405         if (!LIST_EMPTY(&prof->entries)) {
3406                 struct ice_flow_entry *e, *t;
3407
3408                 ice_acquire_lock(&prof->entries_lock);
3409                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
3410                                          l_entry) {
3411                         if (e->vsi_handle != vsi_handle)
3412                                 continue;
3413
3414                         status = ice_flow_rem_entry_sync(hw, blk, e);
3415                         if (status)
3416                                 break;
3417                 }
3418                 ice_release_lock(&prof->entries_lock);
3419         }
3420         if (status)
3421                 return status;
3422
3423         /* disassociate the flow profile from sw vsi handle */
3424         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3425         if (status)
3426                 ice_debug(hw, ICE_DBG_PKG,
3427                           "ice_flow_disassoc_prof() failed with status=%d\n",
3428                           status);
3429         return status;
3430 }
3431
3432 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3433 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_ETH_NON_IP | ICE_FLOW_SEG_HDR_VLAN)
3434
3435 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3436         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3437
3438 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3439         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3440
3441 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3442         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3443          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3444          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3445
3446 /**
3447  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3448  * @segs: pointer to the flow field segment(s)
3449  * @seg_cnt: segment count
3450  * @cfg: configure parameters
3451  *
3452  * Helper function to extract fields from the hash bitmap and use the flow
3453  * header value to set up the flow field segment(s) for later use when adding
3454  * or removing a flow profile entry.
3455  */
3456 static enum ice_status
3457 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3458                           const struct ice_rss_hash_cfg *cfg)
3459 {
3460         struct ice_flow_seg_info *seg;
3461         u64 val;
3462         u8 i;
3463
3464         /* set inner most segment */
3465         seg = &segs[seg_cnt - 1];
3466
3467         ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3468                              ICE_FLOW_FIELD_IDX_MAX)
3469                 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3470                                  ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3471                                  ICE_FLOW_FLD_OFF_INVAL, false);
3472
3473         ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3474
3475         /* set outer most header */
3476         if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3477                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3478                                                    ICE_FLOW_SEG_HDR_IPV_FRAG |
3479                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3480         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3481                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3482                                                    ICE_FLOW_SEG_HDR_IPV_FRAG |
3483                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3484
3485         if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3486             ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
3487             ~ICE_FLOW_SEG_HDR_IPV_FRAG)
3488                 return ICE_ERR_PARAM;
3489
3490         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3491         if (val && !ice_is_pow2(val))
3492                 return ICE_ERR_CFG;
3493
3494         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3495         if (val && !ice_is_pow2(val))
3496                 return ICE_ERR_CFG;
3497
3498         return ICE_SUCCESS;
3499 }
3500
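/* Usage sketch (illustrative only): an RSS hash configuration over the outer
 * IPv4 source/destination addresses could be described as follows before
 * being turned into segment info (all values are examples).
 *
 *	struct ice_rss_hash_cfg cfg = { 0 };
 *
 *	cfg.hash_flds = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *			BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA);
 *	cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4;
 *	cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
 *	cfg.symm = false;
 */
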
3501 /**
3502  * ice_rem_vsi_rss_list - remove VSI from RSS list
3503  * @hw: pointer to the hardware structure
3504  * @vsi_handle: software VSI handle
3505  *
3506  * Remove the VSI from all RSS configurations in the list.
3507  */
3508 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3509 {
3510         struct ice_rss_cfg *r, *tmp;
3511
3512         if (LIST_EMPTY(&hw->rss_list_head))
3513                 return;
3514
3515         ice_acquire_lock(&hw->rss_locks);
3516         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3517                                  ice_rss_cfg, l_entry)
3518                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3519                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3520                                 LIST_DEL(&r->l_entry);
3521                                 ice_free(hw, r);
3522                         }
3523         ice_release_lock(&hw->rss_locks);
3524 }
3525
3526 /**
3527  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3528  * @hw: pointer to the hardware structure
3529  * @vsi_handle: software VSI handle
3530  *
3531  * This function will iterate through all flow profiles and disassociate
3532  * the VSI from that profile. If the flow profile has no VSIs it will
3533  * be removed.
3534  */
3535 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3536 {
3537         const enum ice_block blk = ICE_BLK_RSS;
3538         struct ice_flow_prof *p, *t;
3539         enum ice_status status = ICE_SUCCESS;
3540
3541         if (!ice_is_vsi_valid(hw, vsi_handle))
3542                 return ICE_ERR_PARAM;
3543
3544         if (LIST_EMPTY(&hw->fl_profs[blk]))
3545                 return ICE_SUCCESS;
3546
3547         ice_acquire_lock(&hw->rss_locks);
3548         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3549                                  l_entry)
3550                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3551                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3552                         if (status)
3553                                 break;
3554
3555                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3556                                 status = ice_flow_rem_prof(hw, blk, p->id);
3557                                 if (status)
3558                                         break;
3559                         }
3560                 }
3561         ice_release_lock(&hw->rss_locks);
3562
3563         return status;
3564 }
3565
3566 /**
3567  * ice_get_rss_hdr_type - get an RSS profile's header type
3568  * @prof: RSS flow profile
3569  */
3570 static enum ice_rss_cfg_hdr_type
3571 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3572 {
3573         enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3574
3575         if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3576                 hdr_type = ICE_RSS_OUTER_HEADERS;
3577         } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3578                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3579                         hdr_type = ICE_RSS_INNER_HEADERS;
3580                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3581                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3582                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3583                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3584         }
3585
3586         return hdr_type;
3587 }
3588
3589 /**
3590  * ice_rem_rss_list - remove RSS configuration from list
3591  * @hw: pointer to the hardware structure
3592  * @vsi_handle: software VSI handle
3593  * @prof: pointer to flow profile
3594  *
3595  * Assumption: lock has already been acquired for RSS list
3596  */
3597 static void
3598 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3599 {
3600         enum ice_rss_cfg_hdr_type hdr_type;
3601         struct ice_rss_cfg *r, *tmp;
3602
3603         /* Search for RSS hash fields associated with the VSI that match the
3604          * hash configurations associated with the flow profile. If found,
3605          * remove it from the VSI's RSS entry list and delete the entry.
3606          */
3607         hdr_type = ice_get_rss_hdr_type(prof);
3608         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3609                                  ice_rss_cfg, l_entry)
3610                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3611                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3612                     r->hash.hdr_type == hdr_type) {
3613                         ice_clear_bit(vsi_handle, r->vsis);
3614                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3615                                 LIST_DEL(&r->l_entry);
3616                                 ice_free(hw, r);
3617                         }
3618                         return;
3619                 }
3620 }
3621
3622 /**
3623  * ice_add_rss_list - add RSS configuration to list
3624  * @hw: pointer to the hardware structure
3625  * @vsi_handle: software VSI handle
3626  * @prof: pointer to flow profile
3627  *
3628  * Assumption: lock has already been acquired for RSS list
3629  */
3630 static enum ice_status
3631 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3632 {
3633         enum ice_rss_cfg_hdr_type hdr_type;
3634         struct ice_rss_cfg *r, *rss_cfg;
3635
3636         hdr_type = ice_get_rss_hdr_type(prof);
3637         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3638                             ice_rss_cfg, l_entry)
3639                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3640                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3641                     r->hash.hdr_type == hdr_type) {
3642                         ice_set_bit(vsi_handle, r->vsis);
3643                         return ICE_SUCCESS;
3644                 }
3645
3646         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3647         if (!rss_cfg)
3648                 return ICE_ERR_NO_MEMORY;
3649
3650         rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3651         rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3652         rss_cfg->hash.hdr_type = hdr_type;
3653         rss_cfg->hash.symm = prof->cfg.symm;
3654         ice_set_bit(vsi_handle, rss_cfg->vsis);
3655
3656         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3657
3658         return ICE_SUCCESS;
3659 }
3660
3661 #define ICE_FLOW_PROF_HASH_S    0
3662 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3663 #define ICE_FLOW_PROF_HDR_S     32
3664 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3665 #define ICE_FLOW_PROF_ENCAP_S   62
3666 #define ICE_FLOW_PROF_ENCAP_M   (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3667
3668 /* Flow profile ID format:
3669  * [0:31] - Packet match fields
3670  * [32:61] - Protocol header
3671  * [62:63] - Encapsulation flag:
3672  *           0 for non-tunneled
3673  *           1 for tunneled
3674  *           2 for tunneled with outer IPv4
3675  *           3 for tunneled with outer IPv6
3676  */
3677 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3678         ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3679                (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3680                (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
3681
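/* Worked example (illustrative): ICE_FLOW_GEN_PROFID(0xF, 0x54, 2) packs the
 * match-field bits 0xF into [0:31], the protocol header value 0x54 into
 * [32:61] and the encapsulation flag 2 into [62:63], yielding the 64-bit
 * profile ID 0x800000540000000F.
 */
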
3682 static void
3683 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3684 {
3685         u32 s = ((src % 4) << 3); /* byte shift */
3686         u32 v = dst | 0x80; /* value to program */
3687         u8 i = src / 4; /* register index */
3688         u32 reg;
3689
3690         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3691         reg = (reg & ~(0xff << s)) | (v << s);
3692         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3693 }
3694
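/* Worked example (illustrative): for src = 5 and dst = 9, the register index
 * is i = 5 / 4 = 1 and the byte shift is s = (5 % 4) << 3 = 8, so the value
 * 9 | 0x80 = 0x89 is written into byte 1 of GLQF_HSYMM(prof_id, 1).
 */
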
3695 static void
3696 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3697 {
3698         int fv_last_word =
3699                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3700         int i;
3701
3702         for (i = 0; i < len; i++) {
3703                 ice_rss_config_xor_word(hw, prof_id,
3704                                         /* Yes, the field vector order in GLQF_HSYMM
3705                                          * and GLQF_HINSET is reversed!
3706                                          */
3707                                         fv_last_word - (src + i),
3708                                         fv_last_word - (dst + i));
3709                 ice_rss_config_xor_word(hw, prof_id,
3710                                         fv_last_word - (dst + i),
3711                                         fv_last_word - (src + i));
3712         }
3713 }
3714
3715 static void
3716 ice_rss_update_symm(struct ice_hw *hw,
3717                     struct ice_flow_prof *prof)
3718 {
3719         struct ice_prof_map *map;
3720         u8 prof_id, m;
3721
3722         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3723         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3724         if (map)
3725                 prof_id = map->prof_id;
3726         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3727         if (!map)
3728                 return;
3729         /* clear to default */
3730         for (m = 0; m < 6; m++)
3731                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3732         if (prof->cfg.symm) {
3733                 struct ice_flow_seg_info *seg =
3734                         &prof->segs[prof->segs_cnt - 1];
3735
3736                 struct ice_flow_seg_xtrct *ipv4_src =
3737                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3738                 struct ice_flow_seg_xtrct *ipv4_dst =
3739                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3740                 struct ice_flow_seg_xtrct *ipv6_src =
3741                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3742                 struct ice_flow_seg_xtrct *ipv6_dst =
3743                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3744
3745                 struct ice_flow_seg_xtrct *tcp_src =
3746                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3747                 struct ice_flow_seg_xtrct *tcp_dst =
3748                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3749
3750                 struct ice_flow_seg_xtrct *udp_src =
3751                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3752                 struct ice_flow_seg_xtrct *udp_dst =
3753                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3754
3755                 struct ice_flow_seg_xtrct *sctp_src =
3756                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3757                 struct ice_flow_seg_xtrct *sctp_dst =
3758                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3759
3760                 /* xor IPv4 */
3761                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3762                         ice_rss_config_xor(hw, prof_id,
3763                                            ipv4_src->idx, ipv4_dst->idx, 2);
3764
3765                 /* xor IPv6 */
3766                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3767                         ice_rss_config_xor(hw, prof_id,
3768                                            ipv6_src->idx, ipv6_dst->idx, 8);
3769
3770                 /* xor TCP */
3771                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3772                         ice_rss_config_xor(hw, prof_id,
3773                                            tcp_src->idx, tcp_dst->idx, 1);
3774
3775                 /* xor UDP */
3776                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3777                         ice_rss_config_xor(hw, prof_id,
3778                                            udp_src->idx, udp_dst->idx, 1);
3779
3780                 /* xor SCTP */
3781                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3782                         ice_rss_config_xor(hw, prof_id,
3783                                            sctp_src->idx, sctp_dst->idx, 1);
3784         }
3785 }
3786
3787 /**
3788  * ice_add_rss_cfg_sync - add an RSS configuration
3789  * @hw: pointer to the hardware structure
3790  * @vsi_handle: software VSI handle
3791  * @cfg: configuration parameters
3792  *
3793  * Assumption: lock has already been acquired for RSS list
3794  */
3795 static enum ice_status
3796 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3797                      const struct ice_rss_hash_cfg *cfg)
3798 {
3799         const enum ice_block blk = ICE_BLK_RSS;
3800         struct ice_flow_prof *prof = NULL;
3801         struct ice_flow_seg_info *segs;
3802         enum ice_status status;
3803         u8 segs_cnt;
3804
3805         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3806                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3807
3808         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3809                                                       sizeof(*segs));
3810         if (!segs)
3811                 return ICE_ERR_NO_MEMORY;
3812
3813         /* Construct the packet segment info from the hashed fields */
3814         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3815         if (status)
3816                 goto exit;
3817
3818         /* Search for a flow profile that has matching headers, hash fields
3819          * and has the input VSI associated with it. If found, no further
3820          * operation is required, so exit.
3821          */
3822         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3823                                         vsi_handle,
3824                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
3825                                         ICE_FLOW_FIND_PROF_CHK_VSI);
3826         if (prof) {
3827                 if (prof->cfg.symm == cfg->symm)
3828                         goto exit;
3829                 prof->cfg.symm = cfg->symm;
3830                 goto update_symm;
3831         }
3832
3833         /* Check if a flow profile exists with the same protocol headers and
3834          * associated with the input VSI. If so, disassociate the VSI from
3835          * this profile. The VSI will be added to a new profile created with
3836          * the protocol headers and the new hash field configuration.
3837          */
3838         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3839                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3840         if (prof) {
3841                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3842                 if (!status)
3843                         ice_rem_rss_list(hw, vsi_handle, prof);
3844                 else
3845                         goto exit;
3846
3847                 /* Remove profile if it has no VSIs associated */
3848                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3849                         status = ice_flow_rem_prof(hw, blk, prof->id);
3850                         if (status)
3851                                 goto exit;
3852                 }
3853         }
3854
3855         /* Search for a profile that has the same match fields only. If one
3856          * exists, associate the VSI with this profile.
3857          */
3858         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3859                                         vsi_handle,
3860                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3861         if (prof) {
3862                 if (prof->cfg.symm == cfg->symm) {
3863                         status = ice_flow_assoc_prof(hw, blk, prof,
3864                                                      vsi_handle);
3865                         if (!status)
3866                                 status = ice_add_rss_list(hw, vsi_handle,
3867                                                           prof);
3868                 } else {
3869                         /* If a profile exists but with a different symmetric
3870                          * hashing requirement, just return an error.
3871                          */
3872                         status = ICE_ERR_NOT_SUPPORTED;
3873                 }
3874                 goto exit;
3875         }
3876
3877         /* Create a new flow profile with the generated profile ID and packet
3878          * segment information.
3879          */
3880         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3881                                    ICE_FLOW_GEN_PROFID(cfg->hash_flds,
3882                                                        segs[segs_cnt - 1].hdrs,
3883                                                        cfg->hdr_type),
3884                                    segs, segs_cnt, NULL, 0, &prof);
3885         if (status)
3886                 goto exit;
3887
3888         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3889         /* If association with the new flow profile failed, then this profile can
3890          * be removed.
3891          */
3892         if (status) {
3893                 ice_flow_rem_prof(hw, blk, prof->id);
3894                 goto exit;
3895         }
3896
3897         status = ice_add_rss_list(hw, vsi_handle, prof);
3898
3899         prof->cfg.symm = cfg->symm;
3900 update_symm:
3901         ice_rss_update_symm(hw, prof);
3902
3903 exit:
3904         ice_free(hw, segs);
3905         return status;
3906 }
3907
3908 /**
3909  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3910  * @hw: pointer to the hardware structure
3911  * @vsi_handle: software VSI handle
3912  * @cfg: configuration parameters
3913  *
3914  * This function will generate a flow profile based on the input fields to
3915  * hash on and the flow type, and will use the VSI number to add a flow
3916  * entry to the profile.
3917  */
3918 enum ice_status
3919 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3920                 const struct ice_rss_hash_cfg *cfg)
3921 {
3922         struct ice_rss_hash_cfg local_cfg;
3923         enum ice_status status;
3924
3925         if (!ice_is_vsi_valid(hw, vsi_handle) ||
3926             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3927             cfg->hash_flds == ICE_HASH_INVALID)
3928                 return ICE_ERR_PARAM;
3929
3930         local_cfg = *cfg;
3931         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3932                 ice_acquire_lock(&hw->rss_locks);
3933                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3934                 ice_release_lock(&hw->rss_locks);
3935         } else {
3936                 ice_acquire_lock(&hw->rss_locks);
3937                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3938                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3939                 if (!status) {
3940                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3941                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
3942                                                       &local_cfg);
3943                 }
3944                 ice_release_lock(&hw->rss_locks);
3945         }
3946
3947         return status;
3948 }
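/* Usage sketch (illustrative only, not called by the driver): a caller could
 * request a symmetric hash on the outer IPv4 source/destination addresses
 * roughly as follows. Building hash_flds from BIT_ULL() of the field indices
 * is an assumption about the hash bitmap encoding; the ICE_FLOW_HASH_*
 * convenience masks in ice_flow.h can be used instead.
 *
 *     struct ice_rss_hash_cfg cfg = { 0 };
 *     enum ice_status status;
 *
 *     cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4;
 *     cfg.hash_flds = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *                     BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA);
 *     cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
 *     cfg.symm = true;
 *     status = ice_add_rss_cfg(hw, vsi_handle, &cfg);
 */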
3949
3950 /**
3951  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3952  * @hw: pointer to the hardware structure
3953  * @vsi_handle: software VSI handle
3954  * @cfg: configuration parameters
3955  *
3956  * Assumption: lock has already been acquired for RSS list
3957  */
3958 static enum ice_status
3959 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3960                      const struct ice_rss_hash_cfg *cfg)
3961 {
3962         const enum ice_block blk = ICE_BLK_RSS;
3963         struct ice_flow_seg_info *segs;
3964         struct ice_flow_prof *prof;
3965         enum ice_status status;
3966         u8 segs_cnt;
3967
3968         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3969                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3970         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3971                                                       sizeof(*segs));
3972         if (!segs)
3973                 return ICE_ERR_NO_MEMORY;
3974
3975         /* Construct the packet segment info from the hashed fields */
3976         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3977         if (status)
3978                 goto out;
3979
3980         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3981                                         vsi_handle,
3982                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
3983         if (!prof) {
3984                 status = ICE_ERR_DOES_NOT_EXIST;
3985                 goto out;
3986         }
3987
3988         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3989         if (status)
3990                 goto out;
3991
3992         /* Remove RSS configuration from VSI context before deleting
3993          * the flow profile.
3994          */
3995         ice_rem_rss_list(hw, vsi_handle, prof);
3996
3997         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3998                 status = ice_flow_rem_prof(hw, blk, prof->id);
3999
4000 out:
4001         ice_free(hw, segs);
4002         return status;
4003 }
4004
4005 /**
4006  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
4007  * @hw: pointer to the hardware structure
4008  * @vsi_handle: software VSI handle
4009  * @cfg: configuration parameters
4010  *
4011  * This function will look up the flow profile based on the input
4012  * hash field bitmap, iterate through the profile entry list of
4013  * that profile and find the entry associated with the input VSI to be
4014  * removed. Calls are made to underlying flow APIs which will in
4015  * turn build or update buffers for the RSS XLT1 section.
4016  */
4017 enum ice_status
4018 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4019                 const struct ice_rss_hash_cfg *cfg)
4020 {
4021         struct ice_rss_hash_cfg local_cfg;
4022         enum ice_status status;
4023
4024         if (!ice_is_vsi_valid(hw, vsi_handle) ||
4025             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4026             cfg->hash_flds == ICE_HASH_INVALID)
4027                 return ICE_ERR_PARAM;
4028
4029         ice_acquire_lock(&hw->rss_locks);
4030         local_cfg = *cfg;
4031         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4032                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4033         } else {
4034                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4035                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4036
4037                 if (!status) {
4038                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4039                         status = ice_rem_rss_cfg_sync(hw, vsi_handle,
4040                                                       &local_cfg);
4041                 }
4042         }
4043         ice_release_lock(&hw->rss_locks);
4044
4045         return status;
4046 }
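/* Usage sketch (illustrative only): removing a configuration passes the same
 * ice_rss_hash_cfg that was used when it was added, e.g.:
 *
 *     status = ice_rem_rss_cfg(hw, vsi_handle, &cfg);
 */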
4047
4048 /**
4049  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
4050  * @hw: pointer to the hardware structure
4051  * @vsi_handle: software VSI handle
4052  */
4053 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
4054 {
4055         enum ice_status status = ICE_SUCCESS;
4056         struct ice_rss_cfg *r;
4057
4058         if (!ice_is_vsi_valid(hw, vsi_handle))
4059                 return ICE_ERR_PARAM;
4060
4061         ice_acquire_lock(&hw->rss_locks);
4062         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4063                             ice_rss_cfg, l_entry) {
4064                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
4065                         status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
4066                         if (status)
4067                                 break;
4068                 }
4069         }
4070         ice_release_lock(&hw->rss_locks);
4071
4072         return status;
4073 }
4074
4075 /**
4076  * ice_get_rss_cfg - returns hashed fields for the given header types
4077  * @hw: pointer to the hardware structure
4078  * @vsi_handle: software VSI handle
4079  * @hdrs: protocol header type
4080  *
4081  * This function will return the match fields of the first instance of a flow
4082  * profile that has the given header types and contains the input VSI.
4083  */
4084 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4085 {
4086         u64 rss_hash = ICE_HASH_INVALID;
4087         struct ice_rss_cfg *r;
4088
4089         /* verify that the protocol header is non-zero and the VSI is valid */
4090         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4091                 return ICE_HASH_INVALID;
4092
4093         ice_acquire_lock(&hw->rss_locks);
4094         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4095                             ice_rss_cfg, l_entry)
4096                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4097                     r->hash.addl_hdrs == hdrs) {
4098                         rss_hash = r->hash.hash_flds;
4099                         break;
4100                 }
4101         ice_release_lock(&hw->rss_locks);
4102
4103         return rss_hash;
4104 }
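/* Usage sketch (illustrative only): querying which fields are currently
 * hashed for plain IPv4 traffic on a VSI:
 *
 *     u64 hash_flds = ice_get_rss_cfg(hw, vsi_handle, ICE_FLOW_SEG_HDR_IPV4);
 *
 * A return value of ICE_HASH_INVALID means no matching RSS configuration
 * exists for that VSI.
 */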