net/ice/base: support RSS for IPv4/L4 checksum
[dpdk.git] / drivers/net/ice/base/ice_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2021 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IPV4_ID         2
17 #define ICE_FLOW_FLD_SZ_IPV6_ID         4
18 #define ICE_FLOW_FLD_SZ_IP_CHKSUM       2
19 #define ICE_FLOW_FLD_SZ_TCP_CHKSUM      2
20 #define ICE_FLOW_FLD_SZ_UDP_CHKSUM      2
21 #define ICE_FLOW_FLD_SZ_SCTP_CHKSUM     4
22 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
23 #define ICE_FLOW_FLD_SZ_IP_TTL          1
24 #define ICE_FLOW_FLD_SZ_IP_PROT         1
25 #define ICE_FLOW_FLD_SZ_PORT            2
26 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
27 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
28 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
29 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
30 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
31 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
32 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
33 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
34 #define ICE_FLOW_FLD_SZ_PFCP_SEID       8
35 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
36 #define ICE_FLOW_FLD_SZ_ESP_SPI         4
37 #define ICE_FLOW_FLD_SZ_AH_SPI          4
38 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
39 #define ICE_FLOW_FLD_SZ_VXLAN_VNI       4
40 #define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
41
42 /* Describe properties of a protocol header field */
43 struct ice_flow_field_info {
44         enum ice_flow_seg_hdr hdr;
45         s16 off;        /* Offset from start of a protocol header, in bits */
46         u16 size;       /* Size of the field, in bits */
47         u16 mask;       /* 16-bit mask for field */
48 };
49
50 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
51         .hdr = _hdr, \
52         .off = (_offset_bytes) * BITS_PER_BYTE, \
53         .size = (_size_bytes) * BITS_PER_BYTE, \
54         .mask = 0, \
55 }
56
57 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
58         .hdr = _hdr, \
59         .off = (_offset_bytes) * BITS_PER_BYTE, \
60         .size = (_size_bytes) * BITS_PER_BYTE, \
61         .mask = _mask, \
62 }
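/* Editor's note (illustrative, not part of the original driver): with
 * BITS_PER_BYTE == 8, ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12,
 * ICE_FLOW_FLD_SZ_IPV4_ADDR) expands to
 * { .hdr = ICE_FLOW_SEG_HDR_IPV4, .off = 96, .size = 32, .mask = 0 },
 * i.e. byte offsets and sizes passed to these macros are stored in bit
 * units.
 */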
63
64 /* Table containing properties of supported protocol header fields */
65 static const
66 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
67         /* Ether */
68         /* ICE_FLOW_FIELD_IDX_ETH_DA */
69         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
70         /* ICE_FLOW_FIELD_IDX_ETH_SA */
71         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
72         /* ICE_FLOW_FIELD_IDX_S_VLAN */
73         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
74         /* ICE_FLOW_FIELD_IDX_C_VLAN */
75         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
76         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
77         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
78         /* IPv4 / IPv6 */
79         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
80         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
81                               0x00fc),
82         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
83         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
84                               0x0ff0),
85         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
86         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
87                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
88         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
89         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
90                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
91         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
92         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
93                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
94         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
95         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
96                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
97         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
99         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
100         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
101         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
102         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
103         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
104         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
105         /* ICE_FLOW_FIELD_IDX_IPV4_CHKSUM */
106         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 10, ICE_FLOW_FLD_SZ_IP_CHKSUM),
107         /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
108         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
109                           ICE_FLOW_FLD_SZ_IPV4_ID),
110         /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
111         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
112                           ICE_FLOW_FLD_SZ_IPV6_ID),
113         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
114         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
115                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
116         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
117         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
118                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
119         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
120         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
121                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
122         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
123         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
124                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
125         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
126         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
127                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
128         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
129         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
130                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
131         /* Transport */
132         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
133         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
134         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
135         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
136         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
137         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
138         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
139         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
140         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
141         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
142         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
143         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
144         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
145         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
146         /* ICE_FLOW_FIELD_IDX_TCP_CHKSUM */
147         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 16, ICE_FLOW_FLD_SZ_TCP_CHKSUM),
148         /* ICE_FLOW_FIELD_IDX_UDP_CHKSUM */
149         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 6, ICE_FLOW_FLD_SZ_UDP_CHKSUM),
150         /* ICE_FLOW_FIELD_IDX_SCTP_CHKSUM */
151         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 8,
152                           ICE_FLOW_FLD_SZ_SCTP_CHKSUM),
153         /* ARP */
154         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
155         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
156         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
157         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
158         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
159         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
160         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
161         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
162         /* ICE_FLOW_FIELD_IDX_ARP_OP */
163         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
164         /* ICMP */
165         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
166         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
167         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
168         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
169         /* GRE */
170         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
171         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
172         /* GTP */
173         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
174         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
175                           ICE_FLOW_FLD_SZ_GTP_TEID),
176         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
177         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
178                           ICE_FLOW_FLD_SZ_GTP_TEID),
179         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
180         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
181                           ICE_FLOW_FLD_SZ_GTP_TEID),
182         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
183         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
184                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
185         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
186         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
187                           ICE_FLOW_FLD_SZ_GTP_TEID),
188         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
189         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
190                           ICE_FLOW_FLD_SZ_GTP_TEID),
191         /* PPPOE */
192         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
193         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
194                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
195         /* PFCP */
196         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
197         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
198                           ICE_FLOW_FLD_SZ_PFCP_SEID),
199         /* L2TPV3 */
200         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
201         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
202                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
203         /* ESP */
204         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
205         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
206                           ICE_FLOW_FLD_SZ_ESP_SPI),
207         /* AH */
208         /* ICE_FLOW_FIELD_IDX_AH_SPI */
209         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
210                           ICE_FLOW_FLD_SZ_AH_SPI),
211         /* NAT_T_ESP */
212         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
213         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
214                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
215         /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
216         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
217                           ICE_FLOW_FLD_SZ_VXLAN_VNI),
218         /* ECPRI_TP0 */
219         /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
220         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
221                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
222         /* UDP_ECPRI_TP0 */
223         /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
224         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
225                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
226 };
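/* Editor's note: the table above is indexed by the ICE_FLOW_FIELD_IDX_*
 * enum (see ice_flow.h), so the entry order must match the enum order;
 * the per-entry comments make that correspondence explicit.
 */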
227
228 /* Bitmaps indicating relevant packet types for a particular protocol header
229  *
230  * Packet types for packets with an Outer/First/Single MAC header
231  */
232 static const u32 ice_ptypes_mac_ofos[] = {
233         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
234         0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
235         0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
236         0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
237         0x00000000, 0x00000000, 0x00000000, 0x00000000,
238         0x00000000, 0x00000000, 0x00000000, 0x00000000,
239         0x00000000, 0x00000000, 0x00000000, 0x00000000,
240         0x00000000, 0x00000000, 0x00000000, 0x00000000,
241 };
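/* Editor's note (illustrative): each ptype bitmap in this file spans
 * 32 x 32 = 1024 bits, one bit per hardware packet type (PTYPE) index.
 * Bit N of word W marks PTYPE (W * 32 + N) as relevant for that header
 * placement; e.g. bit 0 of word 1 corresponds to PTYPE 32.
 */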
242
243 /* Packet types for packets with an Innermost/Last MAC VLAN header */
244 static const u32 ice_ptypes_macvlan_il[] = {
245         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
246         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
247         0x00000000, 0x00000000, 0x00000000, 0x00000000,
248         0x00000000, 0x00000000, 0x00000000, 0x00000000,
249         0x00000000, 0x00000000, 0x00000000, 0x00000000,
250         0x00000000, 0x00000000, 0x00000000, 0x00000000,
251         0x00000000, 0x00000000, 0x00000000, 0x00000000,
252         0x00000000, 0x00000000, 0x00000000, 0x00000000,
253 };
254
255 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
256  * does NOT include IPV4 other PTYPEs
257  */
258 static const u32 ice_ptypes_ipv4_ofos[] = {
259         0x1D800000, 0x24000800, 0x00000000, 0x00000000,
260         0x00000000, 0x00000155, 0x00000000, 0x00000000,
261         0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
262         0x00001500, 0x00000000, 0x00000000, 0x00000000,
263         0x00000000, 0x00000000, 0x00000000, 0x00000000,
264         0x00000000, 0x00000000, 0x00000000, 0x00000000,
265         0x00000000, 0x00000000, 0x00000000, 0x00000000,
266         0x00000000, 0x00000000, 0x00000000, 0x00000000,
267 };
268
269 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
270  * includes IPV4 other PTYPEs
271  */
272 static const u32 ice_ptypes_ipv4_ofos_all[] = {
273         0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
274         0x00000000, 0x00000155, 0x00000000, 0x00000000,
275         0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
276         0x03FFD000, 0x00000000, 0x02FBEFBC, 0x00000000,
277         0x00000000, 0x00000000, 0x00000000, 0x00000000,
278         0x00000000, 0x00000000, 0x00000000, 0x00000000,
279         0x00000000, 0x00000000, 0x00000000, 0x00000000,
280         0x00000000, 0x00000000, 0x00000000, 0x00000000,
281 };
282
283 /* Packet types for packets with an Innermost/Last IPv4 header */
284 static const u32 ice_ptypes_ipv4_il[] = {
285         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
286         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
287         0x00000000, 0x00000000, 0x001FF800, 0x00100000,
288         0xFC0FC000, 0x00000000, 0xBC0BC0BC, 0x00000BC0,
289         0x00000000, 0x00000000, 0x00000000, 0x00000000,
290         0x00000000, 0x00000000, 0x00000000, 0x00000000,
291         0x00000000, 0x00000000, 0x00000000, 0x00000000,
292         0x00000000, 0x00000000, 0x00000000, 0x00000000,
293 };
294
295 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
296  * does NOT include IPV6 other PTYPEs
297  */
298 static const u32 ice_ptypes_ipv6_ofos[] = {
299         0x00000000, 0x00000000, 0x76000000, 0x10002000,
300         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
301         0x00000000, 0x03F00000, 0x00000540, 0x00000000,
302         0x00002A00, 0x00000000, 0x00000000, 0x00000000,
303         0x00000000, 0x00000000, 0x00000000, 0x00000000,
304         0x00000000, 0x00000000, 0x00000000, 0x00000000,
305         0x00000000, 0x00000000, 0x00000000, 0x00000000,
306         0x00000000, 0x00000000, 0x00000000, 0x00000000,
307 };
308
309 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
310  * includes IPV6 other PTYPEs
311  */
312 static const u32 ice_ptypes_ipv6_ofos_all[] = {
313         0x00000000, 0x00000000, 0x76000000, 0x1EFDE000,
314         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
315         0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
316         0xFC002000, 0x0000003F, 0xBC000000, 0x0002FBEF,
317         0x00000000, 0x00000000, 0x00000000, 0x00000000,
318         0x00000000, 0x00000000, 0x00000000, 0x00000000,
319         0x00000000, 0x00000000, 0x00000000, 0x00000000,
320         0x00000000, 0x00000000, 0x00000000, 0x00000000,
321 };
322
323 /* Packet types for packets with an Innermost/Last IPv6 header */
324 static const u32 ice_ptypes_ipv6_il[] = {
325         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
326         0x00000770, 0x00000000, 0x00000000, 0x00000000,
327         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
328         0x03F00000, 0x0000003F, 0x02F02F00, 0x0002F02F,
329         0x00000000, 0x00000000, 0x00000000, 0x00000000,
330         0x00000000, 0x00000000, 0x00000000, 0x00000000,
331         0x00000000, 0x00000000, 0x00000000, 0x00000000,
332         0x00000000, 0x00000000, 0x00000000, 0x00000000,
333 };
334
335 /* Packet types for packets with an Outer/First/Single
336  * non-frag IPv4 header - no L4
337  */
338 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
339         0x10800000, 0x04000800, 0x00000000, 0x00000000,
340         0x00000000, 0x00000000, 0x00000000, 0x00000000,
341         0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
342         0x00001500, 0x00000000, 0x00000000, 0x00000000,
343         0x00000000, 0x00000000, 0x00000000, 0x00000000,
344         0x00000000, 0x00000000, 0x00000000, 0x00000000,
345         0x00000000, 0x00000000, 0x00000000, 0x00000000,
346         0x00000000, 0x00000000, 0x00000000, 0x00000000,
347 };
348
349 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
350 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
351         0x60000000, 0x18043008, 0x80000002, 0x6010c021,
352         0x00000008, 0x00000000, 0x00000000, 0x00000000,
353         0x00000000, 0x00000000, 0x00139800, 0x00000000,
354         0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
355         0x00000000, 0x00000000, 0x00000000, 0x00000000,
356         0x00000000, 0x00000000, 0x00000000, 0x00000000,
357         0x00000000, 0x00000000, 0x00000000, 0x00000000,
358         0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 };
360
361 /* Packet types for packets with an Outer/First/Single
362  * non-frag IPv6 header - no L4
363  */
364 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
365         0x00000000, 0x00000000, 0x42000000, 0x10002000,
366         0x00000000, 0x00000000, 0x00000000, 0x00000000,
367         0x00000000, 0x02300000, 0x00000540, 0x00000000,
368         0x00002A00, 0x00000000, 0x00000000, 0x00000000,
369         0x00000000, 0x00000000, 0x00000000, 0x00000000,
370         0x00000000, 0x00000000, 0x00000000, 0x00000000,
371         0x00000000, 0x00000000, 0x00000000, 0x00000000,
372         0x00000000, 0x00000000, 0x00000000, 0x00000000,
373 };
374
375 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
376 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
377         0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
378         0x00000430, 0x00000000, 0x00000000, 0x00000000,
379         0x00000000, 0x00000000, 0x4e600000, 0x00000000,
380         0x02300000, 0x00000023, 0x00000000, 0x00000000,
381         0x00000000, 0x00000000, 0x00000000, 0x00000000,
382         0x00000000, 0x00000000, 0x00000000, 0x00000000,
383         0x00000000, 0x00000000, 0x00000000, 0x00000000,
384         0x00000000, 0x00000000, 0x00000000, 0x00000000,
385 };
386
387 /* Packet types for packets with an Outermost/First ARP header */
388 static const u32 ice_ptypes_arp_of[] = {
389         0x00000800, 0x00000000, 0x00000000, 0x00000000,
390         0x00000000, 0x00000000, 0x00000000, 0x00000000,
391         0x00000000, 0x00000000, 0x00000000, 0x00000000,
392         0x00000000, 0x00000000, 0x00000000, 0x00000000,
393         0x00000000, 0x00000000, 0x00000000, 0x00000000,
394         0x00000000, 0x00000000, 0x00000000, 0x00000000,
395         0x00000000, 0x00000000, 0x00000000, 0x00000000,
396         0x00000000, 0x00000000, 0x00000000, 0x00000000,
397 };
398
399 /* UDP Packet types for non-tunneled packets or tunneled
400  * packets with inner UDP.
401  */
402 static const u32 ice_ptypes_udp_il[] = {
403         0x81000000, 0x20204040, 0x04000010, 0x80810102,
404         0x00000040, 0x00000000, 0x00000000, 0x00000000,
405         0x00000000, 0x00410000, 0x908427E0, 0x00100007,
406         0x10410000, 0x00000004, 0x10410410, 0x00004104,
407         0x00000000, 0x00000000, 0x00000000, 0x00000000,
408         0x00000000, 0x00000000, 0x00000000, 0x00000000,
409         0x00000000, 0x00000000, 0x00000000, 0x00000000,
410         0x00000000, 0x00000000, 0x00000000, 0x00000000,
411 };
412
413 /* Packet types for packets with an Innermost/Last TCP header */
414 static const u32 ice_ptypes_tcp_il[] = {
415         0x04000000, 0x80810102, 0x10000040, 0x02040408,
416         0x00000102, 0x00000000, 0x00000000, 0x00000000,
417         0x00000000, 0x00820000, 0x21084000, 0x00000000,
418         0x20820000, 0x00000008, 0x20820820, 0x00008208,
419         0x00000000, 0x00000000, 0x00000000, 0x00000000,
420         0x00000000, 0x00000000, 0x00000000, 0x00000000,
421         0x00000000, 0x00000000, 0x00000000, 0x00000000,
422         0x00000000, 0x00000000, 0x00000000, 0x00000000,
423 };
424
425 /* Packet types for packets with an Innermost/Last SCTP header */
426 static const u32 ice_ptypes_sctp_il[] = {
427         0x08000000, 0x01020204, 0x20000081, 0x04080810,
428         0x00000204, 0x00000000, 0x00000000, 0x00000000,
429         0x00000000, 0x01040000, 0x00000000, 0x00000000,
430         0x41040000, 0x00000010, 0x00000000, 0x00000000,
431         0x00000000, 0x00000000, 0x00000000, 0x00000000,
432         0x00000000, 0x00000000, 0x00000000, 0x00000000,
433         0x00000000, 0x00000000, 0x00000000, 0x00000000,
434         0x00000000, 0x00000000, 0x00000000, 0x00000000,
435 };
436
437 /* Packet types for packets with an Outermost/First ICMP header */
438 static const u32 ice_ptypes_icmp_of[] = {
439         0x10000000, 0x00000000, 0x00000000, 0x00000000,
440         0x00000000, 0x00000000, 0x00000000, 0x00000000,
441         0x00000000, 0x00000000, 0x00000000, 0x00000000,
442         0x00000000, 0x00000000, 0x00000000, 0x00000000,
443         0x00000000, 0x00000000, 0x00000000, 0x00000000,
444         0x00000000, 0x00000000, 0x00000000, 0x00000000,
445         0x00000000, 0x00000000, 0x00000000, 0x00000000,
446         0x00000000, 0x00000000, 0x00000000, 0x00000000,
447 };
448
449 /* Packet types for packets with an Innermost/Last ICMP header */
450 static const u32 ice_ptypes_icmp_il[] = {
451         0x00000000, 0x02040408, 0x40000102, 0x08101020,
452         0x00000408, 0x00000000, 0x00000000, 0x00000000,
453         0x00000000, 0x00000000, 0x42108000, 0x00000000,
454         0x82080000, 0x00000020, 0x00000000, 0x00000000,
455         0x00000000, 0x00000000, 0x00000000, 0x00000000,
456         0x00000000, 0x00000000, 0x00000000, 0x00000000,
457         0x00000000, 0x00000000, 0x00000000, 0x00000000,
458         0x00000000, 0x00000000, 0x00000000, 0x00000000,
459 };
460
461 /* Packet types for packets with an Outermost/First GRE header */
462 static const u32 ice_ptypes_gre_of[] = {
463         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
464         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
465         0x00000000, 0x00000000, 0x00000000, 0x00000000,
466         0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF,
467         0x00000000, 0x00000000, 0x00000000, 0x00000000,
468         0x00000000, 0x00000000, 0x00000000, 0x00000000,
469         0x00000000, 0x00000000, 0x00000000, 0x00000000,
470         0x00000000, 0x00000000, 0x00000000, 0x00000000,
471 };
472
473 /* Packet types for packets with an Innermost/Last MAC header */
474 static const u32 ice_ptypes_mac_il[] = {
475         0x00000000, 0x20000000, 0x00000000, 0x00000000,
476         0x00000000, 0x00000000, 0x00000000, 0x00000000,
477         0x00000000, 0x00000000, 0x00000000, 0x00000000,
478         0x00000000, 0x00000000, 0x00000000, 0x00000000,
479         0x00000000, 0x00000000, 0x00000000, 0x00000000,
480         0x00000000, 0x00000000, 0x00000000, 0x00000000,
481         0x00000000, 0x00000000, 0x00000000, 0x00000000,
482         0x00000000, 0x00000000, 0x00000000, 0x00000000,
483 };
484
485 /* Packet types for GTPC */
486 static const u32 ice_ptypes_gtpc[] = {
487         0x00000000, 0x00000000, 0x00000000, 0x00000000,
488         0x00000000, 0x00000000, 0x00000000, 0x00000000,
489         0x00000000, 0x00000000, 0x000001E0, 0x00000000,
490         0x00000000, 0x00000000, 0x00000000, 0x00000000,
491         0x00000000, 0x00000000, 0x00000000, 0x00000000,
492         0x00000000, 0x00000000, 0x00000000, 0x00000000,
493         0x00000000, 0x00000000, 0x00000000, 0x00000000,
494         0x00000000, 0x00000000, 0x00000000, 0x00000000,
495 };
496
497 /* Packet types for VXLAN with VNI */
498 static const u32 ice_ptypes_vxlan_vni[] = {
499         0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
500         0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
501         0x00000000, 0x00000000, 0x00000000, 0x00000000,
502         0x00000000, 0x00000000, 0x00000000, 0x00000000,
503         0x00000000, 0x00000000, 0x00000000, 0x00000000,
504         0x00000000, 0x00000000, 0x00000000, 0x00000000,
505         0x00000000, 0x00000000, 0x00000000, 0x00000000,
506         0x00000000, 0x00000000, 0x00000000, 0x00000000,
507 };
508
509 /* Packet types for GTPC with TEID */
510 static const u32 ice_ptypes_gtpc_tid[] = {
511         0x00000000, 0x00000000, 0x00000000, 0x00000000,
512         0x00000000, 0x00000000, 0x00000000, 0x00000000,
513         0x00000000, 0x00000000, 0x00000060, 0x00000000,
514         0x00000000, 0x00000000, 0x00000000, 0x00000000,
515         0x00000000, 0x00000000, 0x00000000, 0x00000000,
516         0x00000000, 0x00000000, 0x00000000, 0x00000000,
517         0x00000000, 0x00000000, 0x00000000, 0x00000000,
518         0x00000000, 0x00000000, 0x00000000, 0x00000000,
519 };
520
521 /* Packet types for GTPU */
522 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
523         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
524         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
525         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
526         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
527         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
528         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
529         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
530         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
531         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
532         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
533         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
534         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
535         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
536         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
537         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
538         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
539         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
540         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
541         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
542         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
543         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
544         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
545         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
546         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
547         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
548         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
549         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
550         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
551         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
552         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
553         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
554         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
555         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
556         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
557         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
558         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
559         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
560         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
561         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
562         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
563         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
564         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
565         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
566         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
567         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
568         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
569         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
570         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
571         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
572         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
573         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
574         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
575         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
576         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
577         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
578         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
579         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
580         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
581         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
582         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
583 };
584
585 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
586         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
587         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
588         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
589         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
590         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
591         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
592         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
593         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
594         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
595         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
596         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
597         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
598         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
599         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
600         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
601         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
602         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
603         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
604         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
605         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
606         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
607         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
608         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
609         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
610         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
611         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
612         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
613         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
614         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
615         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
616         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
617         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
618         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
619         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
620         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
621         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
622         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
623         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
624         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
625         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
626         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
627         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
628         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
629         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
630         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
631         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
632         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
633         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
634         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
635         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
636         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
637         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
638         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
639         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
640         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
641         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
642         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
643         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
644         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
645         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
646 };
647
648 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
649         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
650         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
651         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
652         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
653         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
654         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
655         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
656         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
657         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
658         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
659         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
660         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
661         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
662         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
663         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
664         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
665         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
666         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
667         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
668         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
669         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
670         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
671         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
672         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
673         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
674         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
675         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
676         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
677         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
678         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
679         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
680         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
681         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
682         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
683         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
684         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
685         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
686         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
687         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
688         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
689         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
690         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
691         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
692         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
693         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
694         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
695         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
696         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
697         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
698         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
699         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
700         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
701         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
702         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
703         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
704         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
705         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
706         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
707         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
708         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
709 };
710
711 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
712         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
713         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
714         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
715         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
716         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
717         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
718         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
719         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
720         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
721         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
722         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
723         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
724         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
725         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
726         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
727         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
728         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
729         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
730         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
731         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
732         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
733         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
734         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
735         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
736         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
737         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
738         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
739         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
740         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
741         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
742         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
743         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
744         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
745         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
746         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
747         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
748         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
749         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
750         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
751         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
752         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
753         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
754         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
755         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
756         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
757         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
758         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
759         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
760         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
761         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
762         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
763         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
764         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
765         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
766         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
767         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
768         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
769         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
770         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
771         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
772 };
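/* Editor's note: the four attribute tables above cover the same GTP-U
 * PTYPEs but tag them with different PDU attributes (session, PDU with
 * extension header, downlink, uplink); these attributes let profiles
 * that share PTYPEs still be distinguished from one another.
 */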
773
774 static const u32 ice_ptypes_gtpu[] = {
775         0x00000000, 0x00000000, 0x00000000, 0x00000000,
776         0x00000000, 0x00000000, 0x00000000, 0x00000000,
777         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
778         0x00000000, 0x0000003F, 0xBEFBEFBC, 0x0002FBEF,
779         0x00000000, 0x00000000, 0x00000000, 0x00000000,
780         0x00000000, 0x00000000, 0x00000000, 0x00000000,
781         0x00000000, 0x00000000, 0x00000000, 0x00000000,
782         0x00000000, 0x00000000, 0x00000000, 0x00000000,
783 };
784
785 /* Packet types for pppoe */
786 static const u32 ice_ptypes_pppoe[] = {
787         0x00000000, 0x00000000, 0x00000000, 0x00000000,
788         0x00000000, 0x00000000, 0x00000000, 0x00000000,
789         0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
790         0x00000000, 0x00000000, 0x00000000, 0x00000000,
791         0x00000000, 0x00000000, 0x00000000, 0x00000000,
792         0x00000000, 0x00000000, 0x00000000, 0x00000000,
793         0x00000000, 0x00000000, 0x00000000, 0x00000000,
794         0x00000000, 0x00000000, 0x00000000, 0x00000000,
795 };
796
797 /* Packet types for packets with PFCP NODE header */
798 static const u32 ice_ptypes_pfcp_node[] = {
799         0x00000000, 0x00000000, 0x00000000, 0x00000000,
800         0x00000000, 0x00000000, 0x00000000, 0x00000000,
801         0x00000000, 0x00000000, 0x80000000, 0x00000002,
802         0x00000000, 0x00000000, 0x00000000, 0x00000000,
803         0x00000000, 0x00000000, 0x00000000, 0x00000000,
804         0x00000000, 0x00000000, 0x00000000, 0x00000000,
805         0x00000000, 0x00000000, 0x00000000, 0x00000000,
806         0x00000000, 0x00000000, 0x00000000, 0x00000000,
807 };
808
809 /* Packet types for packets with PFCP SESSION header */
810 static const u32 ice_ptypes_pfcp_session[] = {
811         0x00000000, 0x00000000, 0x00000000, 0x00000000,
812         0x00000000, 0x00000000, 0x00000000, 0x00000000,
813         0x00000000, 0x00000000, 0x00000000, 0x00000005,
814         0x00000000, 0x00000000, 0x00000000, 0x00000000,
815         0x00000000, 0x00000000, 0x00000000, 0x00000000,
816         0x00000000, 0x00000000, 0x00000000, 0x00000000,
817         0x00000000, 0x00000000, 0x00000000, 0x00000000,
818         0x00000000, 0x00000000, 0x00000000, 0x00000000,
819 };
820
821 /* Packet types for l2tpv3 */
822 static const u32 ice_ptypes_l2tpv3[] = {
823         0x00000000, 0x00000000, 0x00000000, 0x00000000,
824         0x00000000, 0x00000000, 0x00000000, 0x00000000,
825         0x00000000, 0x00000000, 0x00000000, 0x00000300,
826         0x00000000, 0x00000000, 0x00000000, 0x00000000,
827         0x00000000, 0x00000000, 0x00000000, 0x00000000,
828         0x00000000, 0x00000000, 0x00000000, 0x00000000,
829         0x00000000, 0x00000000, 0x00000000, 0x00000000,
830         0x00000000, 0x00000000, 0x00000000, 0x00000000,
831 };
832
833 /* Packet types for esp */
834 static const u32 ice_ptypes_esp[] = {
835         0x00000000, 0x00000000, 0x00000000, 0x00000000,
836         0x00000000, 0x00000003, 0x00000000, 0x00000000,
837         0x00000000, 0x00000000, 0x00000000, 0x00000000,
838         0x00000000, 0x00000000, 0x00000000, 0x00000000,
839         0x00000000, 0x00000000, 0x00000000, 0x00000000,
840         0x00000000, 0x00000000, 0x00000000, 0x00000000,
841         0x00000000, 0x00000000, 0x00000000, 0x00000000,
842         0x00000000, 0x00000000, 0x00000000, 0x00000000,
843 };
844
845 /* Packet types for ah */
846 static const u32 ice_ptypes_ah[] = {
847         0x00000000, 0x00000000, 0x00000000, 0x00000000,
848         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
849         0x00000000, 0x00000000, 0x00000000, 0x00000000,
850         0x00000000, 0x00000000, 0x00000000, 0x00000000,
851         0x00000000, 0x00000000, 0x00000000, 0x00000000,
852         0x00000000, 0x00000000, 0x00000000, 0x00000000,
853         0x00000000, 0x00000000, 0x00000000, 0x00000000,
854         0x00000000, 0x00000000, 0x00000000, 0x00000000,
855 };
856
857 /* Packet types for packets with NAT_T ESP header */
858 static const u32 ice_ptypes_nat_t_esp[] = {
859         0x00000000, 0x00000000, 0x00000000, 0x00000000,
860         0x00000000, 0x00000030, 0x00000000, 0x00000000,
861         0x00000000, 0x00000000, 0x00000000, 0x00000000,
862         0x00000000, 0x00000000, 0x00000000, 0x00000000,
863         0x00000000, 0x00000000, 0x00000000, 0x00000000,
864         0x00000000, 0x00000000, 0x00000000, 0x00000000,
865         0x00000000, 0x00000000, 0x00000000, 0x00000000,
866         0x00000000, 0x00000000, 0x00000000, 0x00000000,
867 };
868
869 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
870         0x00000846, 0x00000000, 0x00000000, 0x00000000,
871         0x00000000, 0x00000000, 0x00000000, 0x00000000,
872         0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
873         0x00000000, 0x00000000, 0x00000000, 0x00000000,
874         0x00000000, 0x00000000, 0x00000000, 0x00000000,
875         0x00000000, 0x00000000, 0x00000000, 0x00000000,
876         0x00000000, 0x00000000, 0x00000000, 0x00000000,
877         0x00000000, 0x00000000, 0x00000000, 0x00000000,
878 };
879
880 static const u32 ice_ptypes_gtpu_no_ip[] = {
881         0x00000000, 0x00000000, 0x00000000, 0x00000000,
882         0x00000000, 0x00000000, 0x00000000, 0x00000000,
883         0x00000000, 0x00000000, 0x00000600, 0x00000000,
884         0x00000000, 0x00000000, 0x00000000, 0x00000000,
885         0x00000000, 0x00000000, 0x00000000, 0x00000000,
886         0x00000000, 0x00000000, 0x00000000, 0x00000000,
887         0x00000000, 0x00000000, 0x00000000, 0x00000000,
888         0x00000000, 0x00000000, 0x00000000, 0x00000000,
889 };
890
891 static const u32 ice_ptypes_ecpri_tp0[] = {
892         0x00000000, 0x00000000, 0x00000000, 0x00000000,
893         0x00000000, 0x00000000, 0x00000000, 0x00000000,
894         0x00000000, 0x00000000, 0x00000000, 0x00000400,
895         0x00000000, 0x00000000, 0x00000000, 0x00000000,
896         0x00000000, 0x00000000, 0x00000000, 0x00000000,
897         0x00000000, 0x00000000, 0x00000000, 0x00000000,
898         0x00000000, 0x00000000, 0x00000000, 0x00000000,
899         0x00000000, 0x00000000, 0x00000000, 0x00000000,
900 };
901
902 static const u32 ice_ptypes_udp_ecpri_tp0[] = {
903         0x00000000, 0x00000000, 0x00000000, 0x00000000,
904         0x00000000, 0x00000000, 0x00000000, 0x00000000,
905         0x00000000, 0x00000000, 0x00000000, 0x00100000,
906         0x00000000, 0x00000000, 0x00000000, 0x00000000,
907         0x00000000, 0x00000000, 0x00000000, 0x00000000,
908         0x00000000, 0x00000000, 0x00000000, 0x00000000,
909         0x00000000, 0x00000000, 0x00000000, 0x00000000,
910         0x00000000, 0x00000000, 0x00000000, 0x00000000,
911 };
912
913 static const u32 ice_ptypes_l2tpv2[] = {
914         0x00000000, 0x00000000, 0x00000000, 0x00000000,
915         0x00000000, 0x00000000, 0x00000000, 0x00000000,
916         0x00000000, 0x00000000, 0x00000000, 0x00000000,
917         0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
918         0x00000000, 0x00000000, 0x00000000, 0x00000000,
919         0x00000000, 0x00000000, 0x00000000, 0x00000000,
920         0x00000000, 0x00000000, 0x00000000, 0x00000000,
921         0x00000000, 0x00000000, 0x00000000, 0x00000000,
922 };
923
924 static const u32 ice_ptypes_ppp[] = {
925         0x00000000, 0x00000000, 0x00000000, 0x00000000,
926         0x00000000, 0x00000000, 0x00000000, 0x00000000,
927         0x00000000, 0x00000000, 0x00000000, 0x00000000,
928         0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
929         0x00000000, 0x00000000, 0x00000000, 0x00000000,
930         0x00000000, 0x00000000, 0x00000000, 0x00000000,
931         0x00000000, 0x00000000, 0x00000000, 0x00000000,
932         0x00000000, 0x00000000, 0x00000000, 0x00000000,
933 };
934
935 static const u32 ice_ptypes_ipv4_frag[] = {
936         0x00400000, 0x00000000, 0x00000000, 0x00000000,
937         0x00000000, 0x00000000, 0x00000000, 0x00000000,
938         0x00000000, 0x00000000, 0x00000000, 0x00000000,
939         0x00000000, 0x00000000, 0x00000000, 0x00000000,
940         0x00000000, 0x00000000, 0x00000000, 0x00000000,
941         0x00000000, 0x00000000, 0x00000000, 0x00000000,
942         0x00000000, 0x00000000, 0x00000000, 0x00000000,
943         0x00000000, 0x00000000, 0x00000000, 0x00000000,
944 };
945
946 static const u32 ice_ptypes_ipv6_frag[] = {
947         0x00000000, 0x00000000, 0x01000000, 0x00000000,
948         0x00000000, 0x00000000, 0x00000000, 0x00000000,
949         0x00000000, 0x00000000, 0x00000000, 0x00000000,
950         0x00000000, 0x00000000, 0x00000000, 0x00000000,
951         0x00000000, 0x00000000, 0x00000000, 0x00000000,
952         0x00000000, 0x00000000, 0x00000000, 0x00000000,
953         0x00000000, 0x00000000, 0x00000000, 0x00000000,
954         0x00000000, 0x00000000, 0x00000000, 0x00000000,
955 };
956
957 /* Manage parameters and info used during the creation of a flow profile */
958 struct ice_flow_prof_params {
959         enum ice_block blk;
960         u16 entry_length; /* # of bytes formatted entry will require */
961         u8 es_cnt;
962         struct ice_flow_prof *prof;
963
964         /* For ACL, es[0] will hold the data of ICE_RX_MDID_PKT_FLAGS_15_0,
965          * which gives us the direction flags.
966          */
967         struct ice_fv_word es[ICE_MAX_FV_WORDS];
968         /* attributes can be used to add attributes to a particular PTYPE */
969         const struct ice_ptype_attributes *attr;
970         u16 attr_cnt;
971
972         u16 mask[ICE_MAX_FV_WORDS];
973         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
974 };
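/* Editor's note: in the structure above, es[] accumulates the extraction
 * sequence (field vector) words derived from the matched fields, es_cnt
 * counts how many of those entries are in use, and mask[] carries an
 * optional per-word match mask.
 */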
975
976 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
977         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
978         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
979         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
980         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
981         ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
982         ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
983         ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP | ICE_FLOW_SEG_HDR_GRE)
984
985 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
986         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
987 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
988         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
989          ICE_FLOW_SEG_HDR_ARP)
990 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
991         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
992          ICE_FLOW_SEG_HDR_SCTP)
993 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
994 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
995         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
996
997 /**
998  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
999  * @segs: array of one or more packet segments that describe the flow
1000  * @segs_cnt: number of packet segments provided
1001  */
1002 static enum ice_status
1003 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
1004 {
1005         u8 i;
1006
1007         for (i = 0; i < segs_cnt; i++) {
1008                 /* Multiple L3 headers */
1009                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
1010                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
1011                         return ICE_ERR_PARAM;
1012
1013                 /* Multiple L4 headers */
1014                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
1015                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
1016                         return ICE_ERR_PARAM;
1017         }
1018
1019         return ICE_SUCCESS;
1020 }
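
/* Editor's illustrative note (not part of the original source): the
 * power-of-two checks above allow at most one L3 and one L4 header bit per
 * segment. For example, hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP
 * passes, while hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 fails
 * because the masked L3 value has two bits set and is not a power of two.
 */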
1021
1022 /* Sizes of fixed known protocol headers without header options */
1023 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
1024 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
1025 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
1026 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
1027 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
1028 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
1029 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
1030 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
1031 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
1032
1033 /**
1034  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
1035  * @params: information about the flow to be processed
1036  * @seg: index of packet segment whose header size is to be determined
1037  */
1038 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
1039 {
1040         u16 sz;
1041
1042         /* L2 headers */
1043         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
1044                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
1045
1046         /* L3 headers */
1047         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
1048                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
1049         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
1050                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
1051         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
1052                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
1053         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
1054                 /* An L3 header is required if an L4 header is specified */
1055                 return 0;
1056
1057         /* L4 headers */
1058         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
1059                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
1060         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
1061                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
1062         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
1063                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
1064         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
1065                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
1066
1067         return sz;
1068 }
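
/* Worked example (editor's illustration, not in the original source): a
 * segment with hdrs = ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_IPV4 |
 * ICE_FLOW_SEG_HDR_TCP and no VLAN yields ICE_FLOW_PROT_HDR_SZ_MAC +
 * ICE_FLOW_PROT_HDR_SZ_IPV4 + ICE_FLOW_PROT_HDR_SZ_TCP = 14 + 20 + 20 = 54
 * bytes, while a segment that sets an L4 header without any L3 header yields
 * 0, which callers treat as invalid.
 */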
1069
1070 /**
1071  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
1072  * @params: information about the flow to be processed
1073  *
1074  * This function identifies the packet types associated with the protocol
1075  * headers present in the packet segments of the specified flow profile.
1076  */
1077 static enum ice_status
1078 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
1079 {
1080         struct ice_flow_prof *prof;
1081         u8 i;
1082
1083         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
1084                    ICE_NONDMA_MEM);
1085
1086         prof = params->prof;
1087
1088         for (i = 0; i < params->prof->segs_cnt; i++) {
1089                 const ice_bitmap_t *src;
1090                 u32 hdrs;
1091
1092                 hdrs = prof->segs[i].hdrs;
1093
1094                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
1095                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
1096                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
1097                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1098                                        ICE_FLOW_PTYPE_MAX);
1099                 }
1100
1101                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
1102                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
1103                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1104                                        ICE_FLOW_PTYPE_MAX);
1105                 }
1106
1107                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
1108                         ice_and_bitmap(params->ptypes, params->ptypes,
1109                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
1110                                        ICE_FLOW_PTYPE_MAX);
1111                 }
1112
1113                 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
1114                         src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
1115                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1116                                        ICE_FLOW_PTYPE_MAX);
1117                 }
1118                 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1119                     (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1120                         src = i ?
1121                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
1122                                 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
1123                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1124                                        ICE_FLOW_PTYPE_MAX);
1125                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1126                            (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1127                         src = i ?
1128                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
1129                                 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
1130                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1131                                        ICE_FLOW_PTYPE_MAX);
1132                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1133                                 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1134                         src = (const ice_bitmap_t *)ice_ptypes_ipv4_frag;
1135                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1136                                        ICE_FLOW_PTYPE_MAX);
1137                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1138                                 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1139                         src = (const ice_bitmap_t *)ice_ptypes_ipv6_frag;
1140                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1141                                        ICE_FLOW_PTYPE_MAX);
1142                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1143                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1144                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
1145                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
1146                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1147                                        ICE_FLOW_PTYPE_MAX);
1148                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
1149                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
1150                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
1151                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1152                                        ICE_FLOW_PTYPE_MAX);
1153                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1154                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1155                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
1156                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
1157                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1158                                        ICE_FLOW_PTYPE_MAX);
1159                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
1160                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
1161                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
1162                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1163                                        ICE_FLOW_PTYPE_MAX);
1164                 }
1165
1166                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
1167                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
1168                         ice_and_bitmap(params->ptypes, params->ptypes,
1169                                        src, ICE_FLOW_PTYPE_MAX);
1170                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
1171                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1172                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1173                                        ICE_FLOW_PTYPE_MAX);
1174                 } else {
1175                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1176                         ice_andnot_bitmap(params->ptypes, params->ptypes, src,
1177                                           ICE_FLOW_PTYPE_MAX);
1178                 }
1179
1180                 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
1181                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
1182                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1183                                        ICE_FLOW_PTYPE_MAX);
1184                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
1185                         ice_and_bitmap(params->ptypes, params->ptypes,
1186                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
1187                                        ICE_FLOW_PTYPE_MAX);
1188                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
1189                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
1190                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1191                                        ICE_FLOW_PTYPE_MAX);
1192                 }
1193
1194                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
1195                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
1196                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
1197                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1198                                        ICE_FLOW_PTYPE_MAX);
1199                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
1200                         src = (const ice_bitmap_t *)ice_ptypes_gre_of;
1201                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1202                                        ICE_FLOW_PTYPE_MAX);
1203                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
1204                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
1205                         ice_and_bitmap(params->ptypes, params->ptypes,
1206                                        src, ICE_FLOW_PTYPE_MAX);
1207                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
1208                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
1209                         ice_and_bitmap(params->ptypes, params->ptypes,
1210                                        src, ICE_FLOW_PTYPE_MAX);
1211                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
1212                         src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
1213                         ice_and_bitmap(params->ptypes, params->ptypes,
1214                                        src, ICE_FLOW_PTYPE_MAX);
1215                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
1216                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1217                         ice_and_bitmap(params->ptypes, params->ptypes,
1218                                        src, ICE_FLOW_PTYPE_MAX);
1219
1220                         /* Attributes for GTP packet with downlink */
1221                         params->attr = ice_attr_gtpu_down;
1222                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1223                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
1224                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1225                         ice_and_bitmap(params->ptypes, params->ptypes,
1226                                        src, ICE_FLOW_PTYPE_MAX);
1227
1228                         /* Attributes for GTP packet with uplink */
1229                         params->attr = ice_attr_gtpu_up;
1230                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1231                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
1232                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1233                         ice_and_bitmap(params->ptypes, params->ptypes,
1234                                        src, ICE_FLOW_PTYPE_MAX);
1235
1236                         /* Attributes for GTP packet with Extension Header */
1237                         params->attr = ice_attr_gtpu_eh;
1238                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1239                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1240                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1241                         ice_and_bitmap(params->ptypes, params->ptypes,
1242                                        src, ICE_FLOW_PTYPE_MAX);
1243
1244                         /* Attributes for GTP packet without Extension Header */
1245                         params->attr = ice_attr_gtpu_session;
1246                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1247                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
1248                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
1249                         ice_and_bitmap(params->ptypes, params->ptypes,
1250                                        src, ICE_FLOW_PTYPE_MAX);
1251                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1252                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1253                         ice_and_bitmap(params->ptypes, params->ptypes,
1254                                        src, ICE_FLOW_PTYPE_MAX);
1255                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1256                         src = (const ice_bitmap_t *)ice_ptypes_esp;
1257                         ice_and_bitmap(params->ptypes, params->ptypes,
1258                                        src, ICE_FLOW_PTYPE_MAX);
1259                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1260                         src = (const ice_bitmap_t *)ice_ptypes_ah;
1261                         ice_and_bitmap(params->ptypes, params->ptypes,
1262                                        src, ICE_FLOW_PTYPE_MAX);
1263                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1264                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1265                         ice_and_bitmap(params->ptypes, params->ptypes,
1266                                        src, ICE_FLOW_PTYPE_MAX);
1267                 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1268                         src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1269                         ice_and_bitmap(params->ptypes, params->ptypes,
1270                                        src, ICE_FLOW_PTYPE_MAX);
1271                 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1272                         src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1273                         ice_and_bitmap(params->ptypes, params->ptypes,
1274                                        src, ICE_FLOW_PTYPE_MAX);
1275                 }
1276
1277                 if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
1278                         src = (const ice_bitmap_t *)ice_ptypes_ppp;
1279                         ice_and_bitmap(params->ptypes, params->ptypes,
1280                                        src, ICE_FLOW_PTYPE_MAX);
1281                 }
1282
1283                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1284                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1285                                 src =
1286                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1287                         else
1288                                 src =
1289                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1290
1291                         ice_and_bitmap(params->ptypes, params->ptypes,
1292                                        src, ICE_FLOW_PTYPE_MAX);
1293                 } else {
1294                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1295                         ice_andnot_bitmap(params->ptypes, params->ptypes,
1296                                           src, ICE_FLOW_PTYPE_MAX);
1297
1298                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1299                         ice_andnot_bitmap(params->ptypes, params->ptypes,
1300                                           src, ICE_FLOW_PTYPE_MAX);
1301                 }
1302         }
1303
1304         return ICE_SUCCESS;
1305 }
1306
1307 /**
1308  * ice_flow_xtract_pkt_flags - Create an extraction sequence entry for packet flags
1309  * @hw: pointer to the HW struct
1310  * @params: information about the flow to be processed
1311  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1312  *
1313  * This function allocates an extraction sequence entry for a DWORD-sized
1314  * chunk of the packet flags.
1315  */
1316 static enum ice_status
1317 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1318                           struct ice_flow_prof_params *params,
1319                           enum ice_flex_mdid_pkt_flags flags)
1320 {
1321         u8 fv_words = hw->blk[params->blk].es.fvw;
1322         u8 idx;
1323
1324         /* Make sure the number of extraction sequence entries required does not
1325          * exceed the block's capacity.
1326          */
1327         if (params->es_cnt >= fv_words)
1328                 return ICE_ERR_MAX_LIMIT;
1329
1330         /* some blocks require a reversed field vector layout */
1331         if (hw->blk[params->blk].es.reverse)
1332                 idx = fv_words - params->es_cnt - 1;
1333         else
1334                 idx = params->es_cnt;
1335
1336         params->es[idx].prot_id = ICE_PROT_META_ID;
1337         params->es[idx].off = flags;
1338         params->es_cnt++;
1339
1340         return ICE_SUCCESS;
1341 }
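
/* Editor's note with a hypothetical example (the numbers are assumptions, not
 * taken from this file): with fv_words = 48 and an empty extraction sequence
 * (es_cnt = 0), a block that uses the reversed field vector layout places this
 * metadata entry at index 47, while a non-reversed block places it at index 0.
 * The real field vector width always comes from hw->blk[blk].es.fvw.
 */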
1342
1343 /**
1344  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1345  * @hw: pointer to the HW struct
1346  * @params: information about the flow to be processed
1347  * @seg: packet segment index of the field to be extracted
1348  * @fld: ID of field to be extracted
1349  * @match: bitfield of all fields
1350  *
1351  * This function determines the protocol ID, offset, and size of the given
1352  * field. It then allocates one or more extraction sequence entries for the
1353  * given field and fills the entries with protocol ID and offset information.
1354  */
1355 static enum ice_status
1356 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1357                     u8 seg, enum ice_flow_field fld, u64 match)
1358 {
1359         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1360         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1361         u8 fv_words = hw->blk[params->blk].es.fvw;
1362         struct ice_flow_fld_info *flds;
1363         u16 cnt, ese_bits, i;
1364         u16 sib_mask = 0;
1365         u16 mask;
1366         u16 off;
1367
1368         flds = params->prof->segs[seg].fields;
1369
1370         switch (fld) {
1371         case ICE_FLOW_FIELD_IDX_ETH_DA:
1372         case ICE_FLOW_FIELD_IDX_ETH_SA:
1373         case ICE_FLOW_FIELD_IDX_S_VLAN:
1374         case ICE_FLOW_FIELD_IDX_C_VLAN:
1375                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1376                 break;
1377         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1378                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1379                 break;
1380         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1381                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1382                 break;
1383         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1384                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1385                 break;
1386         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1387         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1388                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1389
1390                 /* TTL and PROT share the same extraction sequence entry,
1391                  * so each is treated as the other's sibling when building
1392                  * the extraction sequence.
1393                  */
1394                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1395                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1396                 else
1397                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1398
1399                 /* If the sibling field is also included, that field's
1400                  * mask needs to be included.
1401                  */
1402                 if (match & BIT(sib))
1403                         sib_mask = ice_flds_info[sib].mask;
1404                 break;
1405         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1406         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1407                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1408
1409                 /* TTL and PROT share the same extraction sequence entry,
1410                  * so each is treated as the other's sibling when building
1411                  * the extraction sequence.
1412                  */
1413                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1414                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1415                 else
1416                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1417
1418                 /* If the sibling field is also included, that field's
1419                  * mask needs to be included.
1420                  */
1421                 if (match & BIT(sib))
1422                         sib_mask = ice_flds_info[sib].mask;
1423                 break;
1424         case ICE_FLOW_FIELD_IDX_IPV4_SA:
1425         case ICE_FLOW_FIELD_IDX_IPV4_DA:
1426         case ICE_FLOW_FIELD_IDX_IPV4_CHKSUM:
1427                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1428                 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1429                     params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1430                     seg == 1)
1431                         prot_id = ICE_PROT_IPV4_IL_IL;
1432                 break;
1433         case ICE_FLOW_FIELD_IDX_IPV4_ID:
1434                 prot_id = ICE_PROT_IPV4_OF_OR_S;
1435                 break;
1436         case ICE_FLOW_FIELD_IDX_IPV6_SA:
1437         case ICE_FLOW_FIELD_IDX_IPV6_DA:
1438         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1439         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1440         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1441         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1442         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1443         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1444                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1445                 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1446                     params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1447                     seg == 1)
1448                         prot_id = ICE_PROT_IPV6_IL_IL;
1449                 break;
1450         case ICE_FLOW_FIELD_IDX_IPV6_ID:
1451                 prot_id = ICE_PROT_IPV6_FRAG;
1452                 break;
1453         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1454         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1455         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1456         case ICE_FLOW_FIELD_IDX_TCP_CHKSUM:
1457                 prot_id = ICE_PROT_TCP_IL;
1458                 break;
1459         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1460         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1461         case ICE_FLOW_FIELD_IDX_UDP_CHKSUM:
1462                 prot_id = ICE_PROT_UDP_IL_OR_S;
1463                 break;
1464         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1465         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1466         case ICE_FLOW_FIELD_IDX_SCTP_CHKSUM:
1467                 prot_id = ICE_PROT_SCTP_IL;
1468                 break;
1469         case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1470         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1471         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1472         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1473         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1474         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1475         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1476                 /* GTP is accessed through UDP OF protocol */
1477                 prot_id = ICE_PROT_UDP_OF;
1478                 break;
1479         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1480                 prot_id = ICE_PROT_PPPOE;
1481                 break;
1482         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1483                 prot_id = ICE_PROT_UDP_IL_OR_S;
1484                 break;
1485         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1486                 prot_id = ICE_PROT_L2TPV3;
1487                 break;
1488         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1489                 prot_id = ICE_PROT_ESP_F;
1490                 break;
1491         case ICE_FLOW_FIELD_IDX_AH_SPI:
1492                 prot_id = ICE_PROT_ESP_2;
1493                 break;
1494         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1495                 prot_id = ICE_PROT_UDP_IL_OR_S;
1496                 break;
1497         case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1498                 prot_id = ICE_PROT_ECPRI;
1499                 break;
1500         case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1501                 prot_id = ICE_PROT_UDP_IL_OR_S;
1502                 break;
1503         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1504         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1505         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1506         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1507         case ICE_FLOW_FIELD_IDX_ARP_OP:
1508                 prot_id = ICE_PROT_ARP_OF;
1509                 break;
1510         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1511         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1512                 /* ICMP type and code share the same extraction sequence entry */
1513                 prot_id = (params->prof->segs[seg].hdrs &
1514                            ICE_FLOW_SEG_HDR_IPV4) ?
1515                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1516                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1517                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1518                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1519                 break;
1520         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1521                 prot_id = ICE_PROT_GRE_OF;
1522                 break;
1523         default:
1524                 return ICE_ERR_NOT_IMPL;
1525         }
1526
1527         /* Each extraction sequence entry is a word in size and extracts a
1528          * word starting at a word-aligned offset within a protocol header.
1529          */
1530         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1531
1532         flds[fld].xtrct.prot_id = prot_id;
1533         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1534                 ICE_FLOW_FV_EXTRACT_SZ;
1535         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1536         flds[fld].xtrct.idx = params->es_cnt;
1537         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1538
1539         /* Determine the number of word-sized extraction sequence entries
1540          * this field consumes.
1541          */
1542         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1543                                   ice_flds_info[fld].size, ese_bits);
1544
1545         /* Fill in the extraction sequence entries needed for this field */
1546         off = flds[fld].xtrct.off;
1547         mask = flds[fld].xtrct.mask;
1548         for (i = 0; i < cnt; i++) {
1549                 /* Only consume an extraction sequence entry if there is no
1550                  * sibling field associated with this field or the sibling
1551                  * entry does not already extract the word shared with this field.
1552                  */
1553                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1554                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1555                     flds[sib].xtrct.off != off) {
1556                         u8 idx;
1557
1558                         /* Make sure the number of extraction sequence entries
1559                          * required does not exceed the block's capacity
1560                          */
1561                         if (params->es_cnt >= fv_words)
1562                                 return ICE_ERR_MAX_LIMIT;
1563
1564                         /* some blocks require a reversed field vector layout */
1565                         if (hw->blk[params->blk].es.reverse)
1566                                 idx = fv_words - params->es_cnt - 1;
1567                         else
1568                                 idx = params->es_cnt;
1569
1570                         params->es[idx].prot_id = prot_id;
1571                         params->es[idx].off = off;
1572                         params->mask[idx] = mask | sib_mask;
1573                         params->es_cnt++;
1574                 }
1575
1576                 off += ICE_FLOW_FV_EXTRACT_SZ;
1577         }
1578
1579         return ICE_SUCCESS;
1580 }
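
/* Worked example (editor's sketch; it assumes ICE_FLOW_FV_EXTRACT_SZ is
 * 2 bytes and that the IPv4 source address field is declared at byte offset 12
 * of the IPv4 header with a size of ICE_FLOW_FLD_SZ_IPV4_ADDR = 4 bytes): then
 * ese_bits = 16, xtrct.off = (96 / 16) * 2 = 12, xtrct.disp = 96 % 16 = 0 and
 * cnt = DIVIDE_AND_ROUND_UP(0 + 32, 16) = 2, so the field consumes two
 * word-sized extraction sequence entries at protocol offsets 12 and 14.
 */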
1581
1582 /**
1583  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1584  * @hw: pointer to the HW struct
1585  * @params: information about the flow to be processed
1586  * @seg: index of packet segment whose raw fields are to be extracted
1587  */
1588 static enum ice_status
1589 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1590                      u8 seg)
1591 {
1592         u16 fv_words;
1593         u16 hdrs_sz;
1594         u8 i;
1595
1596         if (!params->prof->segs[seg].raws_cnt)
1597                 return ICE_SUCCESS;
1598
1599         if (params->prof->segs[seg].raws_cnt >
1600             ARRAY_SIZE(params->prof->segs[seg].raws))
1601                 return ICE_ERR_MAX_LIMIT;
1602
1603         /* Offsets within the segment headers are not supported */
1604         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1605         if (!hdrs_sz)
1606                 return ICE_ERR_PARAM;
1607
1608         fv_words = hw->blk[params->blk].es.fvw;
1609
1610         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1611                 struct ice_flow_seg_fld_raw *raw;
1612                 u16 off, cnt, j;
1613
1614                 raw = &params->prof->segs[seg].raws[i];
1615
1616                 /* Storing extraction information */
1617                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1618                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1619                         ICE_FLOW_FV_EXTRACT_SZ;
1620                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1621                         BITS_PER_BYTE;
1622                 raw->info.xtrct.idx = params->es_cnt;
1623
1624                 /* Determine the number of field vector entries this raw field
1625                  * consumes.
1626                  */
1627                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1628                                           (raw->info.src.last * BITS_PER_BYTE),
1629                                           (ICE_FLOW_FV_EXTRACT_SZ *
1630                                            BITS_PER_BYTE));
1631                 off = raw->info.xtrct.off;
1632                 for (j = 0; j < cnt; j++) {
1633                         u16 idx;
1634
1635                         /* Make sure the number of extraction sequence entries
1636                          * required does not exceed the block's capacity
1637                          */
1638                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1639                             params->es_cnt >= ICE_MAX_FV_WORDS)
1640                                 return ICE_ERR_MAX_LIMIT;
1641
1642                         /* some blocks require a reversed field vector layout */
1643                         if (hw->blk[params->blk].es.reverse)
1644                                 idx = fv_words - params->es_cnt - 1;
1645                         else
1646                                 idx = params->es_cnt;
1647
1648                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1649                         params->es[idx].off = off;
1650                         params->es_cnt++;
1651                         off += ICE_FLOW_FV_EXTRACT_SZ;
1652                 }
1653         }
1654
1655         return ICE_SUCCESS;
1656 }
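
/* Worked example (editor's illustration, assuming ICE_FLOW_FV_EXTRACT_SZ is
 * 2 bytes): a raw field with raw->off = 3 and raw->info.src.last = 4 bytes
 * gives xtrct.off = (3 / 2) * 2 = 2, xtrct.disp = (3 % 2) * 8 = 8 bits and
 * cnt = DIVIDE_AND_ROUND_UP(8 + 32, 16) = 3, i.e. three extraction entries
 * covering protocol offsets 2, 4 and 6.
 */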
1657
1658 /**
1659  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1660  * @hw: pointer to the HW struct
1661  * @params: information about the flow to be processed
1662  *
1663  * This function iterates through all matched fields in the given segments, and
1664  * creates an extraction sequence for the fields.
1665  */
1666 static enum ice_status
1667 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1668                           struct ice_flow_prof_params *params)
1669 {
1670         enum ice_status status = ICE_SUCCESS;
1671         u8 i;
1672
1673         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1674          * packet flags
1675          */
1676         if (params->blk == ICE_BLK_ACL) {
1677                 status = ice_flow_xtract_pkt_flags(hw, params,
1678                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1679                 if (status)
1680                         return status;
1681         }
1682
1683         for (i = 0; i < params->prof->segs_cnt; i++) {
1684                 u64 match = params->prof->segs[i].match;
1685                 enum ice_flow_field j;
1686
1687                 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1688                                      ICE_FLOW_FIELD_IDX_MAX) {
1689                         status = ice_flow_xtract_fld(hw, params, i, j, match);
1690                         if (status)
1691                                 return status;
1692                         ice_clear_bit(j, (ice_bitmap_t *)&match);
1693                 }
1694
1695                 /* Process raw matching bytes */
1696                 status = ice_flow_xtract_raws(hw, params, i);
1697                 if (status)
1698                         return status;
1699         }
1700
1701         return status;
1702 }
1703
1704 /**
1705  * ice_flow_sel_acl_scen - select the ACL scenario for a flow profile
1706  * @hw: pointer to the hardware structure
1707  * @params: information about the flow to be processed
1708  *
1709  * This function selects the narrowest ACL scenario whose effective width
1710  * can accommodate the entry length computed for the profile.
1711  */
1712 static enum ice_status
1713 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1714 {
1715         /* Find the best-fit scenario for the provided match width */
1716         struct ice_acl_scen *cand_scen = NULL, *scen;
1717
1718         if (!hw->acl_tbl)
1719                 return ICE_ERR_DOES_NOT_EXIST;
1720
1721         /* Loop through each scenario and pick the narrowest one whose
1722          * effective width still fits the required entry length
1723          */
1724         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1725                 if (scen->eff_width >= params->entry_length &&
1726                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1727                         cand_scen = scen;
1728         if (!cand_scen)
1729                 return ICE_ERR_DOES_NOT_EXIST;
1730
1731         params->prof->cfg.scen = cand_scen;
1732
1733         return ICE_SUCCESS;
1734 }
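
/* Hypothetical example (editor's illustration): if the ACL table holds
 * scenarios with effective widths of 16, 32 and 64 bytes and the profile
 * needs entry_length = 24 bytes, the loop above selects the 32-byte scenario,
 * the narrowest one that still fits the entry.
 */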
1735
1736 /**
1737  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1738  * @params: information about the flow to be processed
1739  */
1740 static enum ice_status
1741 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1742 {
1743         u16 index, i, range_idx = 0;
1744
1745         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1746
1747         for (i = 0; i < params->prof->segs_cnt; i++) {
1748                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1749                 u8 j;
1750
1751                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1752                                      ICE_FLOW_FIELD_IDX_MAX) {
1753                         struct ice_flow_fld_info *fld = &seg->fields[j];
1754
1755                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1756
1757                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1758                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1759
1760                                 /* Range checking only supported for single
1761                                  * words
1762                                  */
1763                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1764                                                         fld->xtrct.disp,
1765                                                         BITS_PER_BYTE * 2) > 1)
1766                                         return ICE_ERR_PARAM;
1767
1768                                 /* Ranges must define low and high values */
1769                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1770                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1771                                         return ICE_ERR_PARAM;
1772
1773                                 fld->entry.val = range_idx++;
1774                         } else {
1775                                 /* Store adjusted byte-length of field for later
1776                                  * use, taking into account potential
1777                                  * non-byte-aligned displacement
1778                                  */
1779                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1780                                         (ice_flds_info[j].size +
1781                                          (fld->xtrct.disp % BITS_PER_BYTE),
1782                                          BITS_PER_BYTE);
1783                                 fld->entry.val = index;
1784                                 index += fld->entry.last;
1785                         }
1786                 }
1787
1788                 for (j = 0; j < seg->raws_cnt; j++) {
1789                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1790
1791                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1792                         raw->info.entry.val = index;
1793                         raw->info.entry.last = raw->info.src.last;
1794                         index += raw->info.entry.last;
1795                 }
1796         }
1797
1798         /* Currently, only the byte selection base is supported, which
1799          * limits the effective entry size to 30 bytes. Reject anything
1800          * larger.
1801          */
1802         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1803                 return ICE_ERR_PARAM;
1804
1805         /* Only 8 range checkers are available per profile; reject anything
1806          * requesting more
1807          */
1808         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1809                 return ICE_ERR_PARAM;
1810
1811         /* Store # bytes required for entry for later use */
1812         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1813
1814         return ICE_SUCCESS;
1815 }
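
/* Worked example (editor's sketch; byte alignment of the fields is assumed
 * for illustration): a single segment matching the IPv4 source and
 * destination addresses (4 bytes each) places the source address at
 * byte-select index ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX and the destination
 * address right after it, giving params->entry_length = 8 with range_idx = 0,
 * since neither field is a range-checked match.
 */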
1816
1817 /**
1818  * ice_flow_proc_segs - process all packet segments associated with a profile
1819  * @hw: pointer to the HW struct
1820  * @params: information about the flow to be processed
1821  */
1822 static enum ice_status
1823 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1824 {
1825         enum ice_status status;
1826
1827         status = ice_flow_proc_seg_hdrs(params);
1828         if (status)
1829                 return status;
1830
1831         status = ice_flow_create_xtrct_seq(hw, params);
1832         if (status)
1833                 return status;
1834
1835         switch (params->blk) {
1836         case ICE_BLK_FD:
1837         case ICE_BLK_RSS:
1838                 status = ICE_SUCCESS;
1839                 break;
1840         case ICE_BLK_ACL:
1841                 status = ice_flow_acl_def_entry_frmt(params);
1842                 if (status)
1843                         return status;
1844                 status = ice_flow_sel_acl_scen(hw, params);
1845                 if (status)
1846                         return status;
1847                 break;
1848         default:
1849                 return ICE_ERR_NOT_IMPL;
1850         }
1851
1852         return status;
1853 }
1854
1855 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1856 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1857 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1858
1859 /**
1860  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1861  * @hw: pointer to the HW struct
1862  * @blk: classification stage
1863  * @dir: flow direction
1864  * @segs: array of one or more packet segments that describe the flow
1865  * @segs_cnt: number of packet segments provided
1866  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1867  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1868  */
1869 static struct ice_flow_prof *
1870 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1871                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1872                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1873 {
1874         struct ice_flow_prof *p, *prof = NULL;
1875
1876         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1877         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1878                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1879                     segs_cnt && segs_cnt == p->segs_cnt) {
1880                         u8 i;
1881
1882                         /* Check for profile-VSI association if specified */
1883                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1884                             ice_is_vsi_valid(hw, vsi_handle) &&
1885                             !ice_is_bit_set(p->vsis, vsi_handle))
1886                                 continue;
1887
1888                         /* Protocol headers must be checked. Matched fields are
1889                          * checked if specified.
1890                          */
1891                         for (i = 0; i < segs_cnt; i++)
1892                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1893                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1894                                      segs[i].match != p->segs[i].match))
1895                                         break;
1896
1897                         /* A match is found if all segments are matched */
1898                         if (i == segs_cnt) {
1899                                 prof = p;
1900                                 break;
1901                         }
1902                 }
1903         ice_release_lock(&hw->fl_profs_locks[blk]);
1904
1905         return prof;
1906 }
1907
1908 /**
1909  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1910  * @hw: pointer to the HW struct
1911  * @blk: classification stage
1912  * @dir: flow direction
1913  * @segs: array of one or more packet segments that describe the flow
1914  * @segs_cnt: number of packet segments provided
1915  */
1916 u64
1917 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1918                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1919 {
1920         struct ice_flow_prof *p;
1921
1922         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1923                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1924
1925         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1926 }
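
/* Minimal usage sketch (editor's illustration; the local variables and the
 * choice of the RSS block are assumptions, not taken from this file):
 *
 *      struct ice_flow_seg_info segs[1] = { 0 };
 *      u64 id;
 *
 *      segs[0].hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP;
 *      id = ice_flow_find_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, segs, 1);
 *
 * A return value other than ICE_FLOW_PROF_ID_INVAL means a profile with the
 * same header layout and matched fields already exists, since the lookup is
 * performed with ICE_FLOW_FIND_PROF_CHK_FLDS set.
 */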
1927
1928 /**
1929  * ice_flow_find_prof_id - Look up a profile with given profile ID
1930  * @hw: pointer to the HW struct
1931  * @blk: classification stage
1932  * @prof_id: unique ID to identify this flow profile
1933  */
1934 static struct ice_flow_prof *
1935 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1936 {
1937         struct ice_flow_prof *p;
1938
1939         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1940                 if (p->id == prof_id)
1941                         return p;
1942
1943         return NULL;
1944 }
1945
1946 /**
1947  * ice_dealloc_flow_entry - Deallocate flow entry memory
1948  * @hw: pointer to the HW struct
1949  * @entry: flow entry to be removed
1950  */
1951 static void
1952 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1953 {
1954         if (!entry)
1955                 return;
1956
1957         if (entry->entry)
1958                 ice_free(hw, entry->entry);
1959
1960         if (entry->range_buf) {
1961                 ice_free(hw, entry->range_buf);
1962                 entry->range_buf = NULL;
1963         }
1964
1965         if (entry->acts) {
1966                 ice_free(hw, entry->acts);
1967                 entry->acts = NULL;
1968                 entry->acts_cnt = 0;
1969         }
1970
1971         ice_free(hw, entry);
1972 }
1973
1974 /**
1975  * ice_flow_get_hw_prof - return the HW profile ID for a specific profile ID handle
1976  * @hw: pointer to the HW struct
1977  * @blk: classification stage
1978  * @prof_id: the profile ID handle
1979  * @hw_prof_id: pointer to variable to receive the HW profile ID
1980  */
1981 enum ice_status
1982 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1983                      u8 *hw_prof_id)
1984 {
1985         enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1986         struct ice_prof_map *map;
1987
1988         ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1989         map = ice_search_prof_id(hw, blk, prof_id);
1990         if (map) {
1991                 *hw_prof_id = map->prof_id;
1992                 status = ICE_SUCCESS;
1993         }
1994         ice_release_lock(&hw->blk[blk].es.prof_map_lock);
1995         return status;
1996 }
1997
1998 #define ICE_ACL_INVALID_SCEN    0x3f
1999
2000 /**
2001  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
2002  * @hw: pointer to the hardware structure
2003  * @prof: pointer to flow profile
2004  * @buf: destination buffer the function writes the partial extraction sequence to
2005  *
2006  * returns ICE_SUCCESS if no PF is associated with the given profile
2007  * returns ICE_ERR_IN_USE if at least one PF is associated with the given profile
2008  * returns another error code on a real failure
2009  */
2010 static enum ice_status
2011 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
2012                             struct ice_aqc_acl_prof_generic_frmt *buf)
2013 {
2014         enum ice_status status;
2015         u8 prof_id = 0;
2016
2017         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2018         if (status)
2019                 return status;
2020
2021         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
2022         if (status)
2023                 return status;
2024
2025         /* If the scenarios associated with every PF are all 0 or all
2026          * ICE_ACL_INVALID_SCEN (63) for the given profile, then the profile
2027          * has not been configured yet.
2028          */
2029         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
2030             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
2031             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
2032             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
2033                 return ICE_SUCCESS;
2034
2035         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
2036             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
2037             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
2038             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
2039             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
2040             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
2041             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
2042             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
2043                 return ICE_SUCCESS;
2044
2045         return ICE_ERR_IN_USE;
2046 }
2047
2048 /**
2049  * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
2050  * @hw: pointer to the hardware structure
2051  * @acts: array of actions to be performed on a match
2052  * @acts_cnt: number of actions
2053  */
2054 static enum ice_status
2055 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
2056                            u8 acts_cnt)
2057 {
2058         int i;
2059
2060         for (i = 0; i < acts_cnt; i++) {
2061                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2062                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2063                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2064                         struct ice_acl_cntrs cntrs = { 0 };
2065                         enum ice_status status;
2066
2067                         /* amount is unused in the dealloc path but the common
2068                          * parameter check routine wants a value set, as zero
2069                          * is invalid for the check. Just set it.
2070                          */
2071                         cntrs.amount = 1;
2072                         cntrs.bank = 0; /* Only bank0 for the moment */
2073                         cntrs.first_cntr =
2074                                         LE16_TO_CPU(acts[i].data.acl_act.value);
2075                         cntrs.last_cntr =
2076                                         LE16_TO_CPU(acts[i].data.acl_act.value);
2077
2078                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2079                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2080                         else
2081                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2082
2083                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
2084                         if (status)
2085                                 return status;
2086                 }
2087         }
2088         return ICE_SUCCESS;
2089 }
2090
2091 /**
2092  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
2093  * @hw: pointer to the hardware structure
2094  * @prof: pointer to flow profile
2095  *
2096  * Disassociate the scenario from the profile for the current PF.
2097  */
2098 static enum ice_status
2099 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
2100 {
2101         struct ice_aqc_acl_prof_generic_frmt buf;
2102         enum ice_status status = ICE_SUCCESS;
2103         u8 prof_id = 0;
2104
2105         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2106
2107         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2108         if (status)
2109                 return status;
2110
2111         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
2112         if (status)
2113                 return status;
2114
2115         /* Clear scenario for this PF */
2116         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
2117         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2118
2119         return status;
2120 }
2121
2122 /**
2123  * ice_flow_rem_entry_sync - Remove a flow entry
2124  * @hw: pointer to the HW struct
2125  * @blk: classification stage
2126  * @entry: flow entry to be removed
2127  */
2128 static enum ice_status
2129 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
2130                         struct ice_flow_entry *entry)
2131 {
2132         if (!entry)
2133                 return ICE_ERR_BAD_PTR;
2134
2135         if (blk == ICE_BLK_ACL) {
2136                 enum ice_status status;
2137
2138                 if (!entry->prof)
2139                         return ICE_ERR_BAD_PTR;
2140
2141                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
2142                                            entry->scen_entry_idx);
2143                 if (status)
2144                         return status;
2145
2146                 /* Check whether we need to release an ACL counter. */
2147                 if (entry->acts_cnt && entry->acts)
2148                         ice_flow_acl_free_act_cntr(hw, entry->acts,
2149                                                    entry->acts_cnt);
2150         }
2151
2152         LIST_DEL(&entry->l_entry);
2153
2154         ice_dealloc_flow_entry(hw, entry);
2155
2156         return ICE_SUCCESS;
2157 }
2158
2159 /**
2160  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
2161  * @hw: pointer to the HW struct
2162  * @blk: classification stage
2163  * @dir: flow direction
2164  * @prof_id: unique ID to identify this flow profile
2165  * @segs: array of one or more packet segments that describe the flow
2166  * @segs_cnt: number of packet segments provided
2167  * @acts: array of default actions
2168  * @acts_cnt: number of default actions
2169  * @prof: on return, points to the newly added flow profile
2170  *
2171  * Assumption: the caller has acquired the lock to the profile list
2172  */
2173 static enum ice_status
2174 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
2175                        enum ice_flow_dir dir, u64 prof_id,
2176                        struct ice_flow_seg_info *segs, u8 segs_cnt,
2177                        struct ice_flow_action *acts, u8 acts_cnt,
2178                        struct ice_flow_prof **prof)
2179 {
2180         struct ice_flow_prof_params *params;
2181         enum ice_status status;
2182         u8 i;
2183
2184         if (!prof || (acts_cnt && !acts))
2185                 return ICE_ERR_BAD_PTR;
2186
2187         params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2188         if (!params)
2189                 return ICE_ERR_NO_MEMORY;
2190
2191         params->prof = (struct ice_flow_prof *)
2192                 ice_malloc(hw, sizeof(*params->prof));
2193         if (!params->prof) {
2194                 status = ICE_ERR_NO_MEMORY;
2195                 goto free_params;
2196         }
2197
2198         /* initialize extraction sequence to all invalid (0xff) */
2199         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2200                 params->es[i].prot_id = ICE_PROT_INVALID;
2201                 params->es[i].off = ICE_FV_OFFSET_INVAL;
2202         }
2203
2204         params->blk = blk;
2205         params->prof->id = prof_id;
2206         params->prof->dir = dir;
2207         params->prof->segs_cnt = segs_cnt;
2208
2209         /* Make a copy of the segments that need to be persistent in the flow
2210          * profile instance
2211          */
2212         for (i = 0; i < segs_cnt; i++)
2213                 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
2214                            ICE_NONDMA_TO_NONDMA);
2215
2216         /* Make a copy of the actions that need to be persistent in the flow
2217          * profile instance.
2218          */
2219         if (acts_cnt) {
2220                 params->prof->acts = (struct ice_flow_action *)
2221                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2222                                    ICE_NONDMA_TO_NONDMA);
2223
2224                 if (!params->prof->acts) {
2225                         status = ICE_ERR_NO_MEMORY;
2226                         goto out;
2227                 }
2228         }
2229
2230         status = ice_flow_proc_segs(hw, params);
2231         if (status) {
2232                 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2233                 goto out;
2234         }
2235
2236         /* Add a HW profile for this flow profile */
2237         status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2238                               params->attr, params->attr_cnt, params->es,
2239                               params->mask);
2240         if (status) {
2241                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
2242                 goto out;
2243         }
2244
2245         INIT_LIST_HEAD(&params->prof->entries);
2246         ice_init_lock(&params->prof->entries_lock);
2247         *prof = params->prof;
2248
2249 out:
2250         if (status) {
2251                 if (params->prof->acts)
2252                         ice_free(hw, params->prof->acts);
2253                 ice_free(hw, params->prof);
2254         }
2255 free_params:
2256         ice_free(hw, params);
2257
2258         return status;
2259 }
2260
2261 /**
2262  * ice_flow_rem_prof_sync - remove a flow profile
2263  * @hw: pointer to the hardware structure
2264  * @blk: classification stage
2265  * @prof: pointer to flow profile to remove
2266  *
2267  * Assumption: the caller has acquired the lock to the profile list
2268  */
2269 static enum ice_status
2270 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2271                        struct ice_flow_prof *prof)
2272 {
2273         enum ice_status status;
2274
2275         /* Remove all remaining flow entries before removing the flow profile */
2276         if (!LIST_EMPTY(&prof->entries)) {
2277                 struct ice_flow_entry *e, *t;
2278
2279                 ice_acquire_lock(&prof->entries_lock);
2280
2281                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2282                                          l_entry) {
2283                         status = ice_flow_rem_entry_sync(hw, blk, e);
2284                         if (status)
2285                                 break;
2286                 }
2287
2288                 ice_release_lock(&prof->entries_lock);
2289         }
2290
2291         if (blk == ICE_BLK_ACL) {
2292                 struct ice_aqc_acl_profile_ranges query_rng_buf;
2293                 struct ice_aqc_acl_prof_generic_frmt buf;
2294                 u8 prof_id = 0;
2295
2296                 /* Disassociate the scenario from the profile for the PF */
2297                 status = ice_flow_acl_disassoc_scen(hw, prof);
2298                 if (status)
2299                         return status;
2300
2301                 /* Clear the range-checker if the profile ID is no longer
2302                  * used by any PF
2303                  */
2304                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2305                 if (status && status != ICE_ERR_IN_USE) {
2306                         return status;
2307                 } else if (!status) {
2308                         /* Clear the range-checker value for profile ID */
2309                         ice_memset(&query_rng_buf, 0,
2310                                    sizeof(struct ice_aqc_acl_profile_ranges),
2311                                    ICE_NONDMA_MEM);
2312
2313                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
2314                                                       &prof_id);
2315                         if (status)
2316                                 return status;
2317
2318                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2319                                                           &query_rng_buf, NULL);
2320                         if (status)
2321                                 return status;
2322                 }
2323         }
2324
2325         /* Remove all hardware profiles associated with this flow profile */
2326         status = ice_rem_prof(hw, blk, prof->id);
2327         if (!status) {
2328                 LIST_DEL(&prof->l_entry);
2329                 ice_destroy_lock(&prof->entries_lock);
2330                 if (prof->acts)
2331                         ice_free(hw, prof->acts);
2332                 ice_free(hw, prof);
2333         }
2334
2335         return status;
2336 }
2337
2338 /**
2339  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2340  * @buf: destination buffer the function writes the partial xtrct sequence to
2341  * @info: Info about field
2342  */
2343 static void
2344 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2345                                struct ice_flow_fld_info *info)
2346 {
2347         u16 dst, i;
2348         u8 src;
2349
2350         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2351                 info->xtrct.disp / BITS_PER_BYTE;
2352         dst = info->entry.val;
2353         for (i = 0; i < info->entry.last; i++)
2354                 /* HW stores field vector words in LE, convert words back to BE
2355                  * so constructed entries will end up in network order
2356                  */
2357                 buf->byte_selection[dst++] = src++ ^ 1;
2358 }
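
/* Worked example of the byte swap above (illustrative only): if a field
 * starts at field-vector byte 4 and spans four bytes, the source bytes
 * written into byte_selection[] are 5, 4, 7, 6 - each two-byte LE word has
 * its bytes swapped so that the constructed entry ends up in network
 * (big-endian) order.
 */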
2359
2360 /**
2361  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2362  * @hw: pointer to the hardware structure
2363  * @prof: pointer to flow profile
2364  */
2365 static enum ice_status
2366 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2367 {
2368         struct ice_aqc_acl_prof_generic_frmt buf;
2369         struct ice_flow_fld_info *info;
2370         enum ice_status status;
2371         u8 prof_id = 0;
2372         u16 i;
2373
2374         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2375
2376         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2377         if (status)
2378                 return status;
2379
2380         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2381         if (status && status != ICE_ERR_IN_USE)
2382                 return status;
2383
2384         if (!status) {
2385                 /* Program the profile dependent configuration. This is done
2386                  * only once regardless of the number of PFs using that profile
2387                  */
2388                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2389
2390                 for (i = 0; i < prof->segs_cnt; i++) {
2391                         struct ice_flow_seg_info *seg = &prof->segs[i];
2392                         u16 j;
2393
2394                         ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2395                                              ICE_FLOW_FIELD_IDX_MAX) {
2396                                 info = &seg->fields[j];
2397
2398                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2399                                         buf.word_selection[info->entry.val] =
2400                                                 info->xtrct.idx;
2401                                 else
2402                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
2403                                                                        info);
2404                         }
2405
2406                         for (j = 0; j < seg->raws_cnt; j++) {
2407                                 info = &seg->raws[j].info;
2408                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2409                         }
2410                 }
2411
2412                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2413                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2414                            ICE_NONDMA_MEM);
2415         }
2416
2417         /* Update the current PF */
2418         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2419         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2420
2421         return status;
2422 }
2423
2424 /**
2425  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2426  * @hw: pointer to the hardware structure
2427  * @blk: classification stage
2428  * @vsi_handle: software VSI handle
2429  * @vsig: target VSI group
2430  *
2431  * Assumption: the caller has already verified that the VSI to
2432  * be added has the same characteristics as the VSIG and will
2433  * thereby have access to all resources added to that VSIG.
2434  */
2435 enum ice_status
2436 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2437                         u16 vsig)
2438 {
2439         enum ice_status status;
2440
2441         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2442                 return ICE_ERR_PARAM;
2443
2444         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2445         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2446                                   vsig);
2447         ice_release_lock(&hw->fl_profs_locks[blk]);
2448
2449         return status;
2450 }
2451
2452 /**
2453  * ice_flow_assoc_prof - associate a VSI with a flow profile
2454  * @hw: pointer to the hardware structure
2455  * @blk: classification stage
2456  * @prof: pointer to flow profile
2457  * @vsi_handle: software VSI handle
2458  *
2459  * Assumption: the caller has acquired the lock to the profile list
2460  * and the software VSI handle has been validated
2461  */
2462 enum ice_status
2463 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2464                     struct ice_flow_prof *prof, u16 vsi_handle)
2465 {
2466         enum ice_status status = ICE_SUCCESS;
2467
2468         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2469                 if (blk == ICE_BLK_ACL) {
2470                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2471                         if (status)
2472                                 return status;
2473                 }
2474                 status = ice_add_prof_id_flow(hw, blk,
2475                                               ice_get_hw_vsi_num(hw,
2476                                                                  vsi_handle),
2477                                               prof->id);
2478                 if (!status)
2479                         ice_set_bit(vsi_handle, prof->vsis);
2480                 else
2481                         ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2482                                   status);
2483         }
2484
2485         return status;
2486 }
2487
2488 /**
2489  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2490  * @hw: pointer to the hardware structure
2491  * @blk: classification stage
2492  * @prof: pointer to flow profile
2493  * @vsi_handle: software VSI handle
2494  *
2495  * Assumption: the caller has acquired the lock to the profile list
2496  * and the software VSI handle has been validated
2497  */
2498 static enum ice_status
2499 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2500                        struct ice_flow_prof *prof, u16 vsi_handle)
2501 {
2502         enum ice_status status = ICE_SUCCESS;
2503
2504         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2505                 status = ice_rem_prof_id_flow(hw, blk,
2506                                               ice_get_hw_vsi_num(hw,
2507                                                                  vsi_handle),
2508                                               prof->id);
2509                 if (!status)
2510                         ice_clear_bit(vsi_handle, prof->vsis);
2511                 else
2512                         ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2513                                   status);
2514         }
2515
2516         return status;
2517 }
2518
2519 /**
2520  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2521  * @hw: pointer to the HW struct
2522  * @blk: classification stage
2523  * @dir: flow direction
2524  * @prof_id: unique ID to identify this flow profile
2525  * @segs: array of one or more packet segments that describe the flow
2526  * @segs_cnt: number of packet segments provided
2527  * @acts: array of default actions
2528  * @acts_cnt: number of default actions
2529  * @prof: stores the returned flow profile added
2530  */
2531 enum ice_status
2532 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2533                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2534                   struct ice_flow_action *acts, u8 acts_cnt,
2535                   struct ice_flow_prof **prof)
2536 {
2537         enum ice_status status;
2538
2539         if (segs_cnt > ICE_FLOW_SEG_MAX)
2540                 return ICE_ERR_MAX_LIMIT;
2541
2542         if (!segs_cnt)
2543                 return ICE_ERR_PARAM;
2544
2545         if (!segs)
2546                 return ICE_ERR_BAD_PTR;
2547
2548         status = ice_flow_val_hdrs(segs, segs_cnt);
2549         if (status)
2550                 return status;
2551
2552         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2553
2554         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2555                                         acts, acts_cnt, prof);
2556         if (!status)
2557                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2558
2559         ice_release_lock(&hw->fl_profs_locks[blk]);
2560
2561         return status;
2562 }
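
/* Illustrative usage sketch, not part of the driver: build a single-segment
 * profile hashing on the IPv4 source/destination addresses and add it for
 * the RSS block. The profile ID (0x1) is an arbitrary example value, "hw" is
 * assumed to be an initialized ice_hw pointer, and the IPv4 address field
 * indices are assumed from ice_flow.h:
 *
 *	struct ice_flow_seg_info segs[1];
 *	struct ice_flow_prof *prof = NULL;
 *	enum ice_status status;
 *
 *	ice_memset(segs, 0, sizeof(segs), ICE_NONDMA_MEM);
 *	ice_flow_set_fld(&segs[0], ICE_FLOW_FIELD_IDX_IPV4_SA,
 *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 *	ice_flow_set_fld(&segs[0], ICE_FLOW_FIELD_IDX_IPV4_DA,
 *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 *	status = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, 0x1,
 *				   segs, 1, NULL, 0, &prof);
 */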
2563
2564 /**
2565  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2566  * @hw: pointer to the HW struct
2567  * @blk: the block for which the flow profile is to be removed
2568  * @prof_id: unique ID of the flow profile to be removed
2569  */
2570 enum ice_status
2571 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2572 {
2573         struct ice_flow_prof *prof;
2574         enum ice_status status;
2575
2576         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2577
2578         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2579         if (!prof) {
2580                 status = ICE_ERR_DOES_NOT_EXIST;
2581                 goto out;
2582         }
2583
2584         /* prof becomes invalid after the call */
2585         status = ice_flow_rem_prof_sync(hw, blk, prof);
2586
2587 out:
2588         ice_release_lock(&hw->fl_profs_locks[blk]);
2589
2590         return status;
2591 }
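
/* Illustrative usage sketch, assuming profile 0x1 was previously added with
 * ice_flow_add_prof() for the same block:
 *
 *	status = ice_flow_rem_prof(hw, ICE_BLK_RSS, 0x1);
 */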
2592
2593 /**
2594  * ice_flow_find_entry - look for a flow entry using its unique ID
2595  * @hw: pointer to the HW struct
2596  * @blk: classification stage
2597  * @entry_id: unique ID to identify this flow entry
2598  *
2599  * This function looks for the flow entry with the specified unique ID in all
2600  * flow profiles of the specified classification stage. If the entry is found,
2601  * it returns the handle to the flow entry. Otherwise, it returns
2602  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2603  */
2604 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2605 {
2606         struct ice_flow_entry *found = NULL;
2607         struct ice_flow_prof *p;
2608
2609         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2610
2611         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2612                 struct ice_flow_entry *e;
2613
2614                 ice_acquire_lock(&p->entries_lock);
2615                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2616                         if (e->id == entry_id) {
2617                                 found = e;
2618                                 break;
2619                         }
2620                 ice_release_lock(&p->entries_lock);
2621
2622                 if (found)
2623                         break;
2624         }
2625
2626         ice_release_lock(&hw->fl_profs_locks[blk]);
2627
2628         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2629 }
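
/* Illustrative lookup sketch, assuming "hw" is an initialized ice_hw and
 * 0x1234 is an entry ID previously passed to ice_flow_add_entry():
 *
 *	enum ice_status status;
 *	u64 entry_h = ice_flow_find_entry(hw, ICE_BLK_RSS, 0x1234);
 *
 *	if (entry_h != ICE_FLOW_ENTRY_HANDLE_INVAL)
 *		status = ice_flow_rem_entry(hw, ICE_BLK_RSS, entry_h);
 */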
2630
2631 /**
2632  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2633  * @hw: pointer to the hardware structure
2634  * @acts: array of actions to be performed on a match
2635  * @acts_cnt: number of actions
2636  * @cnt_alloc: indicates if an ACL counter has been allocated.
2637  */
2638 static enum ice_status
2639 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2640                            u8 acts_cnt, bool *cnt_alloc)
2641 {
2642         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2643         int i;
2644
2645         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2646         *cnt_alloc = false;
2647
2648         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2649                 return ICE_ERR_OUT_OF_RANGE;
2650
2651         for (i = 0; i < acts_cnt; i++) {
2652                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2653                     acts[i].type != ICE_FLOW_ACT_DROP &&
2654                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2655                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2656                         return ICE_ERR_CFG;
2657
2658                 /* If the caller wants to add two actions of the same type,
2659                  * then it is considered an invalid configuration.
2660                  */
2661                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2662                         return ICE_ERR_PARAM;
2663         }
2664
2665         /* Checks if ACL counters are needed. */
2666         for (i = 0; i < acts_cnt; i++) {
2667                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2668                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2669                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2670                         struct ice_acl_cntrs cntrs = { 0 };
2671                         enum ice_status status;
2672
2673                         cntrs.amount = 1;
2674                         cntrs.bank = 0; /* Only bank0 for the moment */
2675
2676                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2677                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2678                         else
2679                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2680
2681                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2682                         if (status)
2683                                 return status;
2684                         /* Counter index within the bank */
2685                         acts[i].data.acl_act.value =
2686                                                 CPU_TO_LE16(cntrs.first_cntr);
2687                         *cnt_alloc = true;
2688                 }
2689         }
2690
2691         return ICE_SUCCESS;
2692 }
2693
2694 /**
2695  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2696  * @fld: number of the given field
2697  * @info: info about field
2698  * @range_buf: range checker configuration buffer
2699  * @data: pointer to a data buffer containing flow entry's match values/masks
2700  * @range: Input/output param indicating which range checkers are being used
2701  */
2702 static void
2703 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2704                               struct ice_aqc_acl_profile_ranges *range_buf,
2705                               u8 *data, u8 *range)
2706 {
2707         u16 new_mask;
2708
2709         /* If not specified, default mask is all bits in field */
2710         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2711                     BIT(ice_flds_info[fld].size) - 1 :
2712                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2713
2714         /* If the mask is 0, then we don't need to worry about this input
2715          * range checker value.
2716          */
2717         if (new_mask) {
2718                 u16 new_high =
2719                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2720                 u16 new_low =
2721                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2722                 u8 range_idx = info->entry.val;
2723
2724                 range_buf->checker_cfg[range_idx].low_boundary =
2725                         CPU_TO_BE16(new_low);
2726                 range_buf->checker_cfg[range_idx].high_boundary =
2727                         CPU_TO_BE16(new_high);
2728                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2729
2730                 /* Indicate which range checker is being used */
2731                 *range |= BIT(range_idx);
2732         }
2733 }
2734
2735 /**
2736  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2737  * @fld: number of the given field
2738  * @info: info about the field
2739  * @buf: buffer containing the entry
2740  * @dontcare: buffer containing don't care mask for entry
2741  * @data: pointer to a data buffer containing flow entry's match values/masks
2742  */
2743 static void
2744 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2745                             u8 *dontcare, u8 *data)
2746 {
2747         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2748         bool use_mask = false;
2749         u8 disp;
2750
2751         src = info->src.val;
2752         mask = info->src.mask;
2753         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2754         disp = info->xtrct.disp % BITS_PER_BYTE;
2755
2756         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2757                 use_mask = true;
2758
2759         for (k = 0; k < info->entry.last; k++, dst++) {
2760                 /* Add overflow bits from previous byte */
2761                 buf[dst] = (tmp_s & 0xff00) >> 8;
2762
2763                 /* If mask is not valid, tmp_m is always zero, so just setting
2764                  * dontcare to 0 (no masked bits). If mask is valid, pulls in
2765                  * overflow bits of mask from prev byte
2766                  */
2767                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2768
2769                 /* If there is displacement, the last byte will only contain
2770                  * displaced data, and there is no more data to read from the
2771                  * user buffer, so skip it so as not to read beyond the end of
2772                  * the user buffer
2773                  */
2774                 if (!disp || k < info->entry.last - 1) {
2775                         /* Store shifted data to use in next byte */
2776                         tmp_s = data[src++] << disp;
2777
2778                         /* Add current (shifted) byte */
2779                         buf[dst] |= tmp_s & 0xff;
2780
2781                         /* Handle mask if valid */
2782                         if (use_mask) {
2783                                 tmp_m = (~data[mask++] & 0xff) << disp;
2784                                 dontcare[dst] |= tmp_m & 0xff;
2785                         }
2786                 }
2787         }
2788
2789         /* Fill in don't care bits at beginning of field */
2790         if (disp) {
2791                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2792                 for (k = 0; k < disp; k++)
2793                         dontcare[dst] |= BIT(k);
2794         }
2795
2796         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2797
2798         /* Fill in don't care bits at end of field */
2799         if (end_disp) {
2800                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2801                       info->entry.last - 1;
2802                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2803                         dontcare[dst] |= BIT(k);
2804         }
2805 }
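
/* Worked example of the displacement handling above (illustrative only):
 * for a 16-bit field extracted with a 3-bit displacement (disp = 3,
 * info->entry.last = 3) and user data bytes D0, D1, with indices relative
 * to the field's first destination byte:
 *
 *	buf[0] = (D0 << 3) & 0xff        dontcare bits 0-2 of buf[0] set
 *	buf[1] = (D0 >> 5) | ((D1 << 3) & 0xff)
 *	buf[2] = D1 >> 5                 end_disp = (3 + 16) % 8 = 3, so
 *	                                 dontcare bits 3-7 of buf[2] set
 */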
2806
2807 /**
2808  * ice_flow_acl_frmt_entry - Format ACL entry
2809  * @hw: pointer to the hardware structure
2810  * @prof: pointer to flow profile
2811  * @e: pointer to the flow entry
2812  * @data: pointer to a data buffer containing flow entry's match values/masks
2813  * @acts: array of actions to be performed on a match
2814  * @acts_cnt: number of actions
2815  *
2816  * Formats the key (and key_inverse) to be matched from the data passed in,
2817  * along with data from the flow profile. This key/key_inverse pair makes up
2818  * the 'entry' for an ACL flow entry.
2819  */
2820 static enum ice_status
2821 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2822                         struct ice_flow_entry *e, u8 *data,
2823                         struct ice_flow_action *acts, u8 acts_cnt)
2824 {
2825         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2826         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2827         enum ice_status status;
2828         bool cnt_alloc;
2829         u8 prof_id = 0;
2830         u16 i, buf_sz;
2831
2832         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2833         if (status)
2834                 return status;
2835
2836         /* Format the result action */
2837
2838         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2839         if (status)
2840                 return status;
2841
2842         status = ICE_ERR_NO_MEMORY;
2843
2844         e->acts = (struct ice_flow_action *)
2845                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2846                            ICE_NONDMA_TO_NONDMA);
2847         if (!e->acts)
2848                 goto out;
2849
2850         e->acts_cnt = acts_cnt;
2851
2852         /* Format the matching data */
2853         buf_sz = prof->cfg.scen->width;
2854         buf = (u8 *)ice_malloc(hw, buf_sz);
2855         if (!buf)
2856                 goto out;
2857
2858         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2859         if (!dontcare)
2860                 goto out;
2861
2862         /* 'key' buffer will store both key and key_inverse, so it must be
2863          * twice the size of buf
2864          */
2865         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2866         if (!key)
2867                 goto out;
2868
2869         range_buf = (struct ice_aqc_acl_profile_ranges *)
2870                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2871         if (!range_buf)
2872                 goto out;
2873
2874         /* Set don't care mask to all 1's to start, will zero out used bytes */
2875         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2876
2877         for (i = 0; i < prof->segs_cnt; i++) {
2878                 struct ice_flow_seg_info *seg = &prof->segs[i];
2879                 u8 j;
2880
2881                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2882                                      ICE_FLOW_FIELD_IDX_MAX) {
2883                         struct ice_flow_fld_info *info = &seg->fields[j];
2884
2885                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2886                                 ice_flow_acl_frmt_entry_range(j, info,
2887                                                               range_buf, data,
2888                                                               &range);
2889                         else
2890                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2891                                                             dontcare, data);
2892                 }
2893
2894                 for (j = 0; j < seg->raws_cnt; j++) {
2895                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2896                         u16 dst, src, mask, k;
2897                         bool use_mask = false;
2898
2899                         src = info->src.val;
2900                         dst = info->entry.val -
2901                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2902                         mask = info->src.mask;
2903
2904                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2905                                 use_mask = true;
2906
2907                         for (k = 0; k < info->entry.last; k++, dst++) {
2908                                 buf[dst] = data[src++];
2909                                 if (use_mask)
2910                                         dontcare[dst] = ~data[mask++];
2911                                 else
2912                                         dontcare[dst] = 0;
2913                         }
2914                 }
2915         }
2916
2917         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2918         dontcare[prof->cfg.scen->pid_idx] = 0;
2919
2920         /* Format the buffer for direction flags */
2921         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2922
2923         if (prof->dir == ICE_FLOW_RX)
2924                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2925
2926         if (range) {
2927                 buf[prof->cfg.scen->rng_chk_idx] = range;
2928                 /* Mark any unused range checkers as don't care */
2929                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2930                 e->range_buf = range_buf;
2931         } else {
2932                 ice_free(hw, range_buf);
2933         }
2934
2935         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2936                              buf_sz);
2937         if (status)
2938                 goto out;
2939
2940         e->entry = key;
2941         e->entry_sz = buf_sz * 2;
2942
2943 out:
2944         if (buf)
2945                 ice_free(hw, buf);
2946
2947         if (dontcare)
2948                 ice_free(hw, dontcare);
2949
2950         if (status && key)
2951                 ice_free(hw, key);
2952
2953         if (status && range_buf) {
2954                 ice_free(hw, range_buf);
2955                 e->range_buf = NULL;
2956         }
2957
2958         if (status && e->acts) {
2959                 ice_free(hw, e->acts);
2960                 e->acts = NULL;
2961                 e->acts_cnt = 0;
2962         }
2963
2964         if (status && cnt_alloc)
2965                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2966
2967         return status;
2968 }
2969
2970 /**
2971  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2972  *                                     the compared data.
2973  * @prof: pointer to flow profile
2974  * @e: pointer to the comparing flow entry
2975  * @do_chg_action: decide if we want to change the ACL action
2976  * @do_add_entry: decide if we want to add the new ACL entry
2977  * @do_rem_entry: decide if we want to remove the current ACL entry
2978  *
2979  * Find an ACL scenario entry that matches the compared data. At the same time,
2980  * this function also figures out:
2981  * a/ If we want to change the ACL action
2982  * b/ If we want to add the new ACL entry
2983  * c/ If we want to remove the current ACL entry
2984  */
2985 static struct ice_flow_entry *
2986 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2987                                   struct ice_flow_entry *e, bool *do_chg_action,
2988                                   bool *do_add_entry, bool *do_rem_entry)
2989 {
2990         struct ice_flow_entry *p, *return_entry = NULL;
2991         u8 i, j;
2992
2993         /* Check if:
2994          * a/ There exists an entry with same matching data, but different
2995          *    priority, then we remove this existing ACL entry. Then, we
2996          *    will add the new entry to the ACL scenario.
2997          * b/ There exists an entry with same matching data, priority, and
2998          *    result action, then we do nothing.
2999          * c/ There exists an entry with same matching data and priority, but
3000          *    a different action, then we only change the entry's action.
3001          * d/ Else, we add this new entry to the ACL scenario.
3002          */
3003         *do_chg_action = false;
3004         *do_add_entry = true;
3005         *do_rem_entry = false;
3006         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
3007                 if (memcmp(p->entry, e->entry, p->entry_sz))
3008                         continue;
3009
3010                 /* From this point, we have the same matching_data. */
3011                 *do_add_entry = false;
3012                 return_entry = p;
3013
3014                 if (p->priority != e->priority) {
3015                         /* matching data && !priority */
3016                         *do_add_entry = true;
3017                         *do_rem_entry = true;
3018                         break;
3019                 }
3020
3021                 /* From this point, we will have matching_data && priority */
3022                 if (p->acts_cnt != e->acts_cnt)
3023                         *do_chg_action = true;
3024                 for (i = 0; i < p->acts_cnt; i++) {
3025                         bool found_not_match = false;
3026
3027                         for (j = 0; j < e->acts_cnt; j++)
3028                                 if (memcmp(&p->acts[i], &e->acts[j],
3029                                            sizeof(struct ice_flow_action))) {
3030                                         found_not_match = true;
3031                                         break;
3032                                 }
3033
3034                         if (found_not_match) {
3035                                 *do_chg_action = true;
3036                                 break;
3037                         }
3038                 }
3039
3040                 /* (do_chg_action = true) means :
3041                  *    matching_data && priority && !result_action
3042                  * (do_chg_action = false) means :
3043                  *    matching_data && priority && result_action
3044                  */
3045                 break;
3046         }
3047
3048         return return_entry;
3049 }
3050
3051 /**
3052  * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
3053  * @p: flow priority
3054  */
3055 static enum ice_acl_entry_prio
3056 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
3057 {
3058         enum ice_acl_entry_prio acl_prio;
3059
3060         switch (p) {
3061         case ICE_FLOW_PRIO_LOW:
3062                 acl_prio = ICE_ACL_PRIO_LOW;
3063                 break;
3064         case ICE_FLOW_PRIO_NORMAL:
3065                 acl_prio = ICE_ACL_PRIO_NORMAL;
3066                 break;
3067         case ICE_FLOW_PRIO_HIGH:
3068                 acl_prio = ICE_ACL_PRIO_HIGH;
3069                 break;
3070         default:
3071                 acl_prio = ICE_ACL_PRIO_NORMAL;
3072                 break;
3073         }
3074
3075         return acl_prio;
3076 }
3077
3078 /**
3079  * ice_flow_acl_union_rng_chk - Perform union operation between two
3080  *                              range checker buffers
3081  * @dst_buf: pointer to destination range checker buffer
3082  * @src_buf: pointer to source range checker buffer
3083  *
3084  * This function performs the union of the dst_buf and src_buf range
3085  * checker buffers and saves the result back to dst_buf.
3086  */
3087 static enum ice_status
3088 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
3089                            struct ice_aqc_acl_profile_ranges *src_buf)
3090 {
3091         u8 i, j;
3092
3093         if (!dst_buf || !src_buf)
3094                 return ICE_ERR_BAD_PTR;
3095
3096         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
3097                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
3098                 bool will_populate = false;
3099
3100                 in_data = &src_buf->checker_cfg[i];
3101
3102                 if (!in_data->mask)
3103                         break;
3104
3105                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
3106                         cfg_data = &dst_buf->checker_cfg[j];
3107
3108                         if (!cfg_data->mask ||
3109                             !memcmp(cfg_data, in_data,
3110                                     sizeof(struct ice_acl_rng_data))) {
3111                                 will_populate = true;
3112                                 break;
3113                         }
3114                 }
3115
3116                 if (will_populate) {
3117                         ice_memcpy(cfg_data, in_data,
3118                                    sizeof(struct ice_acl_rng_data),
3119                                    ICE_NONDMA_TO_NONDMA);
3120                 } else {
3121                         /* No available slot left to program range checker */
3122                         return ICE_ERR_MAX_LIMIT;
3123                 }
3124         }
3125
3126         return ICE_SUCCESS;
3127 }
3128
3129 /**
3130  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
3131  * @hw: pointer to the hardware structure
3132  * @prof: pointer to flow profile
3133  * @entry: double pointer to the flow entry
3134  *
3135  * This function looks at the entries currently added to the
3136  * corresponding ACL scenario, then performs matching logic to decide
3137  * whether to add, modify, or do nothing with this new entry.
3138  */
3139 static enum ice_status
3140 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
3141                                  struct ice_flow_entry **entry)
3142 {
3143         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
3144         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
3145         struct ice_acl_act_entry *acts = NULL;
3146         struct ice_flow_entry *exist;
3147         enum ice_status status = ICE_SUCCESS;
3148         struct ice_flow_entry *e;
3149         u8 i;
3150
3151         if (!entry || !(*entry) || !prof)
3152                 return ICE_ERR_BAD_PTR;
3153
3154         e = *entry;
3155
3156         do_chg_rng_chk = false;
3157         if (e->range_buf) {
3158                 u8 prof_id = 0;
3159
3160                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
3161                                               &prof_id);
3162                 if (status)
3163                         return status;
3164
3165                 /* Query the current range-checker value in FW */
3166                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
3167                                                    NULL);
3168                 if (status)
3169                         return status;
3170                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
3171                            sizeof(struct ice_aqc_acl_profile_ranges),
3172                            ICE_NONDMA_TO_NONDMA);
3173
3174                 /* Generate the new range-checker value */
3175                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
3176                 if (status)
3177                         return status;
3178
3179                 /* Reconfigure the range check if the buffer is changed. */
3180                 do_chg_rng_chk = false;
3181                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
3182                            sizeof(struct ice_aqc_acl_profile_ranges))) {
3183                         status = ice_prog_acl_prof_ranges(hw, prof_id,
3184                                                           &cfg_rng_buf, NULL);
3185                         if (status)
3186                                 return status;
3187
3188                         do_chg_rng_chk = true;
3189                 }
3190         }
3191
3192         /* Figure out if we want to (change the ACL action) and/or
3193          * (Add the new ACL entry) and/or (Remove the current ACL entry)
3194          */
3195         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
3196                                                   &do_add_entry, &do_rem_entry);
3197         if (do_rem_entry) {
3198                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
3199                 if (status)
3200                         return status;
3201         }
3202
3203         /* Prepare the result action buffer */
3204         acts = (struct ice_acl_act_entry *)
3205                 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
3206         if (!acts)
3207                 return ICE_ERR_NO_MEMORY;
3208
3209         for (i = 0; i < e->acts_cnt; i++)
3210                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
3211                            sizeof(struct ice_acl_act_entry),
3212                            ICE_NONDMA_TO_NONDMA);
3213
3214         if (do_add_entry) {
3215                 enum ice_acl_entry_prio prio;
3216                 u8 *keys, *inverts;
3217                 u16 entry_idx;
3218
3219                 keys = (u8 *)e->entry;
3220                 inverts = keys + (e->entry_sz / 2);
3221                 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
3222
3223                 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
3224                                            inverts, acts, e->acts_cnt,
3225                                            &entry_idx);
3226                 if (status)
3227                         goto out;
3228
3229                 e->scen_entry_idx = entry_idx;
3230                 LIST_ADD(&e->l_entry, &prof->entries);
3231         } else {
3232                 if (do_chg_action) {
3233                         /* For the action memory info, update the SW's copy of
3234                          * the existing entry with e's action memory info
3235                          */
3236                         ice_free(hw, exist->acts);
3237                         exist->acts_cnt = e->acts_cnt;
3238                         exist->acts = (struct ice_flow_action *)
3239                                 ice_calloc(hw, exist->acts_cnt,
3240                                            sizeof(struct ice_flow_action));
3241                         if (!exist->acts) {
3242                                 status = ICE_ERR_NO_MEMORY;
3243                                 goto out;
3244                         }
3245
3246                         ice_memcpy(exist->acts, e->acts,
3247                                    sizeof(struct ice_flow_action) * e->acts_cnt,
3248                                    ICE_NONDMA_TO_NONDMA);
3249
3250                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3251                                                   e->acts_cnt,
3252                                                   exist->scen_entry_idx);
3253                         if (status)
3254                                 goto out;
3255                 }
3256
3257                 if (do_chg_rng_chk) {
3258                         /* In this case, we want to update the range checker
3259                          * information of the exist entry
3260                          */
3261                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
3262                                                             e->range_buf);
3263                         if (status)
3264                                 goto out;
3265                 }
3266
3267                 /* As we don't add the new entry to our SW DB, deallocate its
3268                  * memory and return the existing entry to the caller
3269                  */
3270                 ice_dealloc_flow_entry(hw, e);
3271                 *(entry) = exist;
3272         }
3273 out:
3274         ice_free(hw, acts);
3275
3276         return status;
3277 }
3278
3279 /**
3280  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3281  * @hw: pointer to the hardware structure
3282  * @prof: pointer to flow profile
3283  * @e: double pointer to the flow entry
3284  */
3285 static enum ice_status
3286 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3287                             struct ice_flow_entry **e)
3288 {
3289         enum ice_status status;
3290
3291         ice_acquire_lock(&prof->entries_lock);
3292         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3293         ice_release_lock(&prof->entries_lock);
3294
3295         return status;
3296 }
3297
3298 /**
3299  * ice_flow_add_entry - Add a flow entry
3300  * @hw: pointer to the HW struct
3301  * @blk: classification stage
3302  * @prof_id: ID of the profile to add a new flow entry to
3303  * @entry_id: unique ID to identify this flow entry
3304  * @vsi_handle: software VSI handle for the flow entry
3305  * @prio: priority of the flow entry
3306  * @data: pointer to a data buffer containing flow entry's match values/masks
3307  * @acts: array of actions to be performed on a match
3308  * @acts_cnt: number of actions
3309  * @entry_h: pointer to buffer that receives the new flow entry's handle
3310  */
3311 enum ice_status
3312 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3313                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3314                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
3315                    u64 *entry_h)
3316 {
3317         struct ice_flow_entry *e = NULL;
3318         struct ice_flow_prof *prof;
3319         enum ice_status status = ICE_SUCCESS;
3320
3321         /* ACL entries must indicate an action */
3322         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3323                 return ICE_ERR_PARAM;
3324
3325         /* No flow entry data is expected for RSS */
3326         if (!entry_h || (!data && blk != ICE_BLK_RSS))
3327                 return ICE_ERR_BAD_PTR;
3328
3329         if (!ice_is_vsi_valid(hw, vsi_handle))
3330                 return ICE_ERR_PARAM;
3331
3332         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3333
3334         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3335         if (!prof) {
3336                 status = ICE_ERR_DOES_NOT_EXIST;
3337         } else {
3338                 /* Allocate memory for the entry being added and associate
3339                  * the VSI to the found flow profile
3340                  */
3341                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3342                 if (!e)
3343                         status = ICE_ERR_NO_MEMORY;
3344                 else
3345                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3346         }
3347
3348         ice_release_lock(&hw->fl_profs_locks[blk]);
3349         if (status)
3350                 goto out;
3351
3352         e->id = entry_id;
3353         e->vsi_handle = vsi_handle;
3354         e->prof = prof;
3355         e->priority = prio;
3356
3357         switch (blk) {
3358         case ICE_BLK_FD:
3359         case ICE_BLK_RSS:
3360                 break;
3361         case ICE_BLK_ACL:
3362                 /* ACL will handle the entry management */
3363                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3364                                                  acts_cnt);
3365                 if (status)
3366                         goto out;
3367
3368                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3369                 if (status)
3370                         goto out;
3371
3372                 break;
3373         default:
3374                 status = ICE_ERR_NOT_IMPL;
3375                 goto out;
3376         }
3377
3378         if (blk != ICE_BLK_ACL) {
3379                 /* ACL will handle the entry management */
3380                 ice_acquire_lock(&prof->entries_lock);
3381                 LIST_ADD(&e->l_entry, &prof->entries);
3382                 ice_release_lock(&prof->entries_lock);
3383         }
3384
3385         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3386
3387 out:
3388         if (status && e) {
3389                 if (e->entry)
3390                         ice_free(hw, e->entry);
3391                 ice_free(hw, e);
3392         }
3393
3394         return status;
3395 }
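
/* Illustrative usage sketch, assuming "hw" is an initialized ice_hw, profile
 * 0x1 was added with ice_flow_add_prof() for ICE_BLK_RSS, "vsi_handle" is a
 * valid software VSI handle and 0x1234 is a caller-chosen entry ID. No match
 * data or actions are required for the RSS block:
 *
 *	u64 entry_h = ICE_FLOW_ENTRY_HANDLE_INVAL;
 *	enum ice_status status;
 *
 *	status = ice_flow_add_entry(hw, ICE_BLK_RSS, 0x1, 0x1234, vsi_handle,
 *				    ICE_FLOW_PRIO_NORMAL, NULL, NULL, 0,
 *				    &entry_h);
 */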
3396
3397 /**
3398  * ice_flow_rem_entry - Remove a flow entry
3399  * @hw: pointer to the HW struct
3400  * @blk: classification stage
3401  * @entry_h: handle to the flow entry to be removed
3402  */
3403 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3404                                    u64 entry_h)
3405 {
3406         struct ice_flow_entry *entry;
3407         struct ice_flow_prof *prof;
3408         enum ice_status status = ICE_SUCCESS;
3409
3410         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3411                 return ICE_ERR_PARAM;
3412
3413         entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
3414
3415         /* Retain the pointer to the flow profile as the entry will be freed */
3416         prof = entry->prof;
3417
3418         if (prof) {
3419                 ice_acquire_lock(&prof->entries_lock);
3420                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3421                 ice_release_lock(&prof->entries_lock);
3422         }
3423
3424         return status;
3425 }
3426
3427 /**
3428  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3429  * @seg: packet segment the field being set belongs to
3430  * @fld: field to be set
3431  * @field_type: type of the field
3432  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3433  *           entry's input buffer
3434  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3435  *            input buffer
3436  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3437  *            entry's input buffer
3438  *
3439  * This helper function stores information of a field being matched, including
3440  * the type of the field and the locations of the value to match, the mask, and
3441  * the upper-bound value in the start of the input buffer for a flow entry.
3442  * This function should only be used for fixed-size data structures.
3443  *
3444  * This function also opportunistically determines the protocol headers to be
3445  * present based on the fields being set. Some fields cannot be used alone to
3446  * determine the protocol headers present. Sometimes, fields for particular
3447  * protocol headers are not matched. In those cases, the protocol headers
3448  * must be explicitly set.
3449  */
3450 static void
3451 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3452                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3453                      u16 mask_loc, u16 last_loc)
3454 {
3455         u64 bit = BIT_ULL(fld);
3456
3457         seg->match |= bit;
3458         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3459                 seg->range |= bit;
3460
3461         seg->fields[fld].type = field_type;
3462         seg->fields[fld].src.val = val_loc;
3463         seg->fields[fld].src.mask = mask_loc;
3464         seg->fields[fld].src.last = last_loc;
3465
3466         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3467 }
3468
3469 /**
3470  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3471  * @seg: packet segment the field being set belongs to
3472  * @fld: field to be set
3473  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3474  *           entry's input buffer
3475  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3476  *            input buffer
3477  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3478  *            entry's input buffer
3479  * @range: indicate if field being matched is to be in a range
3480  *
3481  * This function specifies the locations, in the form of byte offsets from the
3482  * start of the input buffer for a flow entry, from where the value to match,
3483  * the mask value, and upper value can be extracted. These locations are then
3484  * stored in the flow profile. When adding a flow entry associated with the
3485  * flow profile, these locations will be used to quickly extract the values and
3486  * create the content of a match entry. This function should only be used for
3487  * fixed-size data structures.
3488  */
3489 void
3490 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3491                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3492 {
3493         enum ice_flow_fld_match_type t = range ?
3494                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3495
3496         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3497 }
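
/* Illustrative sketch of how the byte-offset locations are used. The struct
 * below is a caller-defined input-buffer layout invented for this example,
 * not a driver structure; the same layout is later passed as "data" when
 * adding a flow entry:
 *
 *	struct example_match {
 *		u8 dscp_val;
 *		u8 dscp_mask;
 *	};
 *
 *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DSCP,
 *			 offsetof(struct example_match, dscp_val),
 *			 offsetof(struct example_match, dscp_mask),
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 */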
3498
3499 /**
3500  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3501  * @seg: packet segment the field being set belongs to
3502  * @fld: field to be set
3503  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3504  *           entry's input buffer
3505  * @pref_loc: location of prefix value from entry's input buffer
3506  * @pref_sz: size of the location holding the prefix value
3507  *
3508  * This function specifies the locations, in the form of byte offsets from the
3509  * start of the input buffer for a flow entry, from where the value to match
3510  * and the IPv4 prefix value can be extracted. These locations are then stored
3511  * in the flow profile. When adding flow entries to the associated flow profile,
3512  * these locations can be used to quickly extract the values to create the
3513  * content of a match entry. This function should only be used for fixed-size
3514  * data structures.
3515  */
3516 void
3517 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3518                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3519 {
3520         /* For this type of field, the "mask" location holds the location of the
3521          * prefix value, and the "last" location holds the size of the location
3522          * of the prefix value.
3523          */
3524         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3525                              pref_loc, (u16)pref_sz);
3526 }
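
/* Illustrative sketch, assuming the ICE_FLOW_FIELD_IDX_IPV4_SA index from
 * ice_flow.h and a caller-defined buffer layout invented for this example;
 * the prefix length is stored as a one-byte value in the same input buffer
 * as the address to match:
 *
 *	struct example_pfx {
 *		u32 sip;
 *		u8 sip_pfx_len;
 *	};
 *
 *	ice_flow_set_fld_prefix(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *				offsetof(struct example_pfx, sip),
 *				offsetof(struct example_pfx, sip_pfx_len), 1);
 */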
3527
3528 /**
3529  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3530  * @seg: packet segment the field being set belongs to
3531  * @off: offset of the raw field from the beginning of the segment in bytes
3532  * @len: length of the raw pattern to be matched
3533  * @val_loc: location of the value to match from entry's input buffer
3534  * @mask_loc: location of mask value from entry's input buffer
3535  *
3536  * This function specifies the offset of the raw field to be matched from the
3537  * beginning of the specified packet segment, and the locations, in the form of
3538  * byte offsets from the start of the input buffer for a flow entry, from where
3539  * the value to match and the mask value are to be extracted. These locations are
3540  * then stored in the flow profile. When adding flow entries to the associated
3541  * flow profile, these locations can be used to quickly extract the values to
3542  * create the content of a match entry. This function should only be used for
3543  * fixed-size data structures.
3544  */
3545 void
3546 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3547                      u16 val_loc, u16 mask_loc)
3548 {
3549         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3550                 seg->raws[seg->raws_cnt].off = off;
3551                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3552                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3553                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3554                 /* The "last" field is used to store the length of the field */
3555                 seg->raws[seg->raws_cnt].info.src.last = len;
3556         }
3557
3558         /* Overflows of "raws" will be handled as an error condition later in
3559          * the flow when this information is processed.
3560          */
3561         seg->raws_cnt++;
3562 }
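
/* Illustrative sketch, with RAW_VAL_OFF and RAW_MSK_OFF standing in for
 * caller-chosen byte offsets into the flow entry input buffer: match four
 * raw bytes starting two bytes into the segment:
 *
 *	ice_flow_add_fld_raw(seg, 2, 4, RAW_VAL_OFF, RAW_MSK_OFF);
 */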
3563
3564 /**
3565  * ice_flow_rem_vsi_prof - remove vsi from flow profile
3566  * @hw: pointer to the hardware structure
3567  * @blk: classification stage
3568  * @vsi_handle: software VSI handle
3569  * @prof_id: unique ID to identify this flow profile
3570  *
3571  * This function removes the flow entries associated with the input
3572  * VSI handle and disassociates the VSI from the flow profile.
3573  */
3574 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3575                                       u64 prof_id)
3576 {
3577         struct ice_flow_prof *prof = NULL;
3578         enum ice_status status = ICE_SUCCESS;
3579
3580         if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3581                 return ICE_ERR_PARAM;
3582
3583         /* find flow profile pointer with input package block and profile id */
3584         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3585         if (!prof) {
3586                 ice_debug(hw, ICE_DBG_PKG,
3587                           "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3588                 return ICE_ERR_DOES_NOT_EXIST;
3589         }
3590
3591         /* Remove all remaining flow entries before removing the flow profile */
3592         if (!LIST_EMPTY(&prof->entries)) {
3593                 struct ice_flow_entry *e, *t;
3594
3595                 ice_acquire_lock(&prof->entries_lock);
3596                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
3597                                          l_entry) {
3598                         if (e->vsi_handle != vsi_handle)
3599                                 continue;
3600
3601                         status = ice_flow_rem_entry_sync(hw, blk, e);
3602                         if (status)
3603                                 break;
3604                 }
3605                 ice_release_lock(&prof->entries_lock);
3606         }
3607         if (status)
3608                 return status;
3609
3610         /* disassociate the flow profile from the SW VSI handle */
3611         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3612         if (status)
3613                 ice_debug(hw, ICE_DBG_PKG,
3614                           "ice_flow_disassoc_prof() failed with status=%d\n",
3615                           status);
3616         return status;
3617 }
3618
3619 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3620 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_ETH_NON_IP | ICE_FLOW_SEG_HDR_VLAN)
3621
3622 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3623         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3624
3625 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3626         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3627
3628 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3629         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3630          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3631          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3632
3633 /**
3634  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3635  * @segs: pointer to the flow field segment(s)
3636  * @seg_cnt: segment count
3637  * @cfg: configure parameters
3638  *
3639  * Helper function to extract fields from the hash bitmap and use the flow
3640  * header values to set up the flow field segments for later use when adding
3641  * or removing a flow profile entry.
3642  */
3643 static enum ice_status
3644 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3645                           const struct ice_rss_hash_cfg *cfg)
3646 {
3647         struct ice_flow_seg_info *seg;
3648         u64 val;
3649         u8 i;
3650
3651         /* set the innermost segment */
3652         seg = &segs[seg_cnt - 1];
3653
3654         ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3655                              ICE_FLOW_FIELD_IDX_MAX)
3656                 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3657                                  ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3658                                  ICE_FLOW_FLD_OFF_INVAL, false);
3659
3660         ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3661
3662         /* set the outermost segment's headers */
3663         if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3664                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3665                                                    ICE_FLOW_SEG_HDR_IPV_FRAG |
3666                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3667         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3668                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3669                                                    ICE_FLOW_SEG_HDR_IPV_FRAG |
3670                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3671         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE)
3672                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3673                                                    ICE_FLOW_SEG_HDR_GRE |
3674                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3675         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE)
3676                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3677                                                    ICE_FLOW_SEG_HDR_GRE |
3678                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3679
3680         if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3681             ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
3682             ~ICE_FLOW_SEG_HDR_IPV_FRAG)
3683                 return ICE_ERR_PARAM;
3684
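        /* A segment may select at most one L3 and at most one L4 protocol
         * header for hashing; the power-of-two checks below reject cases
         * where more than one bit of the respective mask is set.
         */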
3685         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3686         if (val && !ice_is_pow2(val))
3687                 return ICE_ERR_CFG;
3688
3689         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3690         if (val && !ice_is_pow2(val))
3691                 return ICE_ERR_CFG;
3692
3693         return ICE_SUCCESS;
3694 }
3695
3696 /**
3697  * ice_rem_vsi_rss_list - remove VSI from RSS list
3698  * @hw: pointer to the hardware structure
3699  * @vsi_handle: software VSI handle
3700  *
3701  * Remove the VSI from all RSS configurations in the list.
3702  */
3703 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3704 {
3705         struct ice_rss_cfg *r, *tmp;
3706
3707         if (LIST_EMPTY(&hw->rss_list_head))
3708                 return;
3709
3710         ice_acquire_lock(&hw->rss_locks);
3711         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3712                                  ice_rss_cfg, l_entry)
3713                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3714                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3715                                 LIST_DEL(&r->l_entry);
3716                                 ice_free(hw, r);
3717                         }
3718         ice_release_lock(&hw->rss_locks);
3719 }
3720
3721 /**
3722  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3723  * @hw: pointer to the hardware structure
3724  * @vsi_handle: software VSI handle
3725  *
3726  * This function will iterate through all flow profiles and disassociate
3727  * the VSI from each profile it is associated with. If a flow profile is
3728  * left with no VSIs, it will be removed.
3729  */
3730 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3731 {
3732         const enum ice_block blk = ICE_BLK_RSS;
3733         struct ice_flow_prof *p, *t;
3734         enum ice_status status = ICE_SUCCESS;
3735
3736         if (!ice_is_vsi_valid(hw, vsi_handle))
3737                 return ICE_ERR_PARAM;
3738
3739         if (LIST_EMPTY(&hw->fl_profs[blk]))
3740                 return ICE_SUCCESS;
3741
3742         ice_acquire_lock(&hw->rss_locks);
3743         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3744                                  l_entry)
3745                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3746                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3747                         if (status)
3748                                 break;
3749
3750                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3751                                 status = ice_flow_rem_prof(hw, blk, p->id);
3752                                 if (status)
3753                                         break;
3754                         }
3755                 }
3756         ice_release_lock(&hw->rss_locks);
3757
3758         return status;
3759 }
3760
3761 /**
3762  * ice_get_rss_hdr_type - get an RSS profile's header type
3763  * @prof: RSS flow profile
3764  */
3765 static enum ice_rss_cfg_hdr_type
3766 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3767 {
3768         enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3769
3770         if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3771                 hdr_type = ICE_RSS_OUTER_HEADERS;
3772         } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3773                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3774                         hdr_type = ICE_RSS_INNER_HEADERS;
3775                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3776                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3777                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3778                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3779         }
3780
3781         return hdr_type;
3782 }
3783
3784 /**
3785  * ice_rem_rss_list - remove RSS configuration from list
3786  * @hw: pointer to the hardware structure
3787  * @vsi_handle: software VSI handle
3788  * @prof: pointer to flow profile
3789  *
3790  * Assumption: lock has already been acquired for RSS list
3791  */
3792 static void
3793 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3794 {
3795         enum ice_rss_cfg_hdr_type hdr_type;
3796         struct ice_rss_cfg *r, *tmp;
3797
3798         /* Search for the RSS configuration associated with the VSI that
3799          * matches the hash configuration of the flow profile. If found,
3800          * clear the VSI from it and delete it once no VSI references it.
3801          */
3802         hdr_type = ice_get_rss_hdr_type(prof);
3803         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3804                                  ice_rss_cfg, l_entry)
3805                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3806                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3807                     r->hash.hdr_type == hdr_type) {
3808                         ice_clear_bit(vsi_handle, r->vsis);
3809                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3810                                 LIST_DEL(&r->l_entry);
3811                                 ice_free(hw, r);
3812                         }
3813                         return;
3814                 }
3815 }
3816
3817 /**
3818  * ice_add_rss_list - add RSS configuration to list
3819  * @hw: pointer to the hardware structure
3820  * @vsi_handle: software VSI handle
3821  * @prof: pointer to flow profile
3822  *
3823  * Assumption: lock has already been acquired for RSS list
3824  */
3825 static enum ice_status
3826 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3827 {
3828         enum ice_rss_cfg_hdr_type hdr_type;
3829         struct ice_rss_cfg *r, *rss_cfg;
3830
3831         hdr_type = ice_get_rss_hdr_type(prof);
3832         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3833                             ice_rss_cfg, l_entry)
3834                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3835                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3836                     r->hash.hdr_type == hdr_type) {
3837                         ice_set_bit(vsi_handle, r->vsis);
3838                         return ICE_SUCCESS;
3839                 }
3840
3841         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3842         if (!rss_cfg)
3843                 return ICE_ERR_NO_MEMORY;
3844
3845         rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3846         rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3847         rss_cfg->hash.hdr_type = hdr_type;
3848         rss_cfg->hash.symm = prof->cfg.symm;
3849         ice_set_bit(vsi_handle, rss_cfg->vsis);
3850
3851         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3852
3853         return ICE_SUCCESS;
3854 }
3855
3856 #define ICE_FLOW_PROF_HASH_S    0
3857 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3858 #define ICE_FLOW_PROF_HDR_S     32
3859 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3860 #define ICE_FLOW_PROF_ENCAP_S   62
3861 #define ICE_FLOW_PROF_ENCAP_M   (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3862
3863 /* Flow profile ID format:
3864  * [0:31] - Packet match fields
3865  * [32:61] - Protocol header
3866  * [62:63] - Encapsulation flag:
3867  *           0 if non-tunneled
3868  *           1 if tunneled
3869  *           2 if tunneled with outer IPv4
3870  *           3 if tunneled with outer IPv6
3871  */
3872 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3873         ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3874                (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3875                (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
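
/* For example (values are illustrative), hash fields 0x86ULL, protocol
 * headers 0x14 and an encapsulation value of 0 produce the profile ID
 * (0ULL << 62) | (0x14ULL << 32) | 0x86ULL = 0x0000001400000086.
 */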
3876
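/**
 * ice_rss_config_xor_word - program one field vector word in GLQF_HSYMM
 * @hw: pointer to the hardware structure
 * @prof_id: hardware profile ID being configured
 * @src: field vector word index to program
 * @dst: field vector word index to pair with @src
 *
 * Each GLQF_HSYMM register holds four one-byte entries; the byte for @src
 * is written with @dst with the high bit (0x80) set.
 */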
3877 static void
3878 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3879 {
3880         u32 s = ((src % 4) << 3); /* bit shift of the byte within the register */
3881         u32 v = dst | 0x80; /* value to program: dst index with the high bit set */
3882         u8 i = src / 4; /* register index */
3883         u32 reg;
3884
3885         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3886         reg = (reg & ~(0xff << s)) | (v << s);
3887         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3888 }
3889
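/**
 * ice_rss_config_xor - program a symmetric XOR pairing of two extracted fields
 * @hw: pointer to the hardware structure
 * @prof_id: hardware profile ID being configured
 * @src: field vector index of the first field
 * @dst: field vector index of the second field
 * @len: field length in 16-bit field vector words
 *
 * Program the pairing in both directions so that the resulting hash is the
 * same when the two field values are swapped in the packet.
 */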
3890 static void
3891 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3892 {
3893         int fv_last_word =
3894                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3895         int i;
3896
3897         for (i = 0; i < len; i++) {
3898                 ice_rss_config_xor_word(hw, prof_id,
3899                                         /* Note: the field vector indexing in
3900                                          * GLQF_HSYMM and GLQF_HINSET is reversed.
3901                                          */
3902                                         fv_last_word - (src + i),
3903                                         fv_last_word - (dst + i));
3904                 ice_rss_config_xor_word(hw, prof_id,
3905                                         fv_last_word - (dst + i),
3906                                         fv_last_word - (src + i));
3907         }
3908 }
3909
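/**
 * ice_rss_update_symm - update symmetric hash configuration for a profile
 * @hw: pointer to the hardware structure
 * @prof: flow profile to update
 *
 * Reset the profile's GLQF_HSYMM registers to their defaults and, if
 * symmetric hashing is requested, program XOR pairs for the IPv4/IPv6
 * source/destination addresses and the TCP/UDP/SCTP ports extracted by the
 * profile's innermost segment.
 */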
3910 static void
3911 ice_rss_update_symm(struct ice_hw *hw,
3912                     struct ice_flow_prof *prof)
3913 {
3914         struct ice_prof_map *map;
3915         u8 prof_id, m;
3916
3917         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3918         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3919         if (map)
3920                 prof_id = map->prof_id;
3921         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3922         if (!map)
3923                 return;
3924         /* clear to default */
3925         for (m = 0; m < 6; m++)
3926                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3927         if (prof->cfg.symm) {
3928                 struct ice_flow_seg_info *seg =
3929                         &prof->segs[prof->segs_cnt - 1];
3930
3931                 struct ice_flow_seg_xtrct *ipv4_src =
3932                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3933                 struct ice_flow_seg_xtrct *ipv4_dst =
3934                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3935                 struct ice_flow_seg_xtrct *ipv6_src =
3936                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3937                 struct ice_flow_seg_xtrct *ipv6_dst =
3938                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3939
3940                 struct ice_flow_seg_xtrct *tcp_src =
3941                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3942                 struct ice_flow_seg_xtrct *tcp_dst =
3943                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3944
3945                 struct ice_flow_seg_xtrct *udp_src =
3946                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3947                 struct ice_flow_seg_xtrct *udp_dst =
3948                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3949
3950                 struct ice_flow_seg_xtrct *sctp_src =
3951                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3952                 struct ice_flow_seg_xtrct *sctp_dst =
3953                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3954
3955                 /* xor IPv4 */
3956                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3957                         ice_rss_config_xor(hw, prof_id,
3958                                            ipv4_src->idx, ipv4_dst->idx, 2);
3959
3960                 /* xor IPv6 */
3961                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3962                         ice_rss_config_xor(hw, prof_id,
3963                                            ipv6_src->idx, ipv6_dst->idx, 8);
3964
3965                 /* xor TCP */
3966                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3967                         ice_rss_config_xor(hw, prof_id,
3968                                            tcp_src->idx, tcp_dst->idx, 1);
3969
3970                 /* xor UDP */
3971                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3972                         ice_rss_config_xor(hw, prof_id,
3973                                            udp_src->idx, udp_dst->idx, 1);
3974
3975                 /* xor SCTP */
3976                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3977                         ice_rss_config_xor(hw, prof_id,
3978                                            sctp_src->idx, sctp_dst->idx, 1);
3979         }
3980 }
3981
3982 /**
3983  * ice_add_rss_cfg_sync - add an RSS configuration
3984  * @hw: pointer to the hardware structure
3985  * @vsi_handle: software VSI handle
3986  * @cfg: configure parameters
3987  *
3988  * Assumption: lock has already been acquired for RSS list
3989  */
3990 static enum ice_status
3991 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3992                      const struct ice_rss_hash_cfg *cfg)
3993 {
3994         const enum ice_block blk = ICE_BLK_RSS;
3995         struct ice_flow_prof *prof = NULL;
3996         struct ice_flow_seg_info *segs;
3997         enum ice_status status;
3998         u8 segs_cnt;
3999
4000         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
4001                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
4002
4003         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
4004                                                       sizeof(*segs));
4005         if (!segs)
4006                 return ICE_ERR_NO_MEMORY;
4007
4008         /* Construct the packet segment info from the hashed fields */
4009         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
4010         if (status)
4011                 goto exit;
4012
4013         /* Search for a flow profile that has matching headers and hash fields
4014          * and has the input VSI associated with it. If found, no further
4015          * operations are required, so exit.
4016          */
4017         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4018                                         vsi_handle,
4019                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
4020                                         ICE_FLOW_FIND_PROF_CHK_VSI);
4021         if (prof) {
4022                 if (prof->cfg.symm == cfg->symm)
4023                         goto exit;
4024                 prof->cfg.symm = cfg->symm;
4025                 goto update_symm;
4026         }
4027
4028         /* Check if a flow profile exists with the same protocol headers and
4029          * is associated with the input VSI. If so, disassociate the VSI from
4030          * this profile. The VSI will be added to a new profile created with
4031          * the protocol headers and the new hash field configuration.
4032          */
4033         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4034                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
4035         if (prof) {
4036                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
4037                 if (!status)
4038                         ice_rem_rss_list(hw, vsi_handle, prof);
4039                 else
4040                         goto exit;
4041
4042                 /* Remove profile if it has no VSIs associated */
4043                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
4044                         status = ice_flow_rem_prof(hw, blk, prof->id);
4045                         if (status)
4046                                 goto exit;
4047                 }
4048         }
4049
4050         /* Search for a profile that has the same match fields only. If one
4051          * exists, then associate the VSI with this profile.
4052          */
4053         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4054                                         vsi_handle,
4055                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
4056         if (prof) {
4057                 if (prof->cfg.symm == cfg->symm) {
4058                         status = ice_flow_assoc_prof(hw, blk, prof,
4059                                                      vsi_handle);
4060                         if (!status)
4061                                 status = ice_add_rss_list(hw, vsi_handle,
4062                                                           prof);
4063                 } else {
4064                         /* if a profile exists but with a different symmetric
4065                          * hashing requirement, just return an error.
4066                          */
4067                         status = ICE_ERR_NOT_SUPPORTED;
4068                 }
4069                 goto exit;
4070         }
4071
4072         /* Create a new flow profile with the generated profile ID and packet
4073          * segment information.
4074          */
4075         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
4076                                    ICE_FLOW_GEN_PROFID(cfg->hash_flds,
4077                                                        segs[segs_cnt - 1].hdrs,
4078                                                        cfg->hdr_type),
4079                                    segs, segs_cnt, NULL, 0, &prof);
4080         if (status)
4081                 goto exit;
4082
4083         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
4084         /* If association with the new flow profile failed, then this profile
4085          * can be removed.
4086          */
4087         if (status) {
4088                 ice_flow_rem_prof(hw, blk, prof->id);
4089                 goto exit;
4090         }
4091
4092         status = ice_add_rss_list(hw, vsi_handle, prof);
4093
4094         prof->cfg.symm = cfg->symm;
4095 update_symm:
4096         ice_rss_update_symm(hw, prof);
4097
4098 exit:
4099         ice_free(hw, segs);
4100         return status;
4101 }
4102
4103 /**
4104  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
4105  * @hw: pointer to the hardware structure
4106  * @vsi_handle: software VSI handle
4107  * @cfg: configure parameters
4108  *
4109  * This function will generate a flow profile based on the input fields to
4110  * hash on and the protocol header types, and then associate the input VSI
4111  * with the resulting profile.
4112  */
4113 enum ice_status
4114 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4115                 const struct ice_rss_hash_cfg *cfg)
4116 {
4117         struct ice_rss_hash_cfg local_cfg;
4118         enum ice_status status;
4119
4120         if (!ice_is_vsi_valid(hw, vsi_handle) ||
4121             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4122             cfg->hash_flds == ICE_HASH_INVALID)
4123                 return ICE_ERR_PARAM;
4124
4125         local_cfg = *cfg;
4126         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4127                 ice_acquire_lock(&hw->rss_locks);
4128                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4129                 ice_release_lock(&hw->rss_locks);
4130         } else {
4131                 ice_acquire_lock(&hw->rss_locks);
4132                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4133                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4134                 if (!status) {
4135                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4136                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
4137                                                       &local_cfg);
4138                 }
4139                 ice_release_lock(&hw->rss_locks);
4140         }
4141
4142         return status;
4143 }
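
/* Minimal usage sketch (the particular header and hash-field selection below
 * is illustrative only):
 *
 *	struct ice_rss_hash_cfg cfg = { 0 };
 *
 *	cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP;
 *	cfg.hash_flds = ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT;
 *	cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
 *	cfg.symm = false;
 *	status = ice_add_rss_cfg(hw, vsi_handle, &cfg);
 */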
4144
4145 /**
4146  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
4147  * @hw: pointer to the hardware structure
4148  * @vsi_handle: software VSI handle
4149  * @cfg: configure parameters
4150  *
4151  * Assumption: lock has already been acquired for RSS list
4152  */
4153 static enum ice_status
4154 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
4155                      const struct ice_rss_hash_cfg *cfg)
4156 {
4157         const enum ice_block blk = ICE_BLK_RSS;
4158         struct ice_flow_seg_info *segs;
4159         struct ice_flow_prof *prof;
4160         enum ice_status status;
4161         u8 segs_cnt;
4162
4163         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
4164                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
4165         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
4166                                                       sizeof(*segs));
4167         if (!segs)
4168                 return ICE_ERR_NO_MEMORY;
4169
4170         /* Construct the packet segment info from the hashed fields */
4171         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
4172         if (status)
4173                 goto out;
4174
4175         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4176                                         vsi_handle,
4177                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
4178         if (!prof) {
4179                 status = ICE_ERR_DOES_NOT_EXIST;
4180                 goto out;
4181         }
4182
4183         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
4184         if (status)
4185                 goto out;
4186
4187         /* Remove RSS configuration from VSI context before deleting
4188          * the flow profile.
4189          */
4190         ice_rem_rss_list(hw, vsi_handle, prof);
4191
4192         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
4193                 status = ice_flow_rem_prof(hw, blk, prof->id);
4194
4195 out:
4196         ice_free(hw, segs);
4197         return status;
4198 }
4199
4200 /**
4201  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
4202  * @hw: pointer to the hardware structure
4203  * @vsi_handle: software VSI handle
4204  * @cfg: configure parameters
4205  *
4206  * This function will look up the flow profile based on the input
4207  * hash field bitmap, iterate through the profile entry list of
4208  * that profile and find the entry associated with the input VSI to
4209  * be removed. Calls are made to underlying flow APIs which will in
4210  * turn build or update buffers for the RSS XLT1 section.
4211  */
4212 enum ice_status
4213 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4214                 const struct ice_rss_hash_cfg *cfg)
4215 {
4216         struct ice_rss_hash_cfg local_cfg;
4217         enum ice_status status;
4218
4219         if (!ice_is_vsi_valid(hw, vsi_handle) ||
4220             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4221             cfg->hash_flds == ICE_HASH_INVALID)
4222                 return ICE_ERR_PARAM;
4223
4224         ice_acquire_lock(&hw->rss_locks);
4225         local_cfg = *cfg;
4226         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4227                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4228         } else {
4229                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4230                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4231
4232                 if (!status) {
4233                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4234                         status = ice_rem_rss_cfg_sync(hw, vsi_handle,
4235                                                       &local_cfg);
4236                 }
4237         }
4238         ice_release_lock(&hw->rss_locks);
4239
4240         return status;
4241 }
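
/* Note: removal is keyed on the same header and hash-field selection that was
 * supplied when the configuration was added, e.g. (continuing the sketch
 * above):
 *
 *	status = ice_rem_rss_cfg(hw, vsi_handle, &cfg);
 */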
4242
4243 /**
4244  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
4245  * @hw: pointer to the hardware structure
4246  * @vsi_handle: software VSI handle
4247  */
4248 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
4249 {
4250         enum ice_status status = ICE_SUCCESS;
4251         struct ice_rss_cfg *r;
4252
4253         if (!ice_is_vsi_valid(hw, vsi_handle))
4254                 return ICE_ERR_PARAM;
4255
4256         ice_acquire_lock(&hw->rss_locks);
4257         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4258                             ice_rss_cfg, l_entry) {
4259                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
4260                         status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
4261                         if (status)
4262                                 break;
4263                 }
4264         }
4265         ice_release_lock(&hw->rss_locks);
4266
4267         return status;
4268 }
4269
4270 /**
4271  * ice_get_rss_cfg - returns hashed fields for the given header types
4272  * @hw: pointer to the hardware structure
4273  * @vsi_handle: software VSI handle
4274  * @hdrs: protocol header type
4275  *
4276  * This function will return the match fields of the first instance of a flow
4277  * profile having the given header types and containing the input VSI.
4278  */
4279 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4280 {
4281         u64 rss_hash = ICE_HASH_INVALID;
4282         struct ice_rss_cfg *r;
4283
4284         /* verify that the protocol header is non-zero and the VSI is valid */
4285         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4286                 return ICE_HASH_INVALID;
4287
4288         ice_acquire_lock(&hw->rss_locks);
4289         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4290                             ice_rss_cfg, l_entry)
4291                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4292                     r->hash.addl_hdrs == hdrs) {
4293                         rss_hash = r->hash.hash_flds;
4294                         break;
4295                 }
4296         ice_release_lock(&hw->rss_locks);
4297
4298         return rss_hash;
4299 }