net/ice/base: add profile validation on switch filter
[dpdk.git] / drivers / net / ice / base / ice_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2021 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IPV4_ID         2
17 #define ICE_FLOW_FLD_SZ_IPV6_ID         4
18 #define ICE_FLOW_FLD_SZ_IP_CHKSUM       2
19 #define ICE_FLOW_FLD_SZ_TCP_CHKSUM      2
20 #define ICE_FLOW_FLD_SZ_UDP_CHKSUM      2
21 #define ICE_FLOW_FLD_SZ_SCTP_CHKSUM     4
22 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
23 #define ICE_FLOW_FLD_SZ_IP_TTL          1
24 #define ICE_FLOW_FLD_SZ_IP_PROT         1
25 #define ICE_FLOW_FLD_SZ_PORT            2
26 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
27 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
28 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
29 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
30 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
31 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
32 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
33 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
34 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
35 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
36 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
37 #define ICE_FLOW_FLD_SZ_AH_SPI  4
38 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
39 #define ICE_FLOW_FLD_SZ_VXLAN_VNI       4
40 #define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
41
42 /* Describe properties of a protocol header field */
43 struct ice_flow_field_info {
44         enum ice_flow_seg_hdr hdr;
45         s16 off;        /* Offset from start of a protocol header, in bits */
46         u16 size;       /* Size of fields in bits */
47         u16 mask;       /* 16-bit mask for field */
48 };
49
50 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
51         .hdr = _hdr, \
52         .off = (_offset_bytes) * BITS_PER_BYTE, \
53         .size = (_size_bytes) * BITS_PER_BYTE, \
54         .mask = 0, \
55 }
56
57 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
58         .hdr = _hdr, \
59         .off = (_offset_bytes) * BITS_PER_BYTE, \
60         .size = (_size_bytes) * BITS_PER_BYTE, \
61         .mask = _mask, \
62 }
63
64 /* Table containing properties of supported protocol header fields */
65 static const
66 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
67         /* Ether */
68         /* ICE_FLOW_FIELD_IDX_ETH_DA */
69         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
70         /* ICE_FLOW_FIELD_IDX_ETH_SA */
71         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
72         /* ICE_FLOW_FIELD_IDX_S_VLAN */
73         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
74         /* ICE_FLOW_FIELD_IDX_C_VLAN */
75         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
76         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
77         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
78         /* IPv4 / IPv6 */
79         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
80         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
81                               0x00fc),
82         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
83         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
84                               0x0ff0),
85         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
86         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
87                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
88         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
89         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
90                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
91         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
92         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
93                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
94         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
95         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
96                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
97         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
99         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
100         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
101         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
102         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
103         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
104         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
105         /* ICE_FLOW_FIELD_IDX_IPV4_CHKSUM */
106         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 10, ICE_FLOW_FLD_SZ_IP_CHKSUM),
107         /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
108         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
109                           ICE_FLOW_FLD_SZ_IPV4_ID),
110         /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
111         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
112                           ICE_FLOW_FLD_SZ_IPV6_ID),
113         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
114         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
115                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
116         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
117         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
118                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
119         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
120         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
121                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
122         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
123         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
124                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
125         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
126         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
127                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
128         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
129         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
130                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
131         /* Transport */
132         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
133         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
134         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
135         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
136         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
137         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
138         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
139         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
140         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
141         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
142         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
143         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
144         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
145         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
146         /* ICE_FLOW_FIELD_IDX_TCP_CHKSUM */
147         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 16, ICE_FLOW_FLD_SZ_TCP_CHKSUM),
148         /* ICE_FLOW_FIELD_IDX_UDP_CHKSUM */
149         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 6, ICE_FLOW_FLD_SZ_UDP_CHKSUM),
150         /* ICE_FLOW_FIELD_IDX_SCTP_CHKSUM */
151         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 8,
152                           ICE_FLOW_FLD_SZ_SCTP_CHKSUM),
153         /* ARP */
154         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
155         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
156         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
157         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
158         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
159         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
160         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
161         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
162         /* ICE_FLOW_FIELD_IDX_ARP_OP */
163         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
164         /* ICMP */
165         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
166         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
167         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
168         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
169         /* GRE */
170         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
171         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
172         /* GTP */
173         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
174         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
175                           ICE_FLOW_FLD_SZ_GTP_TEID),
176         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
177         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
178                           ICE_FLOW_FLD_SZ_GTP_TEID),
179         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
180         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
181                           ICE_FLOW_FLD_SZ_GTP_TEID),
182         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
183         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
184                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
185         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
186         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
187                           ICE_FLOW_FLD_SZ_GTP_TEID),
188         /* ICE_FLOW_FIELD_IDX_GTPU_UP_QFI */
189         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_UP, 22,
190                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
191         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
192         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
193                           ICE_FLOW_FLD_SZ_GTP_TEID),
194         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI */
195         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_DWN, 22,
196                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
197         /* PPPOE */
198         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
199         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
200                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
201         /* PFCP */
202         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
203         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
204                           ICE_FLOW_FLD_SZ_PFCP_SEID),
205         /* L2TPV3 */
206         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
207         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
208                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
209         /* ESP */
210         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
211         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
212                           ICE_FLOW_FLD_SZ_ESP_SPI),
213         /* AH */
214         /* ICE_FLOW_FIELD_IDX_AH_SPI */
215         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
216                           ICE_FLOW_FLD_SZ_AH_SPI),
217         /* NAT_T_ESP */
218         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
219         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
220                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
221         /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
222         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
223                           ICE_FLOW_FLD_SZ_VXLAN_VNI),
224         /* ECPRI_TP0 */
225         /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
226         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
227                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
228         /* UDP_ECPRI_TP0 */
229         /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
230         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
231                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
232 };
233
234 /* Bitmaps indicating relevant packet types for a particular protocol header
235  *
236  * Packet types for packets with an Outer/First/Single MAC header
237  */
238 static const u32 ice_ptypes_mac_ofos[] = {
239         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
240         0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
241         0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
242         0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
243         0x00000000, 0x00000000, 0x00000000, 0x00000000,
244         0x00000000, 0x00000000, 0x00000000, 0x00000000,
245         0x00000000, 0x00000000, 0x00000000, 0x00000000,
246         0x00000000, 0x00000000, 0x00000000, 0x00000000,
247 };
248
249 /* Packet types for packets with an Innermost/Last MAC VLAN header */
250 static const u32 ice_ptypes_macvlan_il[] = {
251         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
252         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
253         0x00000000, 0x00000000, 0x00000000, 0x00000000,
254         0x00000000, 0x00000000, 0x00000000, 0x00000000,
255         0x00000000, 0x00000000, 0x00000000, 0x00000000,
256         0x00000000, 0x00000000, 0x00000000, 0x00000000,
257         0x00000000, 0x00000000, 0x00000000, 0x00000000,
258         0x00000000, 0x00000000, 0x00000000, 0x00000000,
259 };
260
261 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
262  * does NOT include IPV4 other PTYPEs
263  */
264 static const u32 ice_ptypes_ipv4_ofos[] = {
265         0x1D800000, 0x24000800, 0x00000000, 0x00000000,
266         0x00000000, 0x00000155, 0x00000000, 0x00000000,
267         0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
268         0x00001500, 0x00000000, 0x00000000, 0x00000000,
269         0x00000000, 0x00000000, 0x00000000, 0x00000000,
270         0x00000000, 0x00000000, 0x00000000, 0x00000000,
271         0x00000000, 0x00000000, 0x00000000, 0x00000000,
272         0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 };
274
275 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
276  * includes IPV4 other PTYPEs
277  */
278 static const u32 ice_ptypes_ipv4_ofos_all[] = {
279         0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
280         0x00000000, 0x00000155, 0x00000000, 0x00000000,
281         0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
282         0x03FFD000, 0x00000000, 0x02FBEFBC, 0x00000000,
283         0x00000000, 0x00000000, 0x00000000, 0x00000000,
284         0x00000000, 0x00000000, 0x00000000, 0x00000000,
285         0x00000000, 0x00000000, 0x00000000, 0x00000000,
286         0x00000000, 0x00000000, 0x00000000, 0x00000000,
287 };
288
289 /* Packet types for packets with an Innermost/Last IPv4 header */
290 static const u32 ice_ptypes_ipv4_il[] = {
291         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
292         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
293         0x00000000, 0x00000000, 0x001FF800, 0x00100000,
294         0xFC0FC000, 0x00000000, 0xBC0BC0BC, 0x00000BC0,
295         0x00000000, 0x00000000, 0x00000000, 0x00000000,
296         0x00000000, 0x00000000, 0x00000000, 0x00000000,
297         0x00000000, 0x00000000, 0x00000000, 0x00000000,
298         0x00000000, 0x00000000, 0x00000000, 0x00000000,
299 };
300
301 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
302  * does NOT include IVP6 other PTYPEs
303  */
304 static const u32 ice_ptypes_ipv6_ofos[] = {
305         0x00000000, 0x00000000, 0x76000000, 0x10002000,
306         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
307         0x00000000, 0x03F00000, 0x00000540, 0x00000000,
308         0x00002A00, 0x00000000, 0x00000000, 0x00000000,
309         0x00000000, 0x00000000, 0x00000000, 0x00000000,
310         0x00000000, 0x00000000, 0x00000000, 0x00000000,
311         0x00000000, 0x00000000, 0x00000000, 0x00000000,
312         0x00000000, 0x00000000, 0x00000000, 0x00000000,
313 };
314
315 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
316  * includes IPV6 other PTYPEs
317  */
318 static const u32 ice_ptypes_ipv6_ofos_all[] = {
319         0x00000000, 0x00000000, 0x76000000, 0x1EFDE000,
320         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
321         0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
322         0xFC002000, 0x0000003F, 0xBC000000, 0x0002FBEF,
323         0x00000000, 0x00000000, 0x00000000, 0x00000000,
324         0x00000000, 0x00000000, 0x00000000, 0x00000000,
325         0x00000000, 0x00000000, 0x00000000, 0x00000000,
326         0x00000000, 0x00000000, 0x00000000, 0x00000000,
327 };
328
329 /* Packet types for packets with an Innermost/Last IPv6 header */
330 static const u32 ice_ptypes_ipv6_il[] = {
331         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
332         0x00000770, 0x00000000, 0x00000000, 0x00000000,
333         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
334         0x03F00000, 0x0000003F, 0x02F02F00, 0x0002F02F,
335         0x00000000, 0x00000000, 0x00000000, 0x00000000,
336         0x00000000, 0x00000000, 0x00000000, 0x00000000,
337         0x00000000, 0x00000000, 0x00000000, 0x00000000,
338         0x00000000, 0x00000000, 0x00000000, 0x00000000,
339 };
340
341 /* Packet types for packets with an Outer/First/Single
342  * non-frag IPv4 header - no L4
343  */
344 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
345         0x10800000, 0x04000800, 0x00000000, 0x00000000,
346         0x00000000, 0x00000000, 0x00000000, 0x00000000,
347         0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
348         0x00001500, 0x00000000, 0x00000000, 0x00000000,
349         0x00000000, 0x00000000, 0x00000000, 0x00000000,
350         0x00000000, 0x00000000, 0x00000000, 0x00000000,
351         0x00000000, 0x00000000, 0x00000000, 0x00000000,
352         0x00000000, 0x00000000, 0x00000000, 0x00000000,
353 };
354
355 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
356 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
357         0x60000000, 0x18043008, 0x80000002, 0x6010c021,
358         0x00000008, 0x00000000, 0x00000000, 0x00000000,
359         0x00000000, 0x00000000, 0x00139800, 0x00000000,
360         0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
361         0x00000000, 0x00000000, 0x00000000, 0x00000000,
362         0x00000000, 0x00000000, 0x00000000, 0x00000000,
363         0x00000000, 0x00000000, 0x00000000, 0x00000000,
364         0x00000000, 0x00000000, 0x00000000, 0x00000000,
365 };
366
367 /* Packet types for packets with an Outer/First/Single
368  * non-frag IPv6 header - no L4
369  */
370 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
371         0x00000000, 0x00000000, 0x42000000, 0x10002000,
372         0x00000000, 0x00000000, 0x00000000, 0x00000000,
373         0x00000000, 0x02300000, 0x00000540, 0x00000000,
374         0x00002A00, 0x00000000, 0x00000000, 0x00000000,
375         0x00000000, 0x00000000, 0x00000000, 0x00000000,
376         0x00000000, 0x00000000, 0x00000000, 0x00000000,
377         0x00000000, 0x00000000, 0x00000000, 0x00000000,
378         0x00000000, 0x00000000, 0x00000000, 0x00000000,
379 };
380
381 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
382 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
383         0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
384         0x00000430, 0x00000000, 0x00000000, 0x00000000,
385         0x00000000, 0x00000000, 0x4e600000, 0x00000000,
386         0x02300000, 0x00000023, 0x00000000, 0x00000000,
387         0x00000000, 0x00000000, 0x00000000, 0x00000000,
388         0x00000000, 0x00000000, 0x00000000, 0x00000000,
389         0x00000000, 0x00000000, 0x00000000, 0x00000000,
390         0x00000000, 0x00000000, 0x00000000, 0x00000000,
391 };
392
393 /* Packet types for packets with an Outermost/First ARP header */
394 static const u32 ice_ptypes_arp_of[] = {
395         0x00000800, 0x00000000, 0x00000000, 0x00000000,
396         0x00000000, 0x00000000, 0x00000000, 0x00000000,
397         0x00000000, 0x00000000, 0x00000000, 0x00000000,
398         0x00000000, 0x00000000, 0x00000000, 0x00000000,
399         0x00000000, 0x00000000, 0x00000000, 0x00000000,
400         0x00000000, 0x00000000, 0x00000000, 0x00000000,
401         0x00000000, 0x00000000, 0x00000000, 0x00000000,
402         0x00000000, 0x00000000, 0x00000000, 0x00000000,
403 };
404
405 /* UDP Packet types for non-tunneled packets or tunneled
406  * packets with inner UDP.
407  */
408 static const u32 ice_ptypes_udp_il[] = {
409         0x81000000, 0x20204040, 0x04000010, 0x80810102,
410         0x00000040, 0x00000000, 0x00000000, 0x00000000,
411         0x00000000, 0x00410000, 0x908427E0, 0x00100007,
412         0x10410000, 0x00000004, 0x10410410, 0x00004104,
413         0x00000000, 0x00000000, 0x00000000, 0x00000000,
414         0x00000000, 0x00000000, 0x00000000, 0x00000000,
415         0x00000000, 0x00000000, 0x00000000, 0x00000000,
416         0x00000000, 0x00000000, 0x00000000, 0x00000000,
417 };
418
419 /* Packet types for packets with an Innermost/Last TCP header */
420 static const u32 ice_ptypes_tcp_il[] = {
421         0x04000000, 0x80810102, 0x10000040, 0x02040408,
422         0x00000102, 0x00000000, 0x00000000, 0x00000000,
423         0x00000000, 0x00820000, 0x21084000, 0x00000000,
424         0x20820000, 0x00000008, 0x20820820, 0x00008208,
425         0x00000000, 0x00000000, 0x00000000, 0x00000000,
426         0x00000000, 0x00000000, 0x00000000, 0x00000000,
427         0x00000000, 0x00000000, 0x00000000, 0x00000000,
428         0x00000000, 0x00000000, 0x00000000, 0x00000000,
429 };
430
431 /* Packet types for packets with an Innermost/Last SCTP header */
432 static const u32 ice_ptypes_sctp_il[] = {
433         0x08000000, 0x01020204, 0x20000081, 0x04080810,
434         0x00000204, 0x00000000, 0x00000000, 0x00000000,
435         0x00000000, 0x01040000, 0x00000000, 0x00000000,
436         0x41040000, 0x00000010, 0x00000000, 0x00000000,
437         0x00000000, 0x00000000, 0x00000000, 0x00000000,
438         0x00000000, 0x00000000, 0x00000000, 0x00000000,
439         0x00000000, 0x00000000, 0x00000000, 0x00000000,
440         0x00000000, 0x00000000, 0x00000000, 0x00000000,
441 };
442
443 /* Packet types for packets with an Outermost/First ICMP header */
444 static const u32 ice_ptypes_icmp_of[] = {
445         0x10000000, 0x00000000, 0x00000000, 0x00000000,
446         0x00000000, 0x00000000, 0x00000000, 0x00000000,
447         0x00000000, 0x00000000, 0x00000000, 0x00000000,
448         0x00000000, 0x00000000, 0x00000000, 0x00000000,
449         0x00000000, 0x00000000, 0x00000000, 0x00000000,
450         0x00000000, 0x00000000, 0x00000000, 0x00000000,
451         0x00000000, 0x00000000, 0x00000000, 0x00000000,
452         0x00000000, 0x00000000, 0x00000000, 0x00000000,
453 };
454
455 /* Packet types for packets with an Innermost/Last ICMP header */
456 static const u32 ice_ptypes_icmp_il[] = {
457         0x00000000, 0x02040408, 0x40000102, 0x08101020,
458         0x00000408, 0x00000000, 0x00000000, 0x00000000,
459         0x00000000, 0x00000000, 0x42108000, 0x00000000,
460         0x82080000, 0x00000020, 0x00000000, 0x00000000,
461         0x00000000, 0x00000000, 0x00000000, 0x00000000,
462         0x00000000, 0x00000000, 0x00000000, 0x00000000,
463         0x00000000, 0x00000000, 0x00000000, 0x00000000,
464         0x00000000, 0x00000000, 0x00000000, 0x00000000,
465 };
466
467 /* Packet types for packets with an Outermost/First GRE header */
468 static const u32 ice_ptypes_gre_of[] = {
469         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
470         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
471         0x00000000, 0x00000000, 0x00000000, 0x00000000,
472         0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF,
473         0x00000000, 0x00000000, 0x00000000, 0x00000000,
474         0x00000000, 0x00000000, 0x00000000, 0x00000000,
475         0x00000000, 0x00000000, 0x00000000, 0x00000000,
476         0x00000000, 0x00000000, 0x00000000, 0x00000000,
477 };
478
479 /* Packet types for packets with an Innermost/Last MAC header */
480 static const u32 ice_ptypes_mac_il[] = {
481         0x00000000, 0x20000000, 0x00000000, 0x00000000,
482         0x00000000, 0x00000000, 0x00000000, 0x00000000,
483         0x00000000, 0x00000000, 0x00000000, 0x00000000,
484         0x00000000, 0x00000000, 0x00000000, 0x00000000,
485         0x00000000, 0x00000000, 0x00000000, 0x00000000,
486         0x00000000, 0x00000000, 0x00000000, 0x00000000,
487         0x00000000, 0x00000000, 0x00000000, 0x00000000,
488         0x00000000, 0x00000000, 0x00000000, 0x00000000,
489 };
490
491 /* Packet types for GTPC */
492 static const u32 ice_ptypes_gtpc[] = {
493         0x00000000, 0x00000000, 0x00000000, 0x00000000,
494         0x00000000, 0x00000000, 0x00000000, 0x00000000,
495         0x00000000, 0x00000000, 0x000001E0, 0x00000000,
496         0x00000000, 0x00000000, 0x00000000, 0x00000000,
497         0x00000000, 0x00000000, 0x00000000, 0x00000000,
498         0x00000000, 0x00000000, 0x00000000, 0x00000000,
499         0x00000000, 0x00000000, 0x00000000, 0x00000000,
500         0x00000000, 0x00000000, 0x00000000, 0x00000000,
501 };
502
503 /* Packet types for VXLAN with VNI */
504 static const u32 ice_ptypes_vxlan_vni[] = {
505         0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
506         0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
507         0x00000000, 0x00000000, 0x00000000, 0x00000000,
508         0x00000000, 0x00000000, 0x00000000, 0x00000000,
509         0x00000000, 0x00000000, 0x00000000, 0x00000000,
510         0x00000000, 0x00000000, 0x00000000, 0x00000000,
511         0x00000000, 0x00000000, 0x00000000, 0x00000000,
512         0x00000000, 0x00000000, 0x00000000, 0x00000000,
513 };
514
515 /* Packet types for GTPC with TEID */
516 static const u32 ice_ptypes_gtpc_tid[] = {
517         0x00000000, 0x00000000, 0x00000000, 0x00000000,
518         0x00000000, 0x00000000, 0x00000000, 0x00000000,
519         0x00000000, 0x00000000, 0x00000060, 0x00000000,
520         0x00000000, 0x00000000, 0x00000000, 0x00000000,
521         0x00000000, 0x00000000, 0x00000000, 0x00000000,
522         0x00000000, 0x00000000, 0x00000000, 0x00000000,
523         0x00000000, 0x00000000, 0x00000000, 0x00000000,
524         0x00000000, 0x00000000, 0x00000000, 0x00000000,
525 };
526
527 /* Packet types for GTPU */
528 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
529         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
530         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
531         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
532         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
533         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
534         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
535         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
536         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
537         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
538         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
539         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
540         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
541         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
542         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
543         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
544         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
545         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
546         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
547         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
548         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
549         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
550         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
551         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
552         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
553         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
554         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
555         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
556         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
557         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
558         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
559         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
560         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
561         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
562         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
563         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
564         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
565         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
566         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
567         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
568         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
569         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
570         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
571         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
572         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
573         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
574         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
575         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
576         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
577         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
578         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
579         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
580         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
581         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
582         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
583         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
584         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
585         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
586         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
587         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
588         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
589 };
590
591 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
592         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
593         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
594         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
595         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
596         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
597         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
598         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
599         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
600         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
601         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
602         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
603         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
604         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
605         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
606         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
607         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
608         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
609         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
610         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
611         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
612         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
613         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
614         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
615         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
616         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
617         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
618         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
619         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
620         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
621         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
622         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
623         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
624         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
625         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
626         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
627         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
628         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
629         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
630         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
631         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
632         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
633         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
634         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
635         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
636         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
637         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
638         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
639         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
640         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
641         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
642         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
643         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
644         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
645         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
646         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
647         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
648         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
649         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
650         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
651         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
652 };
653
654 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
655         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
656         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
657         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
658         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
659         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
660         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
661         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
662         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
663         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
664         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
665         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
666         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
667         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
668         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
669         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
670         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
671         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
672         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
673         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
674         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
675         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
676         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
677         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
678         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
679         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
680         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
681         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
682         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
683         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
684         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
685         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
686         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
687         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
688         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
689         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
690         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
691         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
692         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
693         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
694         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
695         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
696         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
697         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
698         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
699         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
700         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
701         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
702         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
703         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
704         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
705         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
706         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
707         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
708         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
709         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
710         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
711         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
712         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
713         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
714         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
715 };
716
717 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
718         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
719         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
720         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
721         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
722         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
723         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
724         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
725         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
726         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
727         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
728         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
729         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
730         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
731         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
732         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
733         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
734         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
735         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
736         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
737         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
738         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
739         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
740         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
741         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
742         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
743         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
744         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
745         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
746         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
747         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
748         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
749         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
750         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
751         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
752         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
753         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
754         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
755         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
756         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
757         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
758         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
759         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
760         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
761         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
762         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
763         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
764         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
765         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
766         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
767         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
768         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
769         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
770         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
771         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
772         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
773         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
774         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
775         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
776         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
777         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
778 };
779
780 static const u32 ice_ptypes_gtpu[] = {
781         0x00000000, 0x00000000, 0x00000000, 0x00000000,
782         0x00000000, 0x00000000, 0x00000000, 0x00000000,
783         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
784         0x00000000, 0x0000003F, 0xBEFBEFBC, 0x0002FBEF,
785         0x00000000, 0x00000000, 0x00000000, 0x00000000,
786         0x00000000, 0x00000000, 0x00000000, 0x00000000,
787         0x00000000, 0x00000000, 0x00000000, 0x00000000,
788         0x00000000, 0x00000000, 0x00000000, 0x00000000,
789 };
790
791 /* Packet types for pppoe */
792 static const u32 ice_ptypes_pppoe[] = {
793         0x00000000, 0x00000000, 0x00000000, 0x00000000,
794         0x00000000, 0x00000000, 0x00000000, 0x00000000,
795         0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
796         0x00000000, 0x00000000, 0x00000000, 0x00000000,
797         0x00000000, 0x00000000, 0x00000000, 0x00000000,
798         0x00000000, 0x00000000, 0x00000000, 0x00000000,
799         0x00000000, 0x00000000, 0x00000000, 0x00000000,
800         0x00000000, 0x00000000, 0x00000000, 0x00000000,
801 };
802
803 /* Packet types for packets with PFCP NODE header */
804 static const u32 ice_ptypes_pfcp_node[] = {
805         0x00000000, 0x00000000, 0x00000000, 0x00000000,
806         0x00000000, 0x00000000, 0x00000000, 0x00000000,
807         0x00000000, 0x00000000, 0x80000000, 0x00000002,
808         0x00000000, 0x00000000, 0x00000000, 0x00000000,
809         0x00000000, 0x00000000, 0x00000000, 0x00000000,
810         0x00000000, 0x00000000, 0x00000000, 0x00000000,
811         0x00000000, 0x00000000, 0x00000000, 0x00000000,
812         0x00000000, 0x00000000, 0x00000000, 0x00000000,
813 };
814
815 /* Packet types for packets with PFCP SESSION header */
816 static const u32 ice_ptypes_pfcp_session[] = {
817         0x00000000, 0x00000000, 0x00000000, 0x00000000,
818         0x00000000, 0x00000000, 0x00000000, 0x00000000,
819         0x00000000, 0x00000000, 0x00000000, 0x00000005,
820         0x00000000, 0x00000000, 0x00000000, 0x00000000,
821         0x00000000, 0x00000000, 0x00000000, 0x00000000,
822         0x00000000, 0x00000000, 0x00000000, 0x00000000,
823         0x00000000, 0x00000000, 0x00000000, 0x00000000,
824         0x00000000, 0x00000000, 0x00000000, 0x00000000,
825 };
826
827 /* Packet types for l2tpv3 */
828 static const u32 ice_ptypes_l2tpv3[] = {
829         0x00000000, 0x00000000, 0x00000000, 0x00000000,
830         0x00000000, 0x00000000, 0x00000000, 0x00000000,
831         0x00000000, 0x00000000, 0x00000000, 0x00000300,
832         0x00000000, 0x00000000, 0x00000000, 0x00000000,
833         0x00000000, 0x00000000, 0x00000000, 0x00000000,
834         0x00000000, 0x00000000, 0x00000000, 0x00000000,
835         0x00000000, 0x00000000, 0x00000000, 0x00000000,
836         0x00000000, 0x00000000, 0x00000000, 0x00000000,
837 };
838
839 /* Packet types for esp */
840 static const u32 ice_ptypes_esp[] = {
841         0x00000000, 0x00000000, 0x00000000, 0x00000000,
842         0x00000000, 0x00000003, 0x00000000, 0x00000000,
843         0x00000000, 0x00000000, 0x00000000, 0x00000000,
844         0x00000000, 0x00000000, 0x00000000, 0x00000000,
845         0x00000000, 0x00000000, 0x00000000, 0x00000000,
846         0x00000000, 0x00000000, 0x00000000, 0x00000000,
847         0x00000000, 0x00000000, 0x00000000, 0x00000000,
848         0x00000000, 0x00000000, 0x00000000, 0x00000000,
849 };
850
851 /* Packet types for ah */
852 static const u32 ice_ptypes_ah[] = {
853         0x00000000, 0x00000000, 0x00000000, 0x00000000,
854         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
855         0x00000000, 0x00000000, 0x00000000, 0x00000000,
856         0x00000000, 0x00000000, 0x00000000, 0x00000000,
857         0x00000000, 0x00000000, 0x00000000, 0x00000000,
858         0x00000000, 0x00000000, 0x00000000, 0x00000000,
859         0x00000000, 0x00000000, 0x00000000, 0x00000000,
860         0x00000000, 0x00000000, 0x00000000, 0x00000000,
861 };
862
863 /* Packet types for packets with NAT_T ESP header */
864 static const u32 ice_ptypes_nat_t_esp[] = {
865         0x00000000, 0x00000000, 0x00000000, 0x00000000,
866         0x00000000, 0x00000030, 0x00000000, 0x00000000,
867         0x00000000, 0x00000000, 0x00000000, 0x00000000,
868         0x00000000, 0x00000000, 0x00000000, 0x00000000,
869         0x00000000, 0x00000000, 0x00000000, 0x00000000,
870         0x00000000, 0x00000000, 0x00000000, 0x00000000,
871         0x00000000, 0x00000000, 0x00000000, 0x00000000,
872         0x00000000, 0x00000000, 0x00000000, 0x00000000,
873 };
874
875 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
876         0x00000846, 0x00000000, 0x00000000, 0x00000000,
877         0x00000000, 0x00000000, 0x00000000, 0x00000000,
878         0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
879         0x00000000, 0x00000000, 0x00000000, 0x00000000,
880         0x00000000, 0x00000000, 0x00000000, 0x00000000,
881         0x00000000, 0x00000000, 0x00000000, 0x00000000,
882         0x00000000, 0x00000000, 0x00000000, 0x00000000,
883         0x00000000, 0x00000000, 0x00000000, 0x00000000,
884 };
885
886 static const u32 ice_ptypes_gtpu_no_ip[] = {
887         0x00000000, 0x00000000, 0x00000000, 0x00000000,
888         0x00000000, 0x00000000, 0x00000000, 0x00000000,
889         0x00000000, 0x00000000, 0x00000600, 0x00000000,
890         0x00000000, 0x00000000, 0x00000000, 0x00000000,
891         0x00000000, 0x00000000, 0x00000000, 0x00000000,
892         0x00000000, 0x00000000, 0x00000000, 0x00000000,
893         0x00000000, 0x00000000, 0x00000000, 0x00000000,
894         0x00000000, 0x00000000, 0x00000000, 0x00000000,
895 };
896
897 static const u32 ice_ptypes_ecpri_tp0[] = {
898         0x00000000, 0x00000000, 0x00000000, 0x00000000,
899         0x00000000, 0x00000000, 0x00000000, 0x00000000,
900         0x00000000, 0x00000000, 0x00000000, 0x00000400,
901         0x00000000, 0x00000000, 0x00000000, 0x00000000,
902         0x00000000, 0x00000000, 0x00000000, 0x00000000,
903         0x00000000, 0x00000000, 0x00000000, 0x00000000,
904         0x00000000, 0x00000000, 0x00000000, 0x00000000,
905         0x00000000, 0x00000000, 0x00000000, 0x00000000,
906 };
907
908 static const u32 ice_ptypes_udp_ecpri_tp0[] = {
909         0x00000000, 0x00000000, 0x00000000, 0x00000000,
910         0x00000000, 0x00000000, 0x00000000, 0x00000000,
911         0x00000000, 0x00000000, 0x00000000, 0x00100000,
912         0x00000000, 0x00000000, 0x00000000, 0x00000000,
913         0x00000000, 0x00000000, 0x00000000, 0x00000000,
914         0x00000000, 0x00000000, 0x00000000, 0x00000000,
915         0x00000000, 0x00000000, 0x00000000, 0x00000000,
916         0x00000000, 0x00000000, 0x00000000, 0x00000000,
917 };
918
919 static const u32 ice_ptypes_l2tpv2[] = {
920         0x00000000, 0x00000000, 0x00000000, 0x00000000,
921         0x00000000, 0x00000000, 0x00000000, 0x00000000,
922         0x00000000, 0x00000000, 0x00000000, 0x00000000,
923         0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
924         0x00000000, 0x00000000, 0x00000000, 0x00000000,
925         0x00000000, 0x00000000, 0x00000000, 0x00000000,
926         0x00000000, 0x00000000, 0x00000000, 0x00000000,
927         0x00000000, 0x00000000, 0x00000000, 0x00000000,
928 };
929
930 static const u32 ice_ptypes_ppp[] = {
931         0x00000000, 0x00000000, 0x00000000, 0x00000000,
932         0x00000000, 0x00000000, 0x00000000, 0x00000000,
933         0x00000000, 0x00000000, 0x00000000, 0x00000000,
934         0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
935         0x00000000, 0x00000000, 0x00000000, 0x00000000,
936         0x00000000, 0x00000000, 0x00000000, 0x00000000,
937         0x00000000, 0x00000000, 0x00000000, 0x00000000,
938         0x00000000, 0x00000000, 0x00000000, 0x00000000,
939 };
940
941 static const u32 ice_ptypes_ipv4_frag[] = {
942         0x00400000, 0x00000000, 0x00000000, 0x00000000,
943         0x00000000, 0x00000000, 0x00000000, 0x00000000,
944         0x00000000, 0x00000000, 0x00000000, 0x00000000,
945         0x00000000, 0x00000000, 0x00000000, 0x00000000,
946         0x00000000, 0x00000000, 0x00000000, 0x00000000,
947         0x00000000, 0x00000000, 0x00000000, 0x00000000,
948         0x00000000, 0x00000000, 0x00000000, 0x00000000,
949         0x00000000, 0x00000000, 0x00000000, 0x00000000,
950 };
951
952 static const u32 ice_ptypes_ipv6_frag[] = {
953         0x00000000, 0x00000000, 0x01000000, 0x00000000,
954         0x00000000, 0x00000000, 0x00000000, 0x00000000,
955         0x00000000, 0x00000000, 0x00000000, 0x00000000,
956         0x00000000, 0x00000000, 0x00000000, 0x00000000,
957         0x00000000, 0x00000000, 0x00000000, 0x00000000,
958         0x00000000, 0x00000000, 0x00000000, 0x00000000,
959         0x00000000, 0x00000000, 0x00000000, 0x00000000,
960         0x00000000, 0x00000000, 0x00000000, 0x00000000,
961 };
962
963 /* Manage parameters and info. used during the creation of a flow profile */
964 struct ice_flow_prof_params {
965         enum ice_block blk;
966         u16 entry_length; /* # of bytes formatted entry will require */
967         u8 es_cnt;
968         struct ice_flow_prof *prof;
969
970         /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
971          * This will give us the direction flags.
972          */
973         struct ice_fv_word es[ICE_MAX_FV_WORDS];
974         /* attributes can be used to add attributes to a particular PTYPE */
975         const struct ice_ptype_attributes *attr;
976         u16 attr_cnt;
977
978         u16 mask[ICE_MAX_FV_WORDS];
979         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
980 };
981
982 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
983         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
984         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
985         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
986         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
987         ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
988         ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
989         ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP | ICE_FLOW_SEG_HDR_GRE)
990
991 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
992         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
993 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
994         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
995          ICE_FLOW_SEG_HDR_ARP)
996 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
997         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
998          ICE_FLOW_SEG_HDR_SCTP)
999 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
1000 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
1001         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
1002
1003 /**
1004  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
1005  * @segs: array of one or more packet segments that describe the flow
1006  * @segs_cnt: number of packet segments provided
1007  */
1008 static enum ice_status
1009 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
1010 {
1011         u8 i;
1012
1013         for (i = 0; i < segs_cnt; i++) {
1014                 /* Multiple L3 headers */
1015                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
1016                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
1017                         return ICE_ERR_PARAM;
1018
1019                 /* Multiple L4 headers */
1020                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
1021                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
1022                         return ICE_ERR_PARAM;
1023         }
1024
1025         return ICE_SUCCESS;
1026 }
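
/* Illustrative sketch, not part of the original code: ice_flow_val_hdrs()
 * accepts at most one L3 and one L4 header per segment. Assuming the
 * ICE_FLOW_SET_HDRS() helper declared in ice_flow.h (which ORs header flags
 * into the segment), a caller would see:
 *
 *	struct ice_flow_seg_info seg = { 0 };
 *
 *	ICE_FLOW_SET_HDRS(&seg, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP);
 *	ice_flow_val_hdrs(&seg, 1);	returns ICE_SUCCESS (one L3, one L4)
 *
 *	ICE_FLOW_SET_HDRS(&seg, ICE_FLOW_SEG_HDR_IPV6);
 *	ice_flow_val_hdrs(&seg, 1);	returns ICE_ERR_PARAM (IPV4 and IPV6
 *					are now both set, so the L3 mask is no
 *					longer a power of two)
 */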
1027
1028 /* Sizes of fixed known protocol headers without header options */
1029 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
1030 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
1031 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
1032 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
1033 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
1034 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
1035 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
1036 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
1037 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
1038
1039 /**
1040  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
1041  * @params: information about the flow to be processed
1042  * @seg: index of packet segment whose header size is to be determined
1043  */
1044 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
1045 {
1046         u16 sz;
1047
1048         /* L2 headers */
1049         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
1050                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
1051
1052         /* L3 headers */
1053         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
1054                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
1055         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
1056                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
1057         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
1058                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
1059         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
1060                 /* An L3 header is required if an L4 header is specified */
1061                 return 0;
1062
1063         /* L4 headers */
1064         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
1065                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
1066         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
1067                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
1068         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
1069                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
1070         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
1071                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
1072
1073         return sz;
1074 }
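
/* Worked example, illustrative only: for a segment whose hdrs include VLAN,
 * IPv4 and UDP, ice_flow_calc_seg_sz() returns
 *
 *	ICE_FLOW_PROT_HDR_SZ_MAC_VLAN + ICE_FLOW_PROT_HDR_SZ_IPV4 +
 *	ICE_FLOW_PROT_HDR_SZ_UDP = 16 + 20 + 8 = 44 bytes
 *
 * while a segment that requests an L4 header with no L3 header yields 0,
 * which callers such as ice_flow_xtract_raws() treat as an error.
 */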
1075
1076 /**
1077  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
1078  * @params: information about the flow to be processed
1079  *
1080  * This function identifies the packet types associated with the protocol
1081  * headers being present in packet segments of the specified flow profile.
1082  */
1083 static enum ice_status
1084 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
1085 {
1086         struct ice_flow_prof *prof;
1087         u8 i;
1088
1089         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
1090                    ICE_NONDMA_MEM);
1091
1092         prof = params->prof;
1093
1094         for (i = 0; i < params->prof->segs_cnt; i++) {
1095                 const ice_bitmap_t *src;
1096                 u32 hdrs;
1097
1098                 hdrs = prof->segs[i].hdrs;
1099
1100                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
1101                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
1102                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
1103                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1104                                        ICE_FLOW_PTYPE_MAX);
1105                 }
1106
1107                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
1108                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
1109                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1110                                        ICE_FLOW_PTYPE_MAX);
1111                 }
1112
1113                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
1114                         ice_and_bitmap(params->ptypes, params->ptypes,
1115                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
1116                                        ICE_FLOW_PTYPE_MAX);
1117                 }
1118
1119                 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
1120                         src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
1121                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1122                                        ICE_FLOW_PTYPE_MAX);
1123                 }
1124                 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1125                     (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1126                         src = i ?
1127                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
1128                                 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
1129                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1130                                        ICE_FLOW_PTYPE_MAX);
1131                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1132                            (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1133                         src = i ?
1134                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
1135                                 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
1136                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1137                                        ICE_FLOW_PTYPE_MAX);
1138                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1139                                 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1140                         src = (const ice_bitmap_t *)ice_ptypes_ipv4_frag;
1141                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1142                                        ICE_FLOW_PTYPE_MAX);
1143                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1144                                 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1145                         src = (const ice_bitmap_t *)ice_ptypes_ipv6_frag;
1146                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1147                                        ICE_FLOW_PTYPE_MAX);
1148                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1149                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1150                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
1151                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
1152                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1153                                        ICE_FLOW_PTYPE_MAX);
1154                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
1155                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
1156                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
1157                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1158                                        ICE_FLOW_PTYPE_MAX);
1159                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1160                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1161                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
1162                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
1163                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1164                                        ICE_FLOW_PTYPE_MAX);
1165                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
1166                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
1167                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
1168                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1169                                        ICE_FLOW_PTYPE_MAX);
1170                 }
1171
1172                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
1173                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
1174                         ice_and_bitmap(params->ptypes, params->ptypes,
1175                                        src, ICE_FLOW_PTYPE_MAX);
1176                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
1177                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1178                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1179                                        ICE_FLOW_PTYPE_MAX);
1180                 } else {
1181                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1182                         ice_andnot_bitmap(params->ptypes, params->ptypes, src,
1183                                           ICE_FLOW_PTYPE_MAX);
1184                 }
1185
1186                 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
1187                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
1188                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1189                                        ICE_FLOW_PTYPE_MAX);
1190                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
1191                         ice_and_bitmap(params->ptypes, params->ptypes,
1192                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
1193                                        ICE_FLOW_PTYPE_MAX);
1194                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
1195                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
1196                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1197                                        ICE_FLOW_PTYPE_MAX);
1198                 }
1199
1200                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
1201                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
1202                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
1203                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1204                                        ICE_FLOW_PTYPE_MAX);
1205                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
1206                         src = (const ice_bitmap_t *)ice_ptypes_gre_of;
1207                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1208                                        ICE_FLOW_PTYPE_MAX);
1209                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
1210                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
1211                         ice_and_bitmap(params->ptypes, params->ptypes,
1212                                        src, ICE_FLOW_PTYPE_MAX);
1213                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
1214                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
1215                         ice_and_bitmap(params->ptypes, params->ptypes,
1216                                        src, ICE_FLOW_PTYPE_MAX);
1217                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
1218                         src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
1219                         ice_and_bitmap(params->ptypes, params->ptypes,
1220                                        src, ICE_FLOW_PTYPE_MAX);
1221                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
1222                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1223                         ice_and_bitmap(params->ptypes, params->ptypes,
1224                                        src, ICE_FLOW_PTYPE_MAX);
1225
1226                         /* Attributes for GTP packet with downlink */
1227                         params->attr = ice_attr_gtpu_down;
1228                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1229                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
1230                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1231                         ice_and_bitmap(params->ptypes, params->ptypes,
1232                                        src, ICE_FLOW_PTYPE_MAX);
1233
1234                         /* Attributes for GTP packet with uplink */
1235                         params->attr = ice_attr_gtpu_up;
1236                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1237                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
1238                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1239                         ice_and_bitmap(params->ptypes, params->ptypes,
1240                                        src, ICE_FLOW_PTYPE_MAX);
1241
1242                         /* Attributes for GTP packet with Extension Header */
1243                         params->attr = ice_attr_gtpu_eh;
1244                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1245                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1246                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1247                         ice_and_bitmap(params->ptypes, params->ptypes,
1248                                        src, ICE_FLOW_PTYPE_MAX);
1249
1250                         /* Attributes for GTP packet without Extension Header */
1251                         params->attr = ice_attr_gtpu_session;
1252                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1253                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
1254                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
1255                         ice_and_bitmap(params->ptypes, params->ptypes,
1256                                        src, ICE_FLOW_PTYPE_MAX);
1257                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1258                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1259                         ice_and_bitmap(params->ptypes, params->ptypes,
1260                                        src, ICE_FLOW_PTYPE_MAX);
1261                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1262                         src = (const ice_bitmap_t *)ice_ptypes_esp;
1263                         ice_and_bitmap(params->ptypes, params->ptypes,
1264                                        src, ICE_FLOW_PTYPE_MAX);
1265                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1266                         src = (const ice_bitmap_t *)ice_ptypes_ah;
1267                         ice_and_bitmap(params->ptypes, params->ptypes,
1268                                        src, ICE_FLOW_PTYPE_MAX);
1269                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1270                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1271                         ice_and_bitmap(params->ptypes, params->ptypes,
1272                                        src, ICE_FLOW_PTYPE_MAX);
1273                 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1274                         src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1275                         ice_and_bitmap(params->ptypes, params->ptypes,
1276                                        src, ICE_FLOW_PTYPE_MAX);
1277                 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1278                         src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1279                         ice_and_bitmap(params->ptypes, params->ptypes,
1280                                        src, ICE_FLOW_PTYPE_MAX);
1281                 }
1282
1283                 if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
1284                         src = (const ice_bitmap_t *)ice_ptypes_ppp;
1285                         ice_and_bitmap(params->ptypes, params->ptypes,
1286                                        src, ICE_FLOW_PTYPE_MAX);
1287                 }
1288
1289                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1290                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1291                                 src =
1292                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1293                         else
1294                                 src =
1295                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1296
1297                         ice_and_bitmap(params->ptypes, params->ptypes,
1298                                        src, ICE_FLOW_PTYPE_MAX);
1299                 } else {
1300                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1301                         ice_andnot_bitmap(params->ptypes, params->ptypes,
1302                                           src, ICE_FLOW_PTYPE_MAX);
1303
1304                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1305                         ice_andnot_bitmap(params->ptypes, params->ptypes,
1306                                           src, ICE_FLOW_PTYPE_MAX);
1307                 }
1308         }
1309
1310         return ICE_SUCCESS;
1311 }
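
/* Conceptual sketch, not part of the original code: the PTYPE resolution
 * above is a set intersection. params->ptypes starts as the universal set
 * (memset to 0xff) and every header present in a segment narrows it, e.g.
 *
 *	ptypes = ALL;
 *	if (hdrs & ICE_FLOW_SEG_HDR_IPV4)
 *		ptypes &= the matching ice_ptypes_ipv4 table (outer or inner);
 *	if (hdrs & ICE_FLOW_SEG_HDR_UDP)
 *		ptypes &= ice_ptypes_udp_il;
 *
 * so only packet types that carry every requested header survive.
 */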
1312
1313 /**
1314  * ice_flow_xtract_pkt_flags - Create an extraction sequence entry for packet flags
1315  * @hw: pointer to the HW struct
1316  * @params: information about the flow to be processed
1317  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1318  *
1319  * This function will allocate an extraction sequence entry for a DWORD-sized
1320  * chunk of the packet flags.
1321  */
1322 static enum ice_status
1323 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1324                           struct ice_flow_prof_params *params,
1325                           enum ice_flex_mdid_pkt_flags flags)
1326 {
1327         u8 fv_words = hw->blk[params->blk].es.fvw;
1328         u8 idx;
1329
1330         /* Make sure the number of extraction sequence entries required does not
1331          * exceed the block's capacity.
1332          */
1333         if (params->es_cnt >= fv_words)
1334                 return ICE_ERR_MAX_LIMIT;
1335
1336         /* some blocks require a reversed field vector layout */
1337         if (hw->blk[params->blk].es.reverse)
1338                 idx = fv_words - params->es_cnt - 1;
1339         else
1340                 idx = params->es_cnt;
1341
1342         params->es[idx].prot_id = ICE_PROT_META_ID;
1343         params->es[idx].off = flags;
1344         params->es_cnt++;
1345
1346         return ICE_SUCCESS;
1347 }
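
/* Illustrative note: for blocks with a reversed field vector layout the
 * entry index counts down from the end of the vector. Assuming, purely as an
 * example, a block with fvw == 48 and es_cnt == 0, the computation above
 * places the packet-flags entry at
 *
 *	idx = fv_words - params->es_cnt - 1 = 48 - 0 - 1 = 47
 *
 * whereas a non-reversed block would simply use idx = es_cnt = 0.
 */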
1348
1349 /**
1350  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1351  * @hw: pointer to the HW struct
1352  * @params: information about the flow to be processed
1353  * @seg: packet segment index of the field to be extracted
1354  * @fld: ID of field to be extracted
1355  * @match: bit field of all matched fields in the packet segment
1356  *
1357  * This function determines the protocol ID, offset, and size of the given
1358  * field. It then allocates one or more extraction sequence entries for the
1359  * given field, and fills the entries with protocol ID and offset information.
1360  */
1361 static enum ice_status
1362 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1363                     u8 seg, enum ice_flow_field fld, u64 match)
1364 {
1365         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1366         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1367         u8 fv_words = hw->blk[params->blk].es.fvw;
1368         struct ice_flow_fld_info *flds;
1369         u16 cnt, ese_bits, i;
1370         u16 sib_mask = 0;
1371         u16 mask;
1372         u16 off;
1373
1374         flds = params->prof->segs[seg].fields;
1375
1376         switch (fld) {
1377         case ICE_FLOW_FIELD_IDX_ETH_DA:
1378         case ICE_FLOW_FIELD_IDX_ETH_SA:
1379         case ICE_FLOW_FIELD_IDX_S_VLAN:
1380         case ICE_FLOW_FIELD_IDX_C_VLAN:
1381                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1382                 break;
1383         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1384                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1385                 break;
1386         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1387                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1388                 break;
1389         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1390                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1391                 break;
1392         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1393         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1394                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1395
1396                 /* TTL and PROT share the same extraction sequence entry;
1397                  * each is considered the sibling of the other with respect
1398                  * to that shared entry.
1399                  */
1400                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1401                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1402                 else
1403                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1404
1405                 /* If the sibling field is also included, that field's
1406                  * mask needs to be included.
1407                  */
1408                 if (match & BIT(sib))
1409                         sib_mask = ice_flds_info[sib].mask;
1410                 break;
1411         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1412         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1413                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1414
1415                 /* TTL and PROT share the same extraction sequence entry;
1416                  * each is considered the sibling of the other with respect
1417                  * to that shared entry.
1418                  */
1419                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1420                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1421                 else
1422                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1423
1424                 /* If the sibling field is also included, that field's
1425                  * mask needs to be included.
1426                  */
1427                 if (match & BIT(sib))
1428                         sib_mask = ice_flds_info[sib].mask;
1429                 break;
1430         case ICE_FLOW_FIELD_IDX_IPV4_SA:
1431         case ICE_FLOW_FIELD_IDX_IPV4_DA:
1432         case ICE_FLOW_FIELD_IDX_IPV4_CHKSUM:
1433                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1434                 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1435                     params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1436                     seg == 1)
1437                         prot_id = ICE_PROT_IPV4_IL_IL;
1438                 break;
1439         case ICE_FLOW_FIELD_IDX_IPV4_ID:
1440                 prot_id = ICE_PROT_IPV4_OF_OR_S;
1441                 break;
1442         case ICE_FLOW_FIELD_IDX_IPV6_SA:
1443         case ICE_FLOW_FIELD_IDX_IPV6_DA:
1444         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1445         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1446         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1447         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1448         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1449         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1450                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1451                 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1452                     params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1453                     seg == 1)
1454                         prot_id = ICE_PROT_IPV6_IL_IL;
1455                 break;
1456         case ICE_FLOW_FIELD_IDX_IPV6_ID:
1457                 prot_id = ICE_PROT_IPV6_FRAG;
1458                 break;
1459         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1460         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1461         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1462         case ICE_FLOW_FIELD_IDX_TCP_CHKSUM:
1463                 prot_id = ICE_PROT_TCP_IL;
1464                 break;
1465         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1466         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1467         case ICE_FLOW_FIELD_IDX_UDP_CHKSUM:
1468                 prot_id = ICE_PROT_UDP_IL_OR_S;
1469                 break;
1470         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1471         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1472         case ICE_FLOW_FIELD_IDX_SCTP_CHKSUM:
1473                 prot_id = ICE_PROT_SCTP_IL;
1474                 break;
1475         case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1476         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1477         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1478         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1479         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1480         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1481         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1482         case ICE_FLOW_FIELD_IDX_GTPU_UP_QFI:
1483         case ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI:
1484                 /* GTP is accessed through UDP OF protocol */
1485                 prot_id = ICE_PROT_UDP_OF;
1486                 break;
1487         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1488                 prot_id = ICE_PROT_PPPOE;
1489                 break;
1490         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1491                 prot_id = ICE_PROT_UDP_IL_OR_S;
1492                 break;
1493         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1494                 prot_id = ICE_PROT_L2TPV3;
1495                 break;
1496         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1497                 prot_id = ICE_PROT_ESP_F;
1498                 break;
1499         case ICE_FLOW_FIELD_IDX_AH_SPI:
1500                 prot_id = ICE_PROT_ESP_2;
1501                 break;
1502         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1503                 prot_id = ICE_PROT_UDP_IL_OR_S;
1504                 break;
1505         case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1506                 prot_id = ICE_PROT_ECPRI;
1507                 break;
1508         case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1509                 prot_id = ICE_PROT_UDP_IL_OR_S;
1510                 break;
1511         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1512         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1513         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1514         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1515         case ICE_FLOW_FIELD_IDX_ARP_OP:
1516                 prot_id = ICE_PROT_ARP_OF;
1517                 break;
1518         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1519         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1520                 /* ICMP type and code share the same extraction seq. entry */
1521                 prot_id = (params->prof->segs[seg].hdrs &
1522                            ICE_FLOW_SEG_HDR_IPV4) ?
1523                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1524                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1525                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1526                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1527                 break;
1528         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1529                 prot_id = ICE_PROT_GRE_OF;
1530                 break;
1531         default:
1532                 return ICE_ERR_NOT_IMPL;
1533         }
1534
1535         /* Each extraction sequence entry is a word in size, and extracts data
1536          * starting at a word-aligned offset within a protocol header.
1537          */
1538         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1539
1540         flds[fld].xtrct.prot_id = prot_id;
1541         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1542                 ICE_FLOW_FV_EXTRACT_SZ;
1543         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1544         flds[fld].xtrct.idx = params->es_cnt;
1545         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1546
1547         /* Adjust the next field-entry index after accommodating the number of
1548          * entries this field consumes
1549          */
1550         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1551                                   ice_flds_info[fld].size, ese_bits);
1552
1553         /* Fill in the extraction sequence entries needed for this field */
1554         off = flds[fld].xtrct.off;
1555         mask = flds[fld].xtrct.mask;
1556         for (i = 0; i < cnt; i++) {
1557                 /* Only consume an extraction sequence entry if there is no
1558                  * sibling field associated with this field, or the sibling entry
1559                  * does not already extract the word shared with this field.
1560                  */
1561                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1562                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1563                     flds[sib].xtrct.off != off) {
1564                         u8 idx;
1565
1566                         /* Make sure the number of extraction sequence entries
1567                          * required does not exceed the block's capacity.
1568                          */
1569                         if (params->es_cnt >= fv_words)
1570                                 return ICE_ERR_MAX_LIMIT;
1571
1572                         /* some blocks require a reversed field vector layout */
1573                         if (hw->blk[params->blk].es.reverse)
1574                                 idx = fv_words - params->es_cnt - 1;
1575                         else
1576                                 idx = params->es_cnt;
1577
1578                         params->es[idx].prot_id = prot_id;
1579                         params->es[idx].off = off;
1580                         params->mask[idx] = mask | sib_mask;
1581                         params->es_cnt++;
1582                 }
1583
1584                 off += ICE_FLOW_FV_EXTRACT_SZ;
1585         }
1586
1587         return ICE_SUCCESS;
1588 }
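
/* Worked example, illustrative only and assuming ICE_FLOW_FV_EXTRACT_SZ is
 * 2 bytes: a one-byte field located at byte offset 9 of its protocol header
 * has a bit offset of 72, so with ese_bits == 16 the code above computes
 *
 *	xtrct.off  = (72 / 16) * 2 = 8		word-aligned byte offset
 *	xtrct.disp = 72 % 16       = 8		bit displacement in that word
 *	cnt        = DIVIDE_AND_ROUND_UP(8 + 8, 16) = 1 extraction entry
 *
 * A two-byte field starting at the same odd byte offset would instead give
 * cnt == 2 and consume two consecutive extraction sequence entries.
 */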
1589
1590 /**
1591  * ice_flow_xtract_raws - Create extraction sequence entries for raw bytes
1592  * @hw: pointer to the HW struct
1593  * @params: information about the flow to be processed
1594  * @seg: index of packet segment whose raw fields are to be extracted
1595  */
1596 static enum ice_status
1597 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1598                      u8 seg)
1599 {
1600         u16 fv_words;
1601         u16 hdrs_sz;
1602         u8 i;
1603
1604         if (!params->prof->segs[seg].raws_cnt)
1605                 return ICE_SUCCESS;
1606
1607         if (params->prof->segs[seg].raws_cnt >
1608             ARRAY_SIZE(params->prof->segs[seg].raws))
1609                 return ICE_ERR_MAX_LIMIT;
1610
1611         /* Offsets within the segment headers are not supported */
1612         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1613         if (!hdrs_sz)
1614                 return ICE_ERR_PARAM;
1615
1616         fv_words = hw->blk[params->blk].es.fvw;
1617
1618         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1619                 struct ice_flow_seg_fld_raw *raw;
1620                 u16 off, cnt, j;
1621
1622                 raw = &params->prof->segs[seg].raws[i];
1623
1624                 /* Storing extraction information */
1625                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1626                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1627                         ICE_FLOW_FV_EXTRACT_SZ;
1628                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1629                         BITS_PER_BYTE;
1630                 raw->info.xtrct.idx = params->es_cnt;
1631
1632                 /* Determine the number of field vector entries this raw field
1633                  * consumes.
1634                  */
1635                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1636                                           (raw->info.src.last * BITS_PER_BYTE),
1637                                           (ICE_FLOW_FV_EXTRACT_SZ *
1638                                            BITS_PER_BYTE));
1639                 off = raw->info.xtrct.off;
1640                 for (j = 0; j < cnt; j++) {
1641                         u16 idx;
1642
1643                         /* Make sure the number of extraction sequence entries
1644                          * required does not exceed the block's capacity.
1645                          */
1646                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1647                             params->es_cnt >= ICE_MAX_FV_WORDS)
1648                                 return ICE_ERR_MAX_LIMIT;
1649
1650                         /* some blocks require a reversed field vector layout */
1651                         if (hw->blk[params->blk].es.reverse)
1652                                 idx = fv_words - params->es_cnt - 1;
1653                         else
1654                                 idx = params->es_cnt;
1655
1656                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1657                         params->es[idx].off = off;
1658                         params->es_cnt++;
1659                         off += ICE_FLOW_FV_EXTRACT_SZ;
1660                 }
1661         }
1662
1663         return ICE_SUCCESS;
1664 }
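
/* Worked example, illustrative only and again assuming a 2-byte
 * ICE_FLOW_FV_EXTRACT_SZ: a 4-byte raw match starting at packet byte offset 3
 * (raw->off == 3, raw->info.src.last == 4) is laid out as
 *
 *	xtrct.off  = (3 / 2) * 2             = 2
 *	xtrct.disp = (3 % 2) * BITS_PER_BYTE = 8
 *	cnt        = DIVIDE_AND_ROUND_UP(8 + 4 * 8, 16) = 3
 *
 * i.e. the unaligned start costs one extra field vector word compared to the
 * two words a word-aligned 4-byte match would need.
 */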
1665
1666 /**
1667  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1668  * @hw: pointer to the HW struct
1669  * @params: information about the flow to be processed
1670  *
1671  * This function iterates through all matched fields in the given segments, and
1672  * creates an extraction sequence for the fields.
1673  */
1674 static enum ice_status
1675 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1676                           struct ice_flow_prof_params *params)
1677 {
1678         enum ice_status status = ICE_SUCCESS;
1679         u8 i;
1680
1681         /* For ACL, we also need to extract the direction bit (Rx/Tx) data from
1682          * the packet flags.
1683          */
1684         if (params->blk == ICE_BLK_ACL) {
1685                 status = ice_flow_xtract_pkt_flags(hw, params,
1686                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1687                 if (status)
1688                         return status;
1689         }
1690
1691         for (i = 0; i < params->prof->segs_cnt; i++) {
1692                 u64 match = params->prof->segs[i].match;
1693                 enum ice_flow_field j;
1694
1695                 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1696                                      ICE_FLOW_FIELD_IDX_MAX) {
1697                         status = ice_flow_xtract_fld(hw, params, i, j, match);
1698                         if (status)
1699                                 return status;
1700                         ice_clear_bit(j, (ice_bitmap_t *)&match);
1701                 }
1702
1703                 /* Process raw matching bytes */
1704                 status = ice_flow_xtract_raws(hw, params, i);
1705                 if (status)
1706                         return status;
1707         }
1708
1709         return status;
1710 }
1711
1712 /**
1713  * ice_flow_sel_acl_scen - select an ACL scenario for a flow profile
1714  * @hw: pointer to the hardware structure
1715  * @params: information about the flow to be processed
1716  *
1717  * This function selects the best-fit ACL scenario for the entry length
1718  * described by the given params and stores it in the flow profile.
1719  */
1720 static enum ice_status
1721 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1722 {
1723         /* Find the best-fit scenario for the provided match width */
1724         struct ice_acl_scen *cand_scen = NULL, *scen;
1725
1726         if (!hw->acl_tbl)
1727                 return ICE_ERR_DOES_NOT_EXIST;
1728
1729         /* Loop through each scenario and match against the scenario width
1730          * to select the specific scenario
1731          */
1732         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1733                 if (scen->eff_width >= params->entry_length &&
1734                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1735                         cand_scen = scen;
1736         if (!cand_scen)
1737                 return ICE_ERR_DOES_NOT_EXIST;
1738
1739         params->prof->cfg.scen = cand_scen;
1740
1741         return ICE_SUCCESS;
1742 }
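
/* Illustrative example: with scenarios of effective widths 16, 32 and 64
 * bytes present in hw->acl_tbl and params->entry_length == 24, the loop above
 * selects the 32-byte scenario, i.e. the narrowest one that still fits the
 * entry, and returns ICE_ERR_DOES_NOT_EXIST if every scenario is narrower
 * than the entry.
 */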
1743
1744 /**
1745  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1746  * @params: information about the flow to be processed
1747  */
1748 static enum ice_status
1749 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1750 {
1751         u16 index, i, range_idx = 0;
1752
1753         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1754
1755         for (i = 0; i < params->prof->segs_cnt; i++) {
1756                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1757                 u8 j;
1758
1759                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1760                                      ICE_FLOW_FIELD_IDX_MAX) {
1761                         struct ice_flow_fld_info *fld = &seg->fields[j];
1762
1763                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1764
1765                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1766                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1767
1768                                 /* Range checking only supported for single
1769                                  * words
1770                                  */
1771                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1772                                                         fld->xtrct.disp,
1773                                                         BITS_PER_BYTE * 2) > 1)
1774                                         return ICE_ERR_PARAM;
1775
1776                                 /* Ranges must define low and high values */
1777                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1778                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1779                                         return ICE_ERR_PARAM;
1780
1781                                 fld->entry.val = range_idx++;
1782                         } else {
1783                                 /* Store adjusted byte-length of field for later
1784                                  * use, taking into account potential
1785                                  * non-byte-aligned displacement
1786                                  */
1787                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1788                                         (ice_flds_info[j].size +
1789                                          (fld->xtrct.disp % BITS_PER_BYTE),
1790                                          BITS_PER_BYTE);
1791                                 fld->entry.val = index;
1792                                 index += fld->entry.last;
1793                         }
1794                 }
1795
1796                 for (j = 0; j < seg->raws_cnt; j++) {
1797                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1798
1799                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1800                         raw->info.entry.val = index;
1801                         raw->info.entry.last = raw->info.src.last;
1802                         index += raw->info.entry.last;
1803                 }
1804         }
1805
1806         /* Currently only the byte selection base is supported, which only
1807          * allows for an effective entry size of 30 bytes. Reject anything
1808          * larger.
1809          */
1810         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1811                 return ICE_ERR_PARAM;
1812
1813         /* Only 8 range checkers are available per profile; reject anything
1814          * trying to use more.
1815          */
1816         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1817                 return ICE_ERR_PARAM;
1818
1819         /* Store # bytes required for entry for later use */
1820         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1821
1822         return ICE_SUCCESS;
1823 }
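
/* Illustrative example: for a profile matching a 2-byte TCP destination port
 * and a 4-byte IPv4 source address, with both fields byte-aligned and no
 * range checkers used, the loops above pack the fields back to back starting
 * at ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX, so params->entry_length ends up as
 * 2 + 4 = 6 bytes, well inside the 30-byte byte-selection limit noted above.
 */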
1824
1825 /**
1826  * ice_flow_proc_segs - process all packet segments associated with a profile
1827  * @hw: pointer to the HW struct
1828  * @params: information about the flow to be processed
1829  */
1830 static enum ice_status
1831 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1832 {
1833         enum ice_status status;
1834
1835         status = ice_flow_proc_seg_hdrs(params);
1836         if (status)
1837                 return status;
1838
1839         status = ice_flow_create_xtrct_seq(hw, params);
1840         if (status)
1841                 return status;
1842
1843         switch (params->blk) {
1844         case ICE_BLK_FD:
1845         case ICE_BLK_RSS:
1846                 status = ICE_SUCCESS;
1847                 break;
1848         case ICE_BLK_ACL:
1849                 status = ice_flow_acl_def_entry_frmt(params);
1850                 if (status)
1851                         return status;
1852                 status = ice_flow_sel_acl_scen(hw, params);
1853                 if (status)
1854                         return status;
1855                 break;
1856         default:
1857                 return ICE_ERR_NOT_IMPL;
1858         }
1859
1860         return status;
1861 }
1862
1863 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1864 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1865 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1866
1867 /**
1868  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1869  * @hw: pointer to the HW struct
1870  * @blk: classification stage
1871  * @dir: flow direction
1872  * @segs: array of one or more packet segments that describe the flow
1873  * @segs_cnt: number of packet segments provided
1874  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1875  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1876  */
1877 static struct ice_flow_prof *
1878 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1879                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1880                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1881 {
1882         struct ice_flow_prof *p, *prof = NULL;
1883
1884         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1885         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1886                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1887                     segs_cnt && segs_cnt == p->segs_cnt) {
1888                         u8 i;
1889
1890                         /* Check for profile-VSI association if specified */
1891                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1892                             ice_is_vsi_valid(hw, vsi_handle) &&
1893                             !ice_is_bit_set(p->vsis, vsi_handle))
1894                                 continue;
1895
1896                         /* Protocol headers must be checked. Matched fields are
1897                          * checked if specified.
1898                          */
1899                         for (i = 0; i < segs_cnt; i++)
1900                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1901                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1902                                      segs[i].match != p->segs[i].match))
1903                                         break;
1904
1905                         /* A match is found if all segments are matched */
1906                         if (i == segs_cnt) {
1907                                 prof = p;
1908                                 break;
1909                         }
1910                 }
1911         ice_release_lock(&hw->fl_profs_locks[blk]);
1912
1913         return prof;
1914 }
1915
1916 /**
1917  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1918  * @hw: pointer to the HW struct
1919  * @blk: classification stage
1920  * @dir: flow direction
1921  * @segs: array of one or more packet segments that describe the flow
1922  * @segs_cnt: number of packet segments provided
1923  */
1924 u64
1925 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1926                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1927 {
1928         struct ice_flow_prof *p;
1929
1930         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1931                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1932
1933         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1934 }
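
/* Usage sketch, illustrative only and assuming the ICE_FLOW_RX direction
 * value declared in ice_flow.h: a caller that only needs to know whether an
 * equivalent profile already exists can compare the returned ID against
 * ICE_FLOW_PROF_ID_INVAL:
 *
 *	u64 id = ice_flow_find_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX,
 *				    segs, segs_cnt);
 *
 *	if (id == ICE_FLOW_PROF_ID_INVAL)
 *		the profile does not exist yet and must be added
 */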
1935
1936 /**
1937  * ice_flow_find_prof_id - Look up a profile with given profile ID
1938  * @hw: pointer to the HW struct
1939  * @blk: classification stage
1940  * @prof_id: unique ID to identify this flow profile
1941  */
1942 static struct ice_flow_prof *
1943 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1944 {
1945         struct ice_flow_prof *p;
1946
1947         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1948                 if (p->id == prof_id)
1949                         return p;
1950
1951         return NULL;
1952 }
1953
1954 /**
1955  * ice_dealloc_flow_entry - Deallocate flow entry memory
1956  * @hw: pointer to the HW struct
1957  * @entry: flow entry to be removed
1958  */
1959 static void
1960 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1961 {
1962         if (!entry)
1963                 return;
1964
1965         if (entry->entry)
1966                 ice_free(hw, entry->entry);
1967
1968         if (entry->range_buf) {
1969                 ice_free(hw, entry->range_buf);
1970                 entry->range_buf = NULL;
1971         }
1972
1973         if (entry->acts) {
1974                 ice_free(hw, entry->acts);
1975                 entry->acts = NULL;
1976                 entry->acts_cnt = 0;
1977         }
1978
1979         ice_free(hw, entry);
1980 }
1981
1982 /**
1983  * ice_flow_get_hw_prof - return the HW profile ID for a specific profile ID handle
1984  * @hw: pointer to the HW struct
1985  * @blk: classification stage
1986  * @prof_id: the profile ID handle
1987  * @hw_prof_id: pointer to variable to receive the HW profile ID
1988  */
1989 enum ice_status
1990 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1991                      u8 *hw_prof_id)
1992 {
1993         enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1994         struct ice_prof_map *map;
1995
1996         ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1997         map = ice_search_prof_id(hw, blk, prof_id);
1998         if (map) {
1999                 *hw_prof_id = map->prof_id;
2000                 status = ICE_SUCCESS;
2001         }
2002         ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2003         return status;
2004 }
2005
2006 #define ICE_ACL_INVALID_SCEN    0x3f
2007
2008 /**
2009  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
2010  * @hw: pointer to the hardware structure
2011  * @prof: pointer to flow profile
2012  * @buf: destination buffer the function writes the partial extraction sequence to
2013  *
2014  * Returns ICE_SUCCESS if no PF is associated with the given profile,
2015  * ICE_ERR_IN_USE if at least one PF is associated with the given profile,
2016  * and another error code if the profile lookup or query itself fails.
2017  */
2018 static enum ice_status
2019 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
2020                             struct ice_aqc_acl_prof_generic_frmt *buf)
2021 {
2022         enum ice_status status;
2023         u8 prof_id = 0;
2024
2025         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2026         if (status)
2027                 return status;
2028
2029         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
2030         if (status)
2031                 return status;
2032
2033         /* If the scenarios associated with all PFs are either all 0 or all
2034          * ICE_ACL_INVALID_SCEN (63) for the given profile, then the profile
2035          * has not been configured by any PF yet.
2036          */
2037         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
2038             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
2039             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
2040             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
2041                 return ICE_SUCCESS;
2042
2043         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
2044             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
2045             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
2046             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
2047             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
2048             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
2049             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
2050             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
2051                 return ICE_SUCCESS;
2052
2053         return ICE_ERR_IN_USE;
2054 }
2055
2056 /**
2057  * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
2058  * @hw: pointer to the hardware structure
2059  * @acts: array of actions to be performed on a match
2060  * @acts_cnt: number of actions
2061  */
2062 static enum ice_status
2063 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
2064                            u8 acts_cnt)
2065 {
2066         int i;
2067
2068         for (i = 0; i < acts_cnt; i++) {
2069                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2070                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2071                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2072                         struct ice_acl_cntrs cntrs = { 0 };
2073                         enum ice_status status;
2074
2075                         /* amount is unused in the dealloc path but the common
2076                          * parameter check routine wants a value set, as zero
2077                          * is invalid for the check. Just set it.
2078                          */
2079                         cntrs.amount = 1;
2080                         cntrs.bank = 0; /* Only bank0 for the moment */
2081                         cntrs.first_cntr =
2082                                         LE16_TO_CPU(acts[i].data.acl_act.value);
2083                         cntrs.last_cntr =
2084                                         LE16_TO_CPU(acts[i].data.acl_act.value);
2085
2086                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2087                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2088                         else
2089                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2090
2091                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
2092                         if (status)
2093                                 return status;
2094                 }
2095         }
2096         return ICE_SUCCESS;
2097 }
2098
2099 /**
2100  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
2101  * @hw: pointer to the hardware structure
2102  * @prof: pointer to flow profile
2103  *
2104  * Disassociate the scenario from the profile for the current PF.
2105  */
2106 static enum ice_status
2107 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
2108 {
2109         struct ice_aqc_acl_prof_generic_frmt buf;
2110         enum ice_status status = ICE_SUCCESS;
2111         u8 prof_id = 0;
2112
2113         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2114
2115         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2116         if (status)
2117                 return status;
2118
2119         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
2120         if (status)
2121                 return status;
2122
2123         /* Clear scenario for this PF */
2124         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
2125         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2126
2127         return status;
2128 }
2129
2130 /**
2131  * ice_flow_rem_entry_sync - Remove a flow entry
2132  * @hw: pointer to the HW struct
2133  * @blk: classification stage
2134  * @entry: flow entry to be removed
2135  */
2136 static enum ice_status
2137 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
2138                         struct ice_flow_entry *entry)
2139 {
2140         if (!entry)
2141                 return ICE_ERR_BAD_PTR;
2142
2143         if (blk == ICE_BLK_ACL) {
2144                 enum ice_status status;
2145
2146                 if (!entry->prof)
2147                         return ICE_ERR_BAD_PTR;
2148
2149                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
2150                                            entry->scen_entry_idx);
2151                 if (status)
2152                         return status;
2153
2154                 /* Release any ACL counters tied to this entry's actions. */
2155                 if (entry->acts_cnt && entry->acts)
2156                         ice_flow_acl_free_act_cntr(hw, entry->acts,
2157                                                    entry->acts_cnt);
2158         }
2159
2160         LIST_DEL(&entry->l_entry);
2161
2162         ice_dealloc_flow_entry(hw, entry);
2163
2164         return ICE_SUCCESS;
2165 }
2166
2167 /**
2168  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
2169  * @hw: pointer to the HW struct
2170  * @blk: classification stage
2171  * @dir: flow direction
2172  * @prof_id: unique ID to identify this flow profile
2173  * @segs: array of one or more packet segments that describe the flow
2174  * @segs_cnt: number of packet segments provided
2175  * @acts: array of default actions
2176  * @acts_cnt: number of default actions
2177  * @prof: stores the returned flow profile added
2178  *
2179  * Assumption: the caller has acquired the lock to the profile list
2180  */
2181 static enum ice_status
2182 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
2183                        enum ice_flow_dir dir, u64 prof_id,
2184                        struct ice_flow_seg_info *segs, u8 segs_cnt,
2185                        struct ice_flow_action *acts, u8 acts_cnt,
2186                        struct ice_flow_prof **prof)
2187 {
2188         struct ice_flow_prof_params *params;
2189         enum ice_status status;
2190         u8 i;
2191
2192         if (!prof || (acts_cnt && !acts))
2193                 return ICE_ERR_BAD_PTR;
2194
2195         params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2196         if (!params)
2197                 return ICE_ERR_NO_MEMORY;
2198
2199         params->prof = (struct ice_flow_prof *)
2200                 ice_malloc(hw, sizeof(*params->prof));
2201         if (!params->prof) {
2202                 status = ICE_ERR_NO_MEMORY;
2203                 goto free_params;
2204         }
2205
2206         /* initialize extraction sequence to all invalid (0xff) */
2207         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2208                 params->es[i].prot_id = ICE_PROT_INVALID;
2209                 params->es[i].off = ICE_FV_OFFSET_INVAL;
2210         }
2211
2212         params->blk = blk;
2213         params->prof->id = prof_id;
2214         params->prof->dir = dir;
2215         params->prof->segs_cnt = segs_cnt;
2216
2217         /* Make a copy of the segments that need to be persistent in the flow
2218          * profile instance
2219          */
2220         for (i = 0; i < segs_cnt; i++)
2221                 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
2222                            ICE_NONDMA_TO_NONDMA);
2223
2224         /* Make a copy of the actions that need to be persistent in the flow
2225          * profile instance.
2226          */
2227         if (acts_cnt) {
2228                 params->prof->acts = (struct ice_flow_action *)
2229                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2230                                    ICE_NONDMA_TO_NONDMA);
2231
2232                 if (!params->prof->acts) {
2233                         status = ICE_ERR_NO_MEMORY;
2234                         goto out;
2235                 }
2236         }
2237
2238         status = ice_flow_proc_segs(hw, params);
2239         if (status) {
2240                 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2241                 goto out;
2242         }
2243
2244         /* Add a HW profile for this flow profile */
2245         status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2246                               params->attr, params->attr_cnt, params->es,
2247                               params->mask, true);
2248         if (status) {
2249                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
2250                 goto out;
2251         }
2252
2253         INIT_LIST_HEAD(&params->prof->entries);
2254         ice_init_lock(&params->prof->entries_lock);
2255         *prof = params->prof;
2256
2257 out:
2258         if (status) {
2259                 if (params->prof->acts)
2260                         ice_free(hw, params->prof->acts);
2261                 ice_free(hw, params->prof);
2262         }
2263 free_params:
2264         ice_free(hw, params);
2265
2266         return status;
2267 }
2268
2269 /**
2270  * ice_flow_rem_prof_sync - remove a flow profile
2271  * @hw: pointer to the hardware structure
2272  * @blk: classification stage
2273  * @prof: pointer to flow profile to remove
2274  *
2275  * Assumption: the caller has acquired the lock to the profile list
2276  */
2277 static enum ice_status
2278 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2279                        struct ice_flow_prof *prof)
2280 {
2281         enum ice_status status;
2282
2283         /* Remove all remaining flow entries before removing the flow profile */
2284         if (!LIST_EMPTY(&prof->entries)) {
2285                 struct ice_flow_entry *e, *t;
2286
2287                 ice_acquire_lock(&prof->entries_lock);
2288
2289                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2290                                          l_entry) {
2291                         status = ice_flow_rem_entry_sync(hw, blk, e);
2292                         if (status)
2293                                 break;
2294                 }
2295
2296                 ice_release_lock(&prof->entries_lock);
2297         }
2298
2299         if (blk == ICE_BLK_ACL) {
2300                 struct ice_aqc_acl_profile_ranges query_rng_buf;
2301                 struct ice_aqc_acl_prof_generic_frmt buf;
2302                 u8 prof_id = 0;
2303
2304                 /* Disassociate the scenario from the profile for the PF */
2305                 status = ice_flow_acl_disassoc_scen(hw, prof);
2306                 if (status)
2307                         return status;
2308
2309                 /* Clear the range-checker if the profile ID is no longer
2310                  * used by any PF
2311                  */
2312                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2313                 if (status && status != ICE_ERR_IN_USE) {
2314                         return status;
2315                 } else if (!status) {
2316                         /* Clear the range-checker value for profile ID */
2317                         ice_memset(&query_rng_buf, 0,
2318                                    sizeof(struct ice_aqc_acl_profile_ranges),
2319                                    ICE_NONDMA_MEM);
2320
2321                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
2322                                                       &prof_id);
2323                         if (status)
2324                                 return status;
2325
2326                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2327                                                           &query_rng_buf, NULL);
2328                         if (status)
2329                                 return status;
2330                 }
2331         }
2332
2333         /* Remove all hardware profiles associated with this flow profile */
2334         status = ice_rem_prof(hw, blk, prof->id);
2335         if (!status) {
2336                 LIST_DEL(&prof->l_entry);
2337                 ice_destroy_lock(&prof->entries_lock);
2338                 if (prof->acts)
2339                         ice_free(hw, prof->acts);
2340                 ice_free(hw, prof);
2341         }
2342
2343         return status;
2344 }
2345
2346 /**
2347  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2348  * @buf: destination buffer the function writes the partial xtrct sequence to
2349  * @info: Info about field
2350  */
2351 static void
2352 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2353                                struct ice_flow_fld_info *info)
2354 {
2355         u16 dst, i;
2356         u8 src;
2357
2358         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2359                 info->xtrct.disp / BITS_PER_BYTE;
2360         dst = info->entry.val;
2361         for (i = 0; i < info->entry.last; i++)
2362                 /* HW stores field vector words in LE, convert words back to BE
2363                  * so constructed entries will end up in network order
2364                  */
2365                 buf->byte_selection[dst++] = src++ ^ 1;
2366 }
2367
2368 /**
2369  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2370  * @hw: pointer to the hardware structure
2371  * @prof: pointer to flow profile
2372  */
2373 static enum ice_status
2374 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2375 {
2376         struct ice_aqc_acl_prof_generic_frmt buf;
2377         struct ice_flow_fld_info *info;
2378         enum ice_status status;
2379         u8 prof_id = 0;
2380         u16 i;
2381
2382         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2383
2384         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2385         if (status)
2386                 return status;
2387
2388         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2389         if (status && status != ICE_ERR_IN_USE)
2390                 return status;
2391
2392         if (!status) {
2393                 /* Program the profile dependent configuration. This is done
2394                  * only once regardless of the number of PFs using that profile
2395                  */
2396                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2397
2398                 for (i = 0; i < prof->segs_cnt; i++) {
2399                         struct ice_flow_seg_info *seg = &prof->segs[i];
2400                         u16 j;
2401
2402                         ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2403                                              ICE_FLOW_FIELD_IDX_MAX) {
2404                                 info = &seg->fields[j];
2405
2406                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2407                                         buf.word_selection[info->entry.val] =
2408                                                 info->xtrct.idx;
2409                                 else
2410                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
2411                                                                        info);
2412                         }
2413
2414                         for (j = 0; j < seg->raws_cnt; j++) {
2415                                 info = &seg->raws[j].info;
2416                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2417                         }
2418                 }
2419
2420                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2421                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2422                            ICE_NONDMA_MEM);
2423         }
2424
2425         /* Update the current PF */
2426         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2427         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2428
2429         return status;
2430 }
2431
2432 /**
2433  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2434  * @hw: pointer to the hardware structure
2435  * @blk: classification stage
2436  * @vsi_handle: software VSI handle
2437  * @vsig: target VSI group
2438  *
2439  * Assumption: the caller has already verified that the VSI to
2440  * be added has the same characteristics as the VSIG and will
2441  * thereby have access to all resources added to that VSIG.
2442  */
2443 enum ice_status
2444 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2445                         u16 vsig)
2446 {
2447         enum ice_status status;
2448
2449         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2450                 return ICE_ERR_PARAM;
2451
2452         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2453         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2454                                   vsig);
2455         ice_release_lock(&hw->fl_profs_locks[blk]);
2456
2457         return status;
2458 }
2459
2460 /**
2461  * ice_flow_assoc_prof - associate a VSI with a flow profile
2462  * @hw: pointer to the hardware structure
2463  * @blk: classification stage
2464  * @prof: pointer to flow profile
2465  * @vsi_handle: software VSI handle
2466  *
2467  * Assumption: the caller has acquired the lock to the profile list
2468  * and the software VSI handle has been validated
2469  */
2470 enum ice_status
2471 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2472                     struct ice_flow_prof *prof, u16 vsi_handle)
2473 {
2474         enum ice_status status = ICE_SUCCESS;
2475
2476         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2477                 if (blk == ICE_BLK_ACL) {
2478                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2479                         if (status)
2480                                 return status;
2481                 }
2482                 status = ice_add_prof_id_flow(hw, blk,
2483                                               ice_get_hw_vsi_num(hw,
2484                                                                  vsi_handle),
2485                                               prof->id);
2486                 if (!status)
2487                         ice_set_bit(vsi_handle, prof->vsis);
2488                 else
2489                         ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2490                                   status);
2491         }
2492
2493         return status;
2494 }
2495
2496 /**
2497  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2498  * @hw: pointer to the hardware structure
2499  * @blk: classification stage
2500  * @prof: pointer to flow profile
2501  * @vsi_handle: software VSI handle
2502  *
2503  * Assumption: the caller has acquired the lock to the profile list
2504  * and the software VSI handle has been validated
2505  */
2506 static enum ice_status
2507 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2508                        struct ice_flow_prof *prof, u16 vsi_handle)
2509 {
2510         enum ice_status status = ICE_SUCCESS;
2511
2512         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2513                 status = ice_rem_prof_id_flow(hw, blk,
2514                                               ice_get_hw_vsi_num(hw,
2515                                                                  vsi_handle),
2516                                               prof->id);
2517                 if (!status)
2518                         ice_clear_bit(vsi_handle, prof->vsis);
2519                 else
2520                         ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2521                                   status);
2522         }
2523
2524         return status;
2525 }
2526
2527 #define FLAG_GTP_EH_PDU_LINK    BIT_ULL(13)
2528 #define FLAG_GTP_EH_PDU         BIT_ULL(14)
2529
2530 #define FLAG_GTPU_MSK   \
2531         (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
2532 #define FLAG_GTPU_UP    \
2533         (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
2534 #define FLAG_GTPU_DW    \
2535         (FLAG_GTP_EH_PDU)
2536 /**
2537  * ice_flow_set_hw_prof - Set HW flow profile based on the parsed profile info
2538  * @hw: pointer to the HW struct
2539  * @dest_vsi_handle: dest VSI handle
2540  * @fdir_vsi_handle: fdir programming VSI handle
2541  * @prof: stores parsed profile info from raw flow
2542  * @blk: classification stage
2543  */
2544 enum ice_status
2545 ice_flow_set_hw_prof(struct ice_hw *hw, u16 dest_vsi_handle,
2546                      u16 fdir_vsi_handle, struct ice_parser_profile *prof,
2547                      enum ice_block blk)
2548 {
2549         int id = ice_find_first_bit(prof->ptypes, UINT16_MAX);
2550         struct ice_flow_prof_params *params;
2551         u8 fv_words = hw->blk[blk].es.fvw;
2552         enum ice_status status;
2553         u16 vsi_num;
2554         int i, idx;
2555
2556         params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2557         if (!params)
2558                 return ICE_ERR_NO_MEMORY;
2559
2560         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2561                 params->es[i].prot_id = ICE_PROT_INVALID;
2562                 params->es[i].off = ICE_FV_OFFSET_INVAL;
2563         }
2564
2565         for (i = 0; i < prof->fv_num; i++) {
2566                 if (hw->blk[blk].es.reverse)
2567                         idx = fv_words - i - 1;
2568                 else
2569                         idx = i;
2570                 params->es[idx].prot_id = prof->fv[i].proto_id;
2571                 params->es[idx].off = prof->fv[i].offset;
2572                 params->mask[idx] = CPU_TO_BE16(prof->fv[i].msk);
2573         }
2574
2575         switch (prof->flags) {
2576         case FLAG_GTPU_DW:
2577                 params->attr = ice_attr_gtpu_down;
2578                 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
2579                 break;
2580         case FLAG_GTPU_UP:
2581                 params->attr = ice_attr_gtpu_up;
2582                 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
2583                 break;
2584         default:
2585                 if (prof->flags_msk & FLAG_GTPU_MSK) {
2586                         params->attr = ice_attr_gtpu_session;
2587                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
2588                 }
2589                 break;
2590         }
2591
2592         status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes,
2593                               params->attr, params->attr_cnt,
2594                               params->es, params->mask, false);
2595         if (status)
2596                 goto free_params;
2597
2598         status = ice_flow_assoc_hw_prof(hw, blk, dest_vsi_handle,
2599                                         fdir_vsi_handle, id);
2600         /* Fall through and free params on the success path as well, so the
2601          * scratch profile parameters are not leaked.
2602          */
2604
2605 free_params:
2606         ice_free(hw, params);
2607
2608         return status;
2609 }
2610
2611 /**
2612  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2613  * @hw: pointer to the HW struct
2614  * @blk: classification stage
2615  * @dir: flow direction
2616  * @prof_id: unique ID to identify this flow profile
2617  * @segs: array of one or more packet segments that describe the flow
2618  * @segs_cnt: number of packet segments provided
2619  * @acts: array of default actions
2620  * @acts_cnt: number of default actions
2621  * @prof: stores the returned flow profile added
2622  */
2623 enum ice_status
2624 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2625                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2626                   struct ice_flow_action *acts, u8 acts_cnt,
2627                   struct ice_flow_prof **prof)
2628 {
2629         enum ice_status status;
2630
2631         if (segs_cnt > ICE_FLOW_SEG_MAX)
2632                 return ICE_ERR_MAX_LIMIT;
2633
2634         if (!segs_cnt)
2635                 return ICE_ERR_PARAM;
2636
2637         if (!segs)
2638                 return ICE_ERR_BAD_PTR;
2639
2640         status = ice_flow_val_hdrs(segs, segs_cnt);
2641         if (status)
2642                 return status;
2643
2644         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2645
2646         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2647                                         acts, acts_cnt, prof);
2648         if (!status)
2649                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2650
2651         ice_release_lock(&hw->fl_profs_locks[blk]);
2652
2653         return status;
2654 }
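
/* Illustrative usage sketch (not part of the driver): a caller typically
 * describes the match with ice_flow_set_fld() on one or more segments and
 * then registers the profile, removing it later by its ID. The field index
 * ICE_FLOW_FIELD_IDX_IPV4_SA and the zero offset below are only example
 * placeholders.
 *
 *	struct ice_flow_seg_info seg = { 0 };
 *	struct ice_flow_prof *prof;
 *	enum ice_status status;
 *
 *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA, 0,
 *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 *	status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
 *				   &seg, 1, NULL, 0, &prof);
 *	...
 *	status = ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
 */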
2655
2656 /**
2657  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2658  * @hw: pointer to the HW struct
2659  * @blk: the block for which the flow profile is to be removed
2660  * @prof_id: unique ID of the flow profile to be removed
2661  */
2662 enum ice_status
2663 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2664 {
2665         struct ice_flow_prof *prof;
2666         enum ice_status status;
2667
2668         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2669
2670         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2671         if (!prof) {
2672                 status = ICE_ERR_DOES_NOT_EXIST;
2673                 goto out;
2674         }
2675
2676         /* prof becomes invalid after the call */
2677         status = ice_flow_rem_prof_sync(hw, blk, prof);
2678
2679 out:
2680         ice_release_lock(&hw->fl_profs_locks[blk]);
2681
2682         return status;
2683 }
2684
2685 /**
2686  * ice_flow_find_entry - look for a flow entry using its unique ID
2687  * @hw: pointer to the HW struct
2688  * @blk: classification stage
2689  * @entry_id: unique ID to identify this flow entry
2690  *
2691  * This function looks for the flow entry with the specified unique ID in all
2692  * flow profiles of the specified classification stage. If the entry is found,
2693  * it returns the handle to the flow entry. Otherwise, it returns
2694  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2695  */
2696 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2697 {
2698         struct ice_flow_entry *found = NULL;
2699         struct ice_flow_prof *p;
2700
2701         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2702
2703         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2704                 struct ice_flow_entry *e;
2705
2706                 ice_acquire_lock(&p->entries_lock);
2707                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2708                         if (e->id == entry_id) {
2709                                 found = e;
2710                                 break;
2711                         }
2712                 ice_release_lock(&p->entries_lock);
2713
2714                 if (found)
2715                         break;
2716         }
2717
2718         ice_release_lock(&hw->fl_profs_locks[blk]);
2719
2720         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2721 }
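
/* Illustrative lookup sketch (not part of the driver), assuming entry_id was
 * previously used when the entry was added with ice_flow_add_entry():
 *
 *	u64 entry_h = ice_flow_find_entry(hw, ICE_BLK_FD, entry_id);
 *
 *	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
 *		return ICE_ERR_DOES_NOT_EXIST;
 */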
2722
2723 /**
2724  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2725  * @hw: pointer to the hardware structure
2726  * @acts: array of actions to be performed on a match
2727  * @acts_cnt: number of actions
2728  * @cnt_alloc: indicates if an ACL counter has been allocated.
2729  */
2730 static enum ice_status
2731 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2732                            u8 acts_cnt, bool *cnt_alloc)
2733 {
2734         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2735         int i;
2736
2737         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2738         *cnt_alloc = false;
2739
2740         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2741                 return ICE_ERR_OUT_OF_RANGE;
2742
2743         for (i = 0; i < acts_cnt; i++) {
2744                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2745                     acts[i].type != ICE_FLOW_ACT_DROP &&
2746                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2747                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2748                         return ICE_ERR_CFG;
2749
2750                 /* If the caller want to add two actions of the same type, then
2751                 /* If the caller wants to add two actions of the same type, it
2752                  * is considered an invalid configuration.
2753                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2754                         return ICE_ERR_PARAM;
2755         }
2756
2757         /* Checks if ACL counters are needed. */
2758         for (i = 0; i < acts_cnt; i++) {
2759                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2760                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2761                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2762                         struct ice_acl_cntrs cntrs = { 0 };
2763                         enum ice_status status;
2764
2765                         cntrs.amount = 1;
2766                         cntrs.bank = 0; /* Only bank0 for the moment */
2767
2768                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2769                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2770                         else
2771                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2772
2773                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2774                         if (status)
2775                                 return status;
2776                         /* Counter index within the bank */
2777                         acts[i].data.acl_act.value =
2778                                                 CPU_TO_LE16(cntrs.first_cntr);
2779                         *cnt_alloc = true;
2780                 }
2781         }
2782
2783         return ICE_SUCCESS;
2784 }
2785
2786 /**
2787  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2788  * @fld: number of the given field
2789  * @info: info about field
2790  * @range_buf: range checker configuration buffer
2791  * @data: pointer to a data buffer containing flow entry's match values/masks
2792  * @range: Input/output param indicating which range checkers are being used
2793  */
2794 static void
2795 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2796                               struct ice_aqc_acl_profile_ranges *range_buf,
2797                               u8 *data, u8 *range)
2798 {
2799         u16 new_mask;
2800
2801         /* If not specified, default mask is all bits in field */
2802         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2803                     BIT(ice_flds_info[fld].size) - 1 :
2804                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2805
2806         /* If the mask is 0, then we don't need to worry about this input
2807          * range checker value.
2808          */
2809         if (new_mask) {
2810                 u16 new_high =
2811                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2812                 u16 new_low =
2813                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2814                 u8 range_idx = info->entry.val;
2815
2816                 range_buf->checker_cfg[range_idx].low_boundary =
2817                         CPU_TO_BE16(new_low);
2818                 range_buf->checker_cfg[range_idx].high_boundary =
2819                         CPU_TO_BE16(new_high);
2820                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2821
2822                 /* Indicate which range checker is being used */
2823                 *range |= BIT(range_idx);
2824         }
2825 }
2826
2827 /**
2828  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2829  * @fld: number of the given field
2830  * @info: info about the field
2831  * @buf: buffer containing the entry
2832  * @dontcare: buffer containing don't care mask for entry
2833  * @data: pointer to a data buffer containing flow entry's match values/masks
2834  */
2835 static void
2836 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2837                             u8 *dontcare, u8 *data)
2838 {
2839         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2840         bool use_mask = false;
2841         u8 disp;
2842
2843         src = info->src.val;
2844         mask = info->src.mask;
2845         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2846         disp = info->xtrct.disp % BITS_PER_BYTE;
2847
2848         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2849                 use_mask = true;
2850
2851         for (k = 0; k < info->entry.last; k++, dst++) {
2852                 /* Add overflow bits from previous byte */
2853                 buf[dst] = (tmp_s & 0xff00) >> 8;
2854
2855                 /* If mask is not valid, tmp_m is always zero, so just setting
2856                  * dontcare to 0 (no masked bits). If mask is valid, pulls in
2857                  * overflow bits of mask from prev byte
2858                  */
2859                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2860
2861                 /* If there is displacement, last byte will only contain
2862                  * displaced data, but there is no more data to read from user
2863                  * buffer, so skip so as not to potentially read beyond end of
2864                  * user buffer
2865                  */
2866                 if (!disp || k < info->entry.last - 1) {
2867                         /* Store shifted data to use in next byte */
2868                         tmp_s = data[src++] << disp;
2869
2870                         /* Add current (shifted) byte */
2871                         buf[dst] |= tmp_s & 0xff;
2872
2873                         /* Handle mask if valid */
2874                         if (use_mask) {
2875                                 tmp_m = (~data[mask++] & 0xff) << disp;
2876                                 dontcare[dst] |= tmp_m & 0xff;
2877                         }
2878                 }
2879         }
2880
2881         /* Fill in don't care bits at beginning of field */
2882         if (disp) {
2883                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2884                 for (k = 0; k < disp; k++)
2885                         dontcare[dst] |= BIT(k);
2886         }
2887
2888         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2889
2890         /* Fill in don't care bits at end of field */
2891         if (end_disp) {
2892                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2893                       info->entry.last - 1;
2894                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2895                         dontcare[dst] |= BIT(k);
2896         }
2897 }
2898
2899 /**
2900  * ice_flow_acl_frmt_entry - Format ACL entry
2901  * @hw: pointer to the hardware structure
2902  * @prof: pointer to flow profile
2903  * @e: pointer to the flow entry
2904  * @data: pointer to a data buffer containing flow entry's match values/masks
2905  * @acts: array of actions to be performed on a match
2906  * @acts_cnt: number of actions
2907  *
2908  * Formats the key (and key_inverse) to be matched from the data passed in,
2909  * along with data from the flow profile. This key/key_inverse pair makes up
2910  * the 'entry' for an ACL flow entry.
2911  */
2912 static enum ice_status
2913 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2914                         struct ice_flow_entry *e, u8 *data,
2915                         struct ice_flow_action *acts, u8 acts_cnt)
2916 {
2917         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2918         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2919         enum ice_status status;
2920         bool cnt_alloc;
2921         u8 prof_id = 0;
2922         u16 i, buf_sz;
2923
2924         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2925         if (status)
2926                 return status;
2927
2928         /* Format the result action */
2929
2930         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2931         if (status)
2932                 return status;
2933
2934         status = ICE_ERR_NO_MEMORY;
2935
2936         e->acts = (struct ice_flow_action *)
2937                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2938                            ICE_NONDMA_TO_NONDMA);
2939         if (!e->acts)
2940                 goto out;
2941
2942         e->acts_cnt = acts_cnt;
2943
2944         /* Format the matching data */
2945         buf_sz = prof->cfg.scen->width;
2946         buf = (u8 *)ice_malloc(hw, buf_sz);
2947         if (!buf)
2948                 goto out;
2949
2950         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2951         if (!dontcare)
2952                 goto out;
2953
2954         /* 'key' buffer will store both key and key_inverse, so it must be
2955          * twice the size of buf
2956          */
2957         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2958         if (!key)
2959                 goto out;
2960
2961         range_buf = (struct ice_aqc_acl_profile_ranges *)
2962                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2963         if (!range_buf)
2964                 goto out;
2965
2966         /* Set don't care mask to all 1's to start, will zero out used bytes */
2967         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2968
2969         for (i = 0; i < prof->segs_cnt; i++) {
2970                 struct ice_flow_seg_info *seg = &prof->segs[i];
2971                 u8 j;
2972
2973                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2974                                      ICE_FLOW_FIELD_IDX_MAX) {
2975                         struct ice_flow_fld_info *info = &seg->fields[j];
2976
2977                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2978                                 ice_flow_acl_frmt_entry_range(j, info,
2979                                                               range_buf, data,
2980                                                               &range);
2981                         else
2982                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2983                                                             dontcare, data);
2984                 }
2985
2986                 for (j = 0; j < seg->raws_cnt; j++) {
2987                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2988                         u16 dst, src, mask, k;
2989                         bool use_mask = false;
2990
2991                         src = info->src.val;
2992                         dst = info->entry.val -
2993                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2994                         mask = info->src.mask;
2995
2996                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2997                                 use_mask = true;
2998
2999                         for (k = 0; k < info->entry.last; k++, dst++) {
3000                                 buf[dst] = data[src++];
3001                                 if (use_mask)
3002                                         dontcare[dst] = ~data[mask++];
3003                                 else
3004                                         dontcare[dst] = 0;
3005                         }
3006                 }
3007         }
3008
3009         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
3010         dontcare[prof->cfg.scen->pid_idx] = 0;
3011
3012         /* Format the buffer for direction flags */
3013         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
3014
3015         if (prof->dir == ICE_FLOW_RX)
3016                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
3017
3018         if (range) {
3019                 buf[prof->cfg.scen->rng_chk_idx] = range;
3020                 /* Mark any unused range checkers as don't care */
3021                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
3022                 e->range_buf = range_buf;
3023         } else {
3024                 ice_free(hw, range_buf);
3025         }
3026
3027         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
3028                              buf_sz);
3029         if (status)
3030                 goto out;
3031
3032         e->entry = key;
3033         e->entry_sz = buf_sz * 2;
3034
3035 out:
3036         if (buf)
3037                 ice_free(hw, buf);
3038
3039         if (dontcare)
3040                 ice_free(hw, dontcare);
3041
3042         if (status && key)
3043                 ice_free(hw, key);
3044
3045         if (status && range_buf) {
3046                 ice_free(hw, range_buf);
3047                 e->range_buf = NULL;
3048         }
3049
3050         if (status && e->acts) {
3051                 ice_free(hw, e->acts);
3052                 e->acts = NULL;
3053                 e->acts_cnt = 0;
3054         }
3055
3056         if (status && cnt_alloc)
3057                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
3058
3059         return status;
3060 }
3061
3062 /**
3063  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
3064  *                                     the compared data.
3065  * @prof: pointer to flow profile
3066  * @e: pointer to the comparing flow entry
3067  * @do_chg_action: decide if we want to change the ACL action
3068  * @do_add_entry: decide if we want to add the new ACL entry
3069  * @do_rem_entry: decide if we want to remove the current ACL entry
3070  *
3071  * Find an ACL scenario entry that matches the compared data. At the same time,
3072  * this function also figures out:
3073  * a/ If we want to change the ACL action
3074  * b/ If we want to add the new ACL entry
3075  * c/ If we want to remove the current ACL entry
3076  */
3077 static struct ice_flow_entry *
3078 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
3079                                   struct ice_flow_entry *e, bool *do_chg_action,
3080                                   bool *do_add_entry, bool *do_rem_entry)
3081 {
3082         struct ice_flow_entry *p, *return_entry = NULL;
3083         u8 i, j;
3084
3085         /* Check if:
3086          * a/ There exists an entry with same matching data, but different
3087          *    priority, then we remove this existing ACL entry. Then, we
3088          *    will add the new entry to the ACL scenario.
3089          * b/ There exists an entry with same matching data, priority, and
3090          *    result action, then we do nothing
3091          * c/ There exists an entry with same matching data and priority, but
3092          *    a different action, then we only change the entry's action.
3093          * d/ Else, we add this new entry to the ACL scenario.
3094          */
3095         *do_chg_action = false;
3096         *do_add_entry = true;
3097         *do_rem_entry = false;
3098         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
3099                 if (memcmp(p->entry, e->entry, p->entry_sz))
3100                         continue;
3101
3102                 /* From this point, we have the same matching_data. */
3103                 *do_add_entry = false;
3104                 return_entry = p;
3105
3106                 if (p->priority != e->priority) {
3107                         /* matching data && !priority */
3108                         *do_add_entry = true;
3109                         *do_rem_entry = true;
3110                         break;
3111                 }
3112
3113                 /* From this point, we will have matching_data && priority */
3114                 if (p->acts_cnt != e->acts_cnt)
3115                         *do_chg_action = true;
3116                 for (i = 0; i < p->acts_cnt; i++) {
3117                         bool found_not_match = false;
3118
3119                         for (j = 0; j < e->acts_cnt; j++)
3120                                 if (memcmp(&p->acts[i], &e->acts[j],
3121                                            sizeof(struct ice_flow_action))) {
3122                                         found_not_match = true;
3123                                         break;
3124                                 }
3125
3126                         if (found_not_match) {
3127                                 *do_chg_action = true;
3128                                 break;
3129                         }
3130                 }
3131
3132                 /* (do_chg_action = true) means :
3133                  *    matching_data && priority && !result_action
3134                  * (do_chg_action = false) means :
3135                  *    matching_data && priority && result_action
3136                  */
3137                 break;
3138         }
3139
3140         return return_entry;
3141 }
3142
3143 /**
3144  * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
3145  * @p: flow priority
3146  */
3147 static enum ice_acl_entry_prio
3148 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
3149 {
3150         enum ice_acl_entry_prio acl_prio;
3151
3152         switch (p) {
3153         case ICE_FLOW_PRIO_LOW:
3154                 acl_prio = ICE_ACL_PRIO_LOW;
3155                 break;
3156         case ICE_FLOW_PRIO_NORMAL:
3157                 acl_prio = ICE_ACL_PRIO_NORMAL;
3158                 break;
3159         case ICE_FLOW_PRIO_HIGH:
3160                 acl_prio = ICE_ACL_PRIO_HIGH;
3161                 break;
3162         default:
3163                 acl_prio = ICE_ACL_PRIO_NORMAL;
3164                 break;
3165         }
3166
3167         return acl_prio;
3168 }
3169
3170 /**
3171  * ice_flow_acl_union_rng_chk - Perform union operation between two
3172  *                              range checker buffers
3173  * @dst_buf: pointer to destination range checker buffer
3174  * @src_buf: pointer to source range checker buffer
3175  *
3176  * This function performs the union of the dst_buf and src_buf range
3177  * checker buffers and saves the result back to dst_buf
3178  */
3179 static enum ice_status
3180 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
3181                            struct ice_aqc_acl_profile_ranges *src_buf)
3182 {
3183         u8 i, j;
3184
3185         if (!dst_buf || !src_buf)
3186                 return ICE_ERR_BAD_PTR;
3187
3188         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
3189                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
3190                 bool will_populate = false;
3191
3192                 in_data = &src_buf->checker_cfg[i];
3193
3194                 if (!in_data->mask)
3195                         break;
3196
3197                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
3198                         cfg_data = &dst_buf->checker_cfg[j];
3199
3200                         if (!cfg_data->mask ||
3201                             !memcmp(cfg_data, in_data,
3202                                     sizeof(struct ice_acl_rng_data))) {
3203                                 will_populate = true;
3204                                 break;
3205                         }
3206                 }
3207
3208                 if (will_populate) {
3209                         ice_memcpy(cfg_data, in_data,
3210                                    sizeof(struct ice_acl_rng_data),
3211                                    ICE_NONDMA_TO_NONDMA);
3212                 } else {
3213                         /* No available slot left to program range checker */
3214                         return ICE_ERR_MAX_LIMIT;
3215                 }
3216         }
3217
3218         return ICE_SUCCESS;
3219 }
3220
3221 /**
3222  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
3223  * @hw: pointer to the hardware structure
3224  * @prof: pointer to flow profile
3225  * @entry: double pointer to the flow entry
3226  *
3227  * This function looks at the entries currently added to the corresponding
3228  * ACL scenario, then performs matching logic to decide whether to add,
3229  * modify, or do nothing with this new entry.
3230  */
3231 static enum ice_status
3232 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
3233                                  struct ice_flow_entry **entry)
3234 {
3235         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
3236         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
3237         struct ice_acl_act_entry *acts = NULL;
3238         struct ice_flow_entry *exist;
3239         enum ice_status status = ICE_SUCCESS;
3240         struct ice_flow_entry *e;
3241         u8 i;
3242
3243         if (!entry || !(*entry) || !prof)
3244                 return ICE_ERR_BAD_PTR;
3245
3246         e = *entry;
3247
3248         do_chg_rng_chk = false;
3249         if (e->range_buf) {
3250                 u8 prof_id = 0;
3251
3252                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
3253                                               &prof_id);
3254                 if (status)
3255                         return status;
3256
3257                 /* Query the current range-checker value in FW */
3258                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
3259                                                    NULL);
3260                 if (status)
3261                         return status;
3262                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
3263                            sizeof(struct ice_aqc_acl_profile_ranges),
3264                            ICE_NONDMA_TO_NONDMA);
3265
3266                 /* Generate the new range-checker value */
3267                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
3268                 if (status)
3269                         return status;
3270
3271                 /* Reconfigure the range check if the buffer is changed. */
3272                 do_chg_rng_chk = false;
3273                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
3274                            sizeof(struct ice_aqc_acl_profile_ranges))) {
3275                         status = ice_prog_acl_prof_ranges(hw, prof_id,
3276                                                           &cfg_rng_buf, NULL);
3277                         if (status)
3278                                 return status;
3279
3280                         do_chg_rng_chk = true;
3281                 }
3282         }
3283
3284         /* Figure out if we want to (change the ACL action) and/or
3285          * (Add the new ACL entry) and/or (Remove the current ACL entry)
3286          */
3287         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
3288                                                   &do_add_entry, &do_rem_entry);
3289         if (do_rem_entry) {
3290                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
3291                 if (status)
3292                         return status;
3293         }
3294
3295         /* Prepare the result action buffer */
3296         acts = (struct ice_acl_act_entry *)
3297                 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
3298         if (!acts)
3299                 return ICE_ERR_NO_MEMORY;
3300
3301         for (i = 0; i < e->acts_cnt; i++)
3302                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
3303                            sizeof(struct ice_acl_act_entry),
3304                            ICE_NONDMA_TO_NONDMA);
3305
3306         if (do_add_entry) {
3307                 enum ice_acl_entry_prio prio;
3308                 u8 *keys, *inverts;
3309                 u16 entry_idx;
3310
3311                 keys = (u8 *)e->entry;
3312                 inverts = keys + (e->entry_sz / 2);
3313                 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
3314
3315                 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
3316                                            inverts, acts, e->acts_cnt,
3317                                            &entry_idx);
3318                 if (status)
3319                         goto out;
3320
3321                 e->scen_entry_idx = entry_idx;
3322                 LIST_ADD(&e->l_entry, &prof->entries);
3323         } else {
3324                 if (do_chg_action) {
3325                         /* Update the SW copy of the existing entry's action
3326                          * memory info with e's action memory info
3327                          */
3328                         ice_free(hw, exist->acts);
3329                         exist->acts_cnt = e->acts_cnt;
3330                         exist->acts = (struct ice_flow_action *)
3331                                 ice_calloc(hw, exist->acts_cnt,
3332                                            sizeof(struct ice_flow_action));
3333                         if (!exist->acts) {
3334                                 status = ICE_ERR_NO_MEMORY;
3335                                 goto out;
3336                         }
3337
3338                         ice_memcpy(exist->acts, e->acts,
3339                                    sizeof(struct ice_flow_action) * e->acts_cnt,
3340                                    ICE_NONDMA_TO_NONDMA);
3341
3342                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3343                                                   e->acts_cnt,
3344                                                   exist->scen_entry_idx);
3345                         if (status)
3346                                 goto out;
3347                 }
3348
3349                 if (do_chg_rng_chk) {
3350                         /* In this case, we want to update the range checker
3351                          * information of the existing entry
3352                          */
3353                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
3354                                                             e->range_buf);
3355                         if (status)
3356                                 goto out;
3357                 }
3358
3359                 /* As we don't add the new entry to our SW DB, deallocate its
3360                  * memory and return the existing entry to the caller
3361                  */
3362                 ice_dealloc_flow_entry(hw, e);
3363                 *(entry) = exist;
3364         }
3365 out:
3366         ice_free(hw, acts);
3367
3368         return status;
3369 }
3370
3371 /**
3372  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3373  * @hw: pointer to the hardware structure
3374  * @prof: pointer to flow profile
3375  * @e: double pointer to the flow entry
3376  */
3377 static enum ice_status
3378 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3379                             struct ice_flow_entry **e)
3380 {
3381         enum ice_status status;
3382
3383         ice_acquire_lock(&prof->entries_lock);
3384         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3385         ice_release_lock(&prof->entries_lock);
3386
3387         return status;
3388 }
3389
3390 /**
3391  * ice_flow_add_entry - Add a flow entry
3392  * @hw: pointer to the HW struct
3393  * @blk: classification stage
3394  * @prof_id: ID of the profile to add a new flow entry to
3395  * @entry_id: unique ID to identify this flow entry
3396  * @vsi_handle: software VSI handle for the flow entry
3397  * @prio: priority of the flow entry
3398  * @data: pointer to a data buffer containing flow entry's match values/masks
3399  * @acts: arrays of actions to be performed on a match
3400  * @acts_cnt: number of actions
3401  * @entry_h: pointer to buffer that receives the new flow entry's handle
3402  */
3403 enum ice_status
3404 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3405                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3406                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
3407                    u64 *entry_h)
3408 {
3409         struct ice_flow_entry *e = NULL;
3410         struct ice_flow_prof *prof;
3411         enum ice_status status = ICE_SUCCESS;
3412
3413         /* ACL entries must indicate an action */
3414         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3415                 return ICE_ERR_PARAM;
3416
3417         /* No flow entry data is expected for RSS */
3418         if (!entry_h || (!data && blk != ICE_BLK_RSS))
3419                 return ICE_ERR_BAD_PTR;
3420
3421         if (!ice_is_vsi_valid(hw, vsi_handle))
3422                 return ICE_ERR_PARAM;
3423
3424         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3425
3426         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3427         if (!prof) {
3428                 status = ICE_ERR_DOES_NOT_EXIST;
3429         } else {
3430                 /* Allocate memory for the entry being added and associate
3431                  * the VSI to the found flow profile
3432                  */
3433                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3434                 if (!e)
3435                         status = ICE_ERR_NO_MEMORY;
3436                 else
3437                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3438         }
3439
3440         ice_release_lock(&hw->fl_profs_locks[blk]);
3441         if (status)
3442                 goto out;
3443
3444         e->id = entry_id;
3445         e->vsi_handle = vsi_handle;
3446         e->prof = prof;
3447         e->priority = prio;
3448
3449         switch (blk) {
3450         case ICE_BLK_FD:
3451         case ICE_BLK_RSS:
3452                 break;
3453         case ICE_BLK_ACL:
3454                 /* ACL will handle the entry management */
3455                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3456                                                  acts_cnt);
3457                 if (status)
3458                         goto out;
3459
3460                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3461                 if (status)
3462                         goto out;
3463
3464                 break;
3465         default:
3466                 status = ICE_ERR_NOT_IMPL;
3467                 goto out;
3468         }
3469
3470         if (blk != ICE_BLK_ACL) {
3471                 /* ACL will handle the entry management */
3472                 ice_acquire_lock(&prof->entries_lock);
3473                 LIST_ADD(&e->l_entry, &prof->entries);
3474                 ice_release_lock(&prof->entries_lock);
3475         }
3476
3477         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3478
3479 out:
3480         if (status && e) {
3481                 if (e->entry)
3482                         ice_free(hw, e->entry);
3483                 ice_free(hw, e);
3484         }
3485
3486         return status;
3487 }
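
/* Illustrative usage sketch (not part of the driver): match_data below is a
 * hypothetical caller-owned buffer laid out according to the val_loc/mask_loc
 * byte offsets previously given to ice_flow_set_fld() for this profile.
 *
 *	u64 entry_h;
 *	enum ice_status status;
 *
 *	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, entry_id,
 *				    vsi_handle, ICE_FLOW_PRIO_NORMAL,
 *				    match_data, NULL, 0, &entry_h);
 *	...
 *	status = ice_flow_rem_entry(hw, ICE_BLK_FD, entry_h);
 */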
3488
3489 /**
3490  * ice_flow_rem_entry - Remove a flow entry
3491  * @hw: pointer to the HW struct
3492  * @blk: classification stage
3493  * @entry_h: handle to the flow entry to be removed
3494  */
3495 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3496                                    u64 entry_h)
3497 {
3498         struct ice_flow_entry *entry;
3499         struct ice_flow_prof *prof;
3500         enum ice_status status = ICE_SUCCESS;
3501
3502         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3503                 return ICE_ERR_PARAM;
3504
3505         entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
3506
3507         /* Retain the pointer to the flow profile as the entry will be freed */
3508         prof = entry->prof;
3509
3510         if (prof) {
3511                 ice_acquire_lock(&prof->entries_lock);
3512                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3513                 ice_release_lock(&prof->entries_lock);
3514         }
3515
3516         return status;
3517 }
3518
3519 /**
3520  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3521  * @seg: packet segment the field being set belongs to
3522  * @fld: field to be set
3523  * @field_type: type of the field
3524  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3525  *           entry's input buffer
3526  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3527  *            input buffer
3528  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3529  *            entry's input buffer
3530  *
3531  * This helper function stores information of a field being matched, including
3532  * the type of the field and the locations of the value to match, the mask, and
3533  * the upper-bound value in the start of the input buffer for a flow entry.
3534  * This function should only be used for fixed-size data structures.
3535  *
3536  * This function also opportunistically determines the protocol headers to be
3537  * present based on the fields being set. Some fields cannot be used alone to
3538  * determine the protocol headers present. Sometimes, fields for particular
3539  * protocol headers are not matched. In those cases, the protocol headers
3540  * must be explicitly set.
3541  */
3542 static void
3543 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3544                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3545                      u16 mask_loc, u16 last_loc)
3546 {
3547         u64 bit = BIT_ULL(fld);
3548
3549         seg->match |= bit;
3550         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3551                 seg->range |= bit;
3552
3553         seg->fields[fld].type = field_type;
3554         seg->fields[fld].src.val = val_loc;
3555         seg->fields[fld].src.mask = mask_loc;
3556         seg->fields[fld].src.last = last_loc;
3557
3558         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3559 }
3560
3561 /**
3562  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3563  * @seg: packet segment the field being set belongs to
3564  * @fld: field to be set
3565  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3566  *           entry's input buffer
3567  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3568  *            input buffer
3569  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3570  *            entry's input buffer
3571  * @range: indicate if field being matched is to be in a range
3572  *
3573  * This function specifies the locations, in the form of byte offsets from the
3574  * start of the input buffer for a flow entry, from where the value to match,
3575  * the mask value, and upper value can be extracted. These locations are then
3576  * stored in the flow profile. When adding a flow entry associated with the
3577  * flow profile, these locations will be used to quickly extract the values and
3578  * create the content of a match entry. This function should only be used for
3579  * fixed-size data structures.
3580  */
3581 void
3582 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3583                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3584 {
3585         enum ice_flow_fld_match_type t = range ?
3586                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3587
3588         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3589 }
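
/* Illustrative sketch (not part of the driver) of how the byte-offset
 * parameters are usually derived: the caller defines its own match-data
 * layout and passes offsetof() values. The structure below is a hypothetical
 * example, as is the field index.
 *
 *	struct my_ipv4_match {
 *		u32 src_ip;
 *		u32 src_ip_mask;
 *	};
 *
 *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *			 offsetof(struct my_ipv4_match, src_ip),
 *			 offsetof(struct my_ipv4_match, src_ip_mask),
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 *
 * The same offsets are later used to read values out of the data buffer
 * passed to ice_flow_add_entry().
 */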
3590
3591 /**
3592  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3593  * @seg: packet segment the field being set belongs to
3594  * @fld: field to be set
3595  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3596  *           entry's input buffer
3597  * @pref_loc: location of prefix value from entry's input buffer
3598  * @pref_sz: size of the location holding the prefix value
3599  *
3600  * This function specifies the locations, in the form of byte offsets from the
3601  * start of the input buffer for a flow entry, from where the value to match
3602  * and the IPv4 prefix value can be extracted. These locations are then stored
3603  * in the flow profile. When adding flow entries to the associated flow profile,
3604  * these locations can be used to quickly extract the values to create the
3605  * content of a match entry. This function should only be used for fixed-size
3606  * data structures.
3607  */
3608 void
3609 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3610                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3611 {
3612         /* For this type of field, the "mask" location is reused to hold the
3613          * location of the prefix value, and the "last" location is reused to
3614          * hold the size of the location holding the prefix value.
3615          */
3616         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3617                              pref_loc, (u16)pref_sz);
3618 }
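
/* Hypothetical prefix-match sketch: dst_ip holds the IPv4 address to match
 * and dst_pref_len holds the prefix length; both live in a caller-defined
 * input buffer (names are illustrative only).
 *
 *	ice_flow_set_fld_prefix(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
 *				offsetof(struct my_ipv4_key, dst_ip),
 *				offsetof(struct my_ipv4_key, dst_pref_len),
 *				sizeof(u8));
 */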
3619
3620 /**
3621  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3622  * @seg: packet segment the field being set belongs to
3623  * @off: offset of the raw field from the beginning of the segment in bytes
3624  * @len: length of the raw pattern to be matched
3625  * @val_loc: location of the value to match from entry's input buffer
3626  * @mask_loc: location of mask value from entry's input buffer
3627  *
3628  * This function specifies the offset of the raw field to be matched from the
3629  * beginning of the specified packet segment, and the locations, in the form of
3630  * byte offsets from the start of the input buffer for a flow entry, from where
3631  * the value to match and the mask value can be extracted. These locations are
3632  * then stored in the flow profile. When adding flow entries to the associated
3633  * flow profile, these locations can be used to quickly extract the values to
3634  * create the content of a match entry. This function should only be used for
3635  * fixed-size data structures.
3636  */
3637 void
3638 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3639                      u16 val_loc, u16 mask_loc)
3640 {
3641         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3642                 seg->raws[seg->raws_cnt].off = off;
3643                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3644                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3645                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3646                 /* The "last" field is used to store the length of the field */
3647                 seg->raws[seg->raws_cnt].info.src.last = len;
3648         }
3649
3650         /* Overflows of "raws" will be handled as an error condition later in
3651          * the flow when this information is processed.
3652          */
3653         seg->raws_cnt++;
3654 }
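
/* Hypothetical raw-match sketch: match a 4-byte pattern located 16 bytes into
 * the segment, with the pattern and its mask taken from caller-chosen offsets
 * in the input buffer (all names and offsets are illustrative only).
 *
 *	ice_flow_add_fld_raw(seg, 16, 4,
 *			     offsetof(struct my_raw_key, pattern),
 *			     offsetof(struct my_raw_key, pattern_mask));
 */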
3655
3656 /**
3657  * ice_flow_rem_vsi_prof - remove VSI from flow profile
3658  * @hw: pointer to the hardware structure
3659  * @blk: classification stage
3660  * @vsi_handle: software VSI handle
3661  * @prof_id: unique ID to identify this flow profile
3662  *
3663  * This function removes the flow entries associated with the input
3664  * VSI handle and disassociates the VSI from the flow profile.
3665  */
3666 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3667                                       u64 prof_id)
3668 {
3669         struct ice_flow_prof *prof = NULL;
3670         enum ice_status status = ICE_SUCCESS;
3671
3672         if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3673                 return ICE_ERR_PARAM;
3674
3675         /* find flow profile pointer with input package block and profile id */
3676         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3677         if (!prof) {
3678                 ice_debug(hw, ICE_DBG_PKG,
3679                           "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3680                 return ICE_ERR_DOES_NOT_EXIST;
3681         }
3682
3683         /* Remove all remaining flow entries before removing the flow profile */
3684         if (!LIST_EMPTY(&prof->entries)) {
3685                 struct ice_flow_entry *e, *t;
3686
3687                 ice_acquire_lock(&prof->entries_lock);
3688                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
3689                                          l_entry) {
3690                         if (e->vsi_handle != vsi_handle)
3691                                 continue;
3692
3693                         status = ice_flow_rem_entry_sync(hw, blk, e);
3694                         if (status)
3695                                 break;
3696                 }
3697                 ice_release_lock(&prof->entries_lock);
3698         }
3699         if (status)
3700                 return status;
3701
3702         /* Disassociate the flow profile from the SW VSI handle */
3703         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3704         if (status)
3705                 ice_debug(hw, ICE_DBG_PKG,
3706                           "ice_flow_disassoc_prof() failed with status=%d\n",
3707                           status);
3708         return status;
3709 }
3710
3711 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3712 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_ETH_NON_IP | ICE_FLOW_SEG_HDR_VLAN)
3713
3714 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3715         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3716
3717 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3718         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3719
3720 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3721         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3722          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3723          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3724
3725 /**
3726  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3727  * @segs: pointer to the flow field segment(s)
3728  * @seg_cnt: segment count
3729  * @cfg: configure parameters
3730  *
3731  * Helper function to extract fields from the hash bitmap and use the flow
3732  * header value to set up the flow field segment(s) for later use when adding
3733  * or removing a flow profile entry.
3734  */
3735 static enum ice_status
3736 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3737                           const struct ice_rss_hash_cfg *cfg)
3738 {
3739         struct ice_flow_seg_info *seg;
3740         u64 val;
3741         u8 i;
3742
3743         /* set innermost segment */
3744         seg = &segs[seg_cnt - 1];
3745
3746         ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3747                              ICE_FLOW_FIELD_IDX_MAX)
3748                 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3749                                  ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3750                                  ICE_FLOW_FLD_OFF_INVAL, false);
3751
3752         ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3753
3754         /* set outermost header */
3755         if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3756                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3757                                                    ICE_FLOW_SEG_HDR_IPV_FRAG |
3758                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3759         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3760                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3761                                                    ICE_FLOW_SEG_HDR_IPV_FRAG |
3762                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3763         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE)
3764                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3765                                                    ICE_FLOW_SEG_HDR_GRE |
3766                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3767         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE)
3768                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3769                                                    ICE_FLOW_SEG_HDR_GRE |
3770                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3771
3772         if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3773             ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
3774             ~ICE_FLOW_SEG_HDR_IPV_FRAG)
3775                 return ICE_ERR_PARAM;
3776
3777         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3778         if (val && !ice_is_pow2(val))
3779                 return ICE_ERR_CFG;
3780
3781         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3782         if (val && !ice_is_pow2(val))
3783                 return ICE_ERR_CFG;
3784
3785         return ICE_SUCCESS;
3786 }
3787
3788 /**
3789  * ice_rem_vsi_rss_list - remove VSI from RSS list
3790  * @hw: pointer to the hardware structure
3791  * @vsi_handle: software VSI handle
3792  *
3793  * Remove the VSI from all RSS configurations in the list.
3794  */
3795 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3796 {
3797         struct ice_rss_cfg *r, *tmp;
3798
3799         if (LIST_EMPTY(&hw->rss_list_head))
3800                 return;
3801
3802         ice_acquire_lock(&hw->rss_locks);
3803         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3804                                  ice_rss_cfg, l_entry)
3805                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3806                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3807                                 LIST_DEL(&r->l_entry);
3808                                 ice_free(hw, r);
3809                         }
3810         ice_release_lock(&hw->rss_locks);
3811 }
3812
3813 /**
3814  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3815  * @hw: pointer to the hardware structure
3816  * @vsi_handle: software VSI handle
3817  *
3818  * This function will iterate through all flow profiles and disassociate
3819  * the VSI from any profile it is associated with. If a flow profile is
3820  * left with no VSIs, it will be removed.
3821  */
3822 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3823 {
3824         const enum ice_block blk = ICE_BLK_RSS;
3825         struct ice_flow_prof *p, *t;
3826         enum ice_status status = ICE_SUCCESS;
3827
3828         if (!ice_is_vsi_valid(hw, vsi_handle))
3829                 return ICE_ERR_PARAM;
3830
3831         if (LIST_EMPTY(&hw->fl_profs[blk]))
3832                 return ICE_SUCCESS;
3833
3834         ice_acquire_lock(&hw->rss_locks);
3835         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3836                                  l_entry)
3837                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3838                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3839                         if (status)
3840                                 break;
3841
3842                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3843                                 status = ice_flow_rem_prof(hw, blk, p->id);
3844                                 if (status)
3845                                         break;
3846                         }
3847                 }
3848         ice_release_lock(&hw->rss_locks);
3849
3850         return status;
3851 }
3852
3853 /**
3854  * ice_get_rss_hdr_type - get an RSS profile's header type
3855  * @prof: RSS flow profile
3856  */
3857 static enum ice_rss_cfg_hdr_type
3858 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3859 {
3860         enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3861
3862         if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3863                 hdr_type = ICE_RSS_OUTER_HEADERS;
3864         } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3865                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3866                         hdr_type = ICE_RSS_INNER_HEADERS;
3867                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3868                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3869                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3870                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3871         }
3872
3873         return hdr_type;
3874 }
3875
3876 /**
3877  * ice_rem_rss_list - remove RSS configuration from list
3878  * @hw: pointer to the hardware structure
3879  * @vsi_handle: software VSI handle
3880  * @prof: pointer to flow profile
3881  *
3882  * Assumption: lock has already been acquired for RSS list
3883  */
3884 static void
3885 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3886 {
3887         enum ice_rss_cfg_hdr_type hdr_type;
3888         struct ice_rss_cfg *r, *tmp;
3889
3890         /* Search for RSS hash fields associated with the VSI that match the
3891          * hash configuration associated with the flow profile. If found,
3892          * remove it from the VSI context's RSS entry list and delete the entry.
3893          */
3894         hdr_type = ice_get_rss_hdr_type(prof);
3895         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3896                                  ice_rss_cfg, l_entry)
3897                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3898                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3899                     r->hash.hdr_type == hdr_type) {
3900                         ice_clear_bit(vsi_handle, r->vsis);
3901                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3902                                 LIST_DEL(&r->l_entry);
3903                                 ice_free(hw, r);
3904                         }
3905                         return;
3906                 }
3907 }
3908
3909 /**
3910  * ice_add_rss_list - add RSS configuration to list
3911  * @hw: pointer to the hardware structure
3912  * @vsi_handle: software VSI handle
3913  * @prof: pointer to flow profile
3914  *
3915  * Assumption: lock has already been acquired for RSS list
3916  */
3917 static enum ice_status
3918 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3919 {
3920         enum ice_rss_cfg_hdr_type hdr_type;
3921         struct ice_rss_cfg *r, *rss_cfg;
3922
3923         hdr_type = ice_get_rss_hdr_type(prof);
3924         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3925                             ice_rss_cfg, l_entry)
3926                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3927                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3928                     r->hash.hdr_type == hdr_type) {
3929                         ice_set_bit(vsi_handle, r->vsis);
3930                         return ICE_SUCCESS;
3931                 }
3932
3933         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3934         if (!rss_cfg)
3935                 return ICE_ERR_NO_MEMORY;
3936
3937         rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3938         rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3939         rss_cfg->hash.hdr_type = hdr_type;
3940         rss_cfg->hash.symm = prof->cfg.symm;
3941         ice_set_bit(vsi_handle, rss_cfg->vsis);
3942
3943         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3944
3945         return ICE_SUCCESS;
3946 }
3947
3948 #define ICE_FLOW_PROF_HASH_S    0
3949 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3950 #define ICE_FLOW_PROF_HDR_S     32
3951 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3952 #define ICE_FLOW_PROF_ENCAP_S   62
3953 #define ICE_FLOW_PROF_ENCAP_M   (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3954
3955 /* Flow profile ID format:
3956  * [0:31] - Packet match fields
3957  * [32:61] - Protocol header
3958  * [62:63] - Encapsulation flag:
3959  *           0 - non-tunneled
3960  *           1 - tunneled
3961  *           2 - tunneled with outer IPv4
3962  *           3 - tunneled with outer IPv6
3963  */
3964 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3965         ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3966                (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3967                (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
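
/* Worked example with arbitrary inputs: ICE_FLOW_GEN_PROFID(0x0000000C, 0x3, 2)
 * yields 0x800000030000000CULL - the hash value lands in bits 0:31, the
 * protocol header value in bits 32:61, and the encapsulation flag (2, tunneled
 * with outer IPv4) in bits 62:63.
 */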
3968
3969 static void
3970 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3971 {
3972         u32 s = ((src % 4) << 3); /* byte shift */
3973         u32 v = dst | 0x80; /* value to program */
3974         u8 i = src / 4; /* register index */
3975         u32 reg;
3976
3977         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3978         reg = (reg & ~(0xff << s)) | (v << s);
3979         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3980 }
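
/* Worked example of the index math above (illustrative values): for src = 5,
 * the register index is i = 5 / 4 = 1 and the byte shift is
 * s = (5 % 4) * 8 = 8, so the second byte of GLQF_HSYMM(prof_id, 1) is
 * overwritten with (dst | 0x80), i.e. the destination word index with the
 * top marker bit set.
 */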
3981
3982 static void
3983 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3984 {
3985         int fv_last_word =
3986                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3987         int i;
3988
3989         for (i = 0; i < len; i++) {
3990                 ice_rss_config_xor_word(hw, prof_id,
3991                                         /* Yes, the field vector ordering in
3992                                          * GLQF_HSYMM and GLQF_HINSET is reversed!
3993                                          */
3994                                         fv_last_word - (src + i),
3995                                         fv_last_word - (dst + i));
3996                 ice_rss_config_xor_word(hw, prof_id,
3997                                         fv_last_word - (dst + i),
3998                                         fv_last_word - (src + i));
3999         }
4000 }
4001
4002 static void
4003 ice_rss_update_symm(struct ice_hw *hw,
4004                     struct ice_flow_prof *prof)
4005 {
4006         struct ice_prof_map *map;
4007         u8 prof_id, m;
4008
4009         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
4010         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
4011         if (map)
4012                 prof_id = map->prof_id;
4013         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
4014         if (!map)
4015                 return;
4016         /* clear to default */
4017         for (m = 0; m < 6; m++)
4018                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
4019         if (prof->cfg.symm) {
4020                 struct ice_flow_seg_info *seg =
4021                         &prof->segs[prof->segs_cnt - 1];
4022
4023                 struct ice_flow_seg_xtrct *ipv4_src =
4024                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
4025                 struct ice_flow_seg_xtrct *ipv4_dst =
4026                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
4027                 struct ice_flow_seg_xtrct *ipv6_src =
4028                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
4029                 struct ice_flow_seg_xtrct *ipv6_dst =
4030                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
4031
4032                 struct ice_flow_seg_xtrct *tcp_src =
4033                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
4034                 struct ice_flow_seg_xtrct *tcp_dst =
4035                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
4036
4037                 struct ice_flow_seg_xtrct *udp_src =
4038                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
4039                 struct ice_flow_seg_xtrct *udp_dst =
4040                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
4041
4042                 struct ice_flow_seg_xtrct *sctp_src =
4043                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
4044                 struct ice_flow_seg_xtrct *sctp_dst =
4045                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
4046
4047                 /* xor IPv4 */
4048                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
4049                         ice_rss_config_xor(hw, prof_id,
4050                                            ipv4_src->idx, ipv4_dst->idx, 2);
4051
4052                 /* xor IPv6 */
4053                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
4054                         ice_rss_config_xor(hw, prof_id,
4055                                            ipv6_src->idx, ipv6_dst->idx, 8);
4056
4057                 /* xor TCP */
4058                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
4059                         ice_rss_config_xor(hw, prof_id,
4060                                            tcp_src->idx, tcp_dst->idx, 1);
4061
4062                 /* xor UDP */
4063                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
4064                         ice_rss_config_xor(hw, prof_id,
4065                                            udp_src->idx, udp_dst->idx, 1);
4066
4067                 /* xor SCTP */
4068                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
4069                         ice_rss_config_xor(hw, prof_id,
4070                                            sctp_src->idx, sctp_dst->idx, 1);
4071         }
4072 }
4073
4074 /**
4075  * ice_rss_cfg_raw_symm - configure symmetric hash parameters
4076  * for raw pattern
4077  * @hw: pointer to the hardware structure
4078  * @prof: pointer to parser profile
4079  * @prof_id: profile ID
4080  *
4081  * Calculate symmetric hash parameters based on input protocol type.
4082  */
4083 static void
4084 ice_rss_cfg_raw_symm(struct ice_hw *hw,
4085                      struct ice_parser_profile *prof, u64 prof_id)
4086 {
4087         u8 src_idx, dst_idx, proto_id;
4088         int len, i = 0;
4089
4090         while (i < prof->fv_num) {
4091                 proto_id = prof->fv[i].proto_id;
4092
4093                 switch (proto_id) {
4094                 case ICE_PROT_IPV4_OF_OR_S:
4095                         len = ICE_FLOW_FLD_SZ_IPV4_ADDR /
4096                               ICE_FLOW_FV_EXTRACT_SZ;
4097                         if (prof->fv[i].offset ==
4098                             ICE_FLOW_FIELD_IPV4_SRC_OFFSET &&
4099                             prof->fv[i + len].proto_id == proto_id &&
4100                             prof->fv[i + len].offset ==
4101                             ICE_FLOW_FIELD_IPV4_DST_OFFSET) {
4102                                 src_idx = i;
4103                                 dst_idx = i + len;
4104                                 i += 2 * len;
4105                                 break;
4106                         }
4107                         i++;
4108                         continue;
4109                 case ICE_PROT_IPV6_OF_OR_S:
4110                         len = ICE_FLOW_FLD_SZ_IPV6_ADDR /
4111                               ICE_FLOW_FV_EXTRACT_SZ;
4112                         if (prof->fv[i].offset ==
4113                             ICE_FLOW_FIELD_IPV6_SRC_OFFSET &&
4114                             prof->fv[i + len].proto_id == proto_id &&
4115                             prof->fv[i + len].offset ==
4116                             ICE_FLOW_FIELD_IPV6_DST_OFFSET) {
4117                                 src_idx = i;
4118                                 dst_idx = i + len;
4119                                 i += 2 * len;
4120                                 break;
4121                         }
4122                         i++;
4123                         continue;
4124                 case ICE_PROT_TCP_IL:
4125                 case ICE_PROT_UDP_IL_OR_S:
4126                 case ICE_PROT_SCTP_IL:
4127                         len = ICE_FLOW_FLD_SZ_PORT /
4128                               ICE_FLOW_FV_EXTRACT_SZ;
4129                         if (prof->fv[i].offset ==
4130                             ICE_FLOW_FIELD_SRC_PORT_OFFSET &&
4131                             prof->fv[i + len].proto_id == proto_id &&
4132                             prof->fv[i + len].offset ==
4133                             ICE_FLOW_FIELD_DST_PORT_OFFSET) {
4134                                 src_idx = i;
4135                                 dst_idx = i + len;
4136                                 i += 2 * len;
4137                                 break;
4138                         }
4139                         i++;
4140                         continue;
4141                 default:
4142                         i++;
4143                         continue;
4144                 }
4145                 ice_rss_config_xor(hw, prof_id, src_idx, dst_idx, len);
4146         }
4147 }
4148
4149 /* Max register index per packet profile */
4150 #define ICE_SYMM_REG_INDEX_MAX 6
4151
4152 /**
4153  * ice_rss_update_raw_symm - update symmetric hash configuration
4154  * for raw pattern
4155  * @hw: pointer to the hardware structure
4156  * @cfg: configure parameters for raw pattern
4157  * @id: profile tracking ID
4158  *
4159  * Update symmetric hash configuration for raw pattern if required.
4160  * Otherwise only clear to default.
4161  */
4162 void
4163 ice_rss_update_raw_symm(struct ice_hw *hw,
4164                         struct ice_rss_raw_cfg *cfg, u64 id)
4165 {
4166         struct ice_prof_map *map;
4167         u8 prof_id, m;
4168
4169         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
4170         map = ice_search_prof_id(hw, ICE_BLK_RSS, id);
4171         if (map)
4172                 prof_id = map->prof_id;
4173         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
4174         if (!map)
4175                 return;
4176         /* clear to default */
4177         for (m = 0; m < ICE_SYMM_REG_INDEX_MAX; m++)
4178                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
4179         if (cfg->symm)
4180                 ice_rss_cfg_raw_symm(hw, &cfg->prof, prof_id);
4181 }
4182
4183 /**
4184  * ice_add_rss_cfg_sync - add an RSS configuration
4185  * @hw: pointer to the hardware structure
4186  * @vsi_handle: software VSI handle
4187  * @cfg: configure parameters
4188  *
4189  * Assumption: lock has already been acquired for RSS list
4190  */
4191 static enum ice_status
4192 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
4193                      const struct ice_rss_hash_cfg *cfg)
4194 {
4195         const enum ice_block blk = ICE_BLK_RSS;
4196         struct ice_flow_prof *prof = NULL;
4197         struct ice_flow_seg_info *segs;
4198         enum ice_status status;
4199         u8 segs_cnt;
4200
4201         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
4202                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
4203
4204         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
4205                                                       sizeof(*segs));
4206         if (!segs)
4207                 return ICE_ERR_NO_MEMORY;
4208
4209         /* Construct the packet segment info from the hashed fields */
4210         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
4211         if (status)
4212                 goto exit;
4213
4214         /* Search for a flow profile that has matching headers, hash fields
4215          * and has the input VSI associated with it. If found, no further
4216          * operations are required, so exit.
4217          */
4218         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4219                                         vsi_handle,
4220                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
4221                                         ICE_FLOW_FIND_PROF_CHK_VSI);
4222         if (prof) {
4223                 if (prof->cfg.symm == cfg->symm)
4224                         goto exit;
4225                 prof->cfg.symm = cfg->symm;
4226                 goto update_symm;
4227         }
4228
4229         /* Check if a flow profile exists with the same protocol headers and
4230          * associated with the input VSI. If so, disassociate the VSI from
4231          * this profile. The VSI will then be added to a new profile created
4232          * with the protocol headers and the new hash field configuration.
4233          */
4234         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4235                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
4236         if (prof) {
4237                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
4238                 if (!status)
4239                         ice_rem_rss_list(hw, vsi_handle, prof);
4240                 else
4241                         goto exit;
4242
4243                 /* Remove profile if it has no VSIs associated */
4244                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
4245                         status = ice_flow_rem_prof(hw, blk, prof->id);
4246                         if (status)
4247                                 goto exit;
4248                 }
4249         }
4250
4251         /* Search for a profile that has same match fields only. If this
4252          * exists then associate the VSI to this profile.
4253          */
4254         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4255                                         vsi_handle,
4256                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
4257         if (prof) {
4258                 if (prof->cfg.symm == cfg->symm) {
4259                         status = ice_flow_assoc_prof(hw, blk, prof,
4260                                                      vsi_handle);
4261                         if (!status)
4262                                 status = ice_add_rss_list(hw, vsi_handle,
4263                                                           prof);
4264                 } else {
4265                         /* if a profile exists but with a different symmetric
4266                          * requirement, just return an error.
4267                          */
4268                         status = ICE_ERR_NOT_SUPPORTED;
4269                 }
4270                 goto exit;
4271         }
4272
4273         /* Create a new flow profile with generated profile and packet
4274          * segment information.
4275          */
4276         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
4277                                    ICE_FLOW_GEN_PROFID(cfg->hash_flds,
4278                                                        segs[segs_cnt - 1].hdrs,
4279                                                        cfg->hdr_type),
4280                                    segs, segs_cnt, NULL, 0, &prof);
4281         if (status)
4282                 goto exit;
4283
4284         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
4285         /* If association to a new flow profile failed then this profile can
4286          * be removed.
4287          */
4288         if (status) {
4289                 ice_flow_rem_prof(hw, blk, prof->id);
4290                 goto exit;
4291         }
4292
4293         status = ice_add_rss_list(hw, vsi_handle, prof);
4294
4295         prof->cfg.symm = cfg->symm;
4296 update_symm:
4297         ice_rss_update_symm(hw, prof);
4298
4299 exit:
4300         ice_free(hw, segs);
4301         return status;
4302 }
4303
4304 /**
4305  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
4306  * @hw: pointer to the hardware structure
4307  * @vsi_handle: software VSI handle
4308  * @cfg: configure parameters
4309  *
4310  * This function will generate a flow profile based on the fields to hash on
4311  * and the protocol header types, and will use the VSI handle to add a flow
4312  * entry to the profile.
4313  */
4314 enum ice_status
4315 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4316                 const struct ice_rss_hash_cfg *cfg)
4317 {
4318         struct ice_rss_hash_cfg local_cfg;
4319         enum ice_status status;
4320
4321         if (!ice_is_vsi_valid(hw, vsi_handle) ||
4322             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4323             cfg->hash_flds == ICE_HASH_INVALID)
4324                 return ICE_ERR_PARAM;
4325
4326         local_cfg = *cfg;
4327         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4328                 ice_acquire_lock(&hw->rss_locks);
4329                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4330                 ice_release_lock(&hw->rss_locks);
4331         } else {
4332                 ice_acquire_lock(&hw->rss_locks);
4333                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4334                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4335                 if (!status) {
4336                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4337                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
4338                                                       &local_cfg);
4339                 }
4340                 ice_release_lock(&hw->rss_locks);
4341         }
4342
4343         return status;
4344 }
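
/* Illustrative caller sketch, assuming hw and vsi_handle were set up by the
 * caller's own initialization path: hash the outer IPv4 source and destination
 * addresses, without symmetric hashing.
 *
 *	struct ice_rss_hash_cfg cfg = { 0 };
 *	enum ice_status status;
 *
 *	cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4;
 *	cfg.hash_flds = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *			BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA);
 *	cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
 *	cfg.symm = false;
 *
 *	status = ice_add_rss_cfg(hw, vsi_handle, &cfg);
 */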
4345
4346 /**
4347  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
4348  * @hw: pointer to the hardware structure
4349  * @vsi_handle: software VSI handle
4350  * @cfg: configure parameters
4351  *
4352  * Assumption: lock has already been acquired for RSS list
4353  */
4354 static enum ice_status
4355 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
4356                      const struct ice_rss_hash_cfg *cfg)
4357 {
4358         const enum ice_block blk = ICE_BLK_RSS;
4359         struct ice_flow_seg_info *segs;
4360         struct ice_flow_prof *prof;
4361         enum ice_status status;
4362         u8 segs_cnt;
4363
4364         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
4365                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
4366         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
4367                                                       sizeof(*segs));
4368         if (!segs)
4369                 return ICE_ERR_NO_MEMORY;
4370
4371         /* Construct the packet segment info from the hashed fields */
4372         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
4373         if (status)
4374                 goto out;
4375
4376         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4377                                         vsi_handle,
4378                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
4379         if (!prof) {
4380                 status = ICE_ERR_DOES_NOT_EXIST;
4381                 goto out;
4382         }
4383
4384         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
4385         if (status)
4386                 goto out;
4387
4388         /* Remove RSS configuration from VSI context before deleting
4389          * the flow profile.
4390          */
4391         ice_rem_rss_list(hw, vsi_handle, prof);
4392
4393         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
4394                 status = ice_flow_rem_prof(hw, blk, prof->id);
4395
4396 out:
4397         ice_free(hw, segs);
4398         return status;
4399 }
4400
4401 /**
4402  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
4403  * @hw: pointer to the hardware structure
4404  * @vsi_handle: software VSI handle
4405  * @cfg: configure parameters
4406  *
4407  * This function will look up the flow profile based on the input
4408  * hash field bitmap, iterate through the profile entry list of
4409  * that profile and find the entry associated with the input VSI to
4410  * be removed. Calls are made to the underlying flow APIs which will
4411  * in turn build or update buffers for the RSS XLT1 section.
4412  */
4413 enum ice_status
4414 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4415                 const struct ice_rss_hash_cfg *cfg)
4416 {
4417         struct ice_rss_hash_cfg local_cfg;
4418         enum ice_status status;
4419
4420         if (!ice_is_vsi_valid(hw, vsi_handle) ||
4421             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4422             cfg->hash_flds == ICE_HASH_INVALID)
4423                 return ICE_ERR_PARAM;
4424
4425         ice_acquire_lock(&hw->rss_locks);
4426         local_cfg = *cfg;
4427         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4428                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4429         } else {
4430                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4431                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4432
4433                 if (!status) {
4434                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4435                         status = ice_rem_rss_cfg_sync(hw, vsi_handle,
4436                                                       &local_cfg);
4437                 }
4438         }
4439         ice_release_lock(&hw->rss_locks);
4440
4441         return status;
4442 }
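
/* Removal mirrors the earlier addition: a sketch assuming the same 'cfg'
 * contents used with ice_add_rss_cfg() are passed again, so the matching flow
 * profile can be located by its hashed fields and protocol headers.
 *
 *	status = ice_rem_rss_cfg(hw, vsi_handle, &cfg);
 */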
4443
4444 /**
4445  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
4446  * @hw: pointer to the hardware structure
4447  * @vsi_handle: software VSI handle
4448  */
4449 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
4450 {
4451         enum ice_status status = ICE_SUCCESS;
4452         struct ice_rss_cfg *r;
4453
4454         if (!ice_is_vsi_valid(hw, vsi_handle))
4455                 return ICE_ERR_PARAM;
4456
4457         ice_acquire_lock(&hw->rss_locks);
4458         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4459                             ice_rss_cfg, l_entry) {
4460                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
4461                         status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
4462                         if (status)
4463                                 break;
4464                 }
4465         }
4466         ice_release_lock(&hw->rss_locks);
4467
4468         return status;
4469 }
4470
4471 /**
4472  * ice_get_rss_cfg - returns hashed fields for the given header types
4473  * @hw: pointer to the hardware structure
4474  * @vsi_handle: software VSI handle
4475  * @hdrs: protocol header type
4476  *
4477  * This function will return the match fields of the first instance of a flow
4478  * profile having the given header types and containing the input VSI.
4479  */
4480 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4481 {
4482         u64 rss_hash = ICE_HASH_INVALID;
4483         struct ice_rss_cfg *r;
4484
4485         /* verify that the protocol header is non-zero and the VSI is valid */
4486         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4487                 return ICE_HASH_INVALID;
4488
4489         ice_acquire_lock(&hw->rss_locks);
4490         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4491                             ice_rss_cfg, l_entry)
4492                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4493                     r->hash.addl_hdrs == hdrs) {
4494                         rss_hash = r->hash.hash_flds;
4495                         break;
4496                 }
4497         ice_release_lock(&hw->rss_locks);
4498
4499         return rss_hash;
4500 }