drivers/net/ice/base/ice_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2021 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IPV4_ID         2
17 #define ICE_FLOW_FLD_SZ_IPV6_ID         4
18 #define ICE_FLOW_FLD_SZ_IP_CHKSUM       2
19 #define ICE_FLOW_FLD_SZ_TCP_CHKSUM      2
20 #define ICE_FLOW_FLD_SZ_UDP_CHKSUM      2
21 #define ICE_FLOW_FLD_SZ_SCTP_CHKSUM     4
22 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
23 #define ICE_FLOW_FLD_SZ_IP_TTL          1
24 #define ICE_FLOW_FLD_SZ_IP_PROT         1
25 #define ICE_FLOW_FLD_SZ_PORT            2
26 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
27 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
28 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
29 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
30 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
31 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
32 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
33 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
34 #define ICE_FLOW_FLD_SZ_PFCP_SEID       8
35 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
36 #define ICE_FLOW_FLD_SZ_ESP_SPI         4
37 #define ICE_FLOW_FLD_SZ_AH_SPI          4
38 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
39 #define ICE_FLOW_FLD_SZ_VXLAN_VNI       4
40 #define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
41
42 /* Describe properties of a protocol header field */
43 struct ice_flow_field_info {
44         enum ice_flow_seg_hdr hdr;
45         s16 off;        /* Offset from start of a protocol header, in bits */
46         u16 size;       /* Size of the field in bits */
47         u16 mask;       /* 16-bit mask for field */
48 };
49
50 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
51         .hdr = _hdr, \
52         .off = (_offset_bytes) * BITS_PER_BYTE, \
53         .size = (_size_bytes) * BITS_PER_BYTE, \
54         .mask = 0, \
55 }
56
57 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
58         .hdr = _hdr, \
59         .off = (_offset_bytes) * BITS_PER_BYTE, \
60         .size = (_size_bytes) * BITS_PER_BYTE, \
61         .mask = _mask, \
62 }
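/* For illustration: with BITS_PER_BYTE == 8, an entry such as
 * ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR)
 * expands to roughly:
 *
 *     { .hdr = ICE_FLOW_SEG_HDR_IPV4, .off = 96, .size = 32, .mask = 0 }
 *
 * i.e. the IPv4 source address: a 32-bit field that starts 96 bits (12 bytes)
 * into the IPv4 header, with no extraction mask applied.
 */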
63
64 /* Table containing properties of supported protocol header fields */
65 static const
66 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
67         /* Ether */
68         /* ICE_FLOW_FIELD_IDX_ETH_DA */
69         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
70         /* ICE_FLOW_FIELD_IDX_ETH_SA */
71         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
72         /* ICE_FLOW_FIELD_IDX_S_VLAN */
73         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
74         /* ICE_FLOW_FIELD_IDX_C_VLAN */
75         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
76         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
77         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
78         /* IPv4 / IPv6 */
79         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
80         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
81                               0x00fc),
82         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
83         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
84                               0x0ff0),
85         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
86         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
87                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
88         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
89         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
90                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
91         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
92         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
93                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
94         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
95         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
96                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
97         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
99         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
100         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
101         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
102         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
103         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
104         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
105         /* ICE_FLOW_FIELD_IDX_IPV4_CHKSUM */
106         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 10, ICE_FLOW_FLD_SZ_IP_CHKSUM),
107         /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
108         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
109                           ICE_FLOW_FLD_SZ_IPV4_ID),
110         /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
111         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
112                           ICE_FLOW_FLD_SZ_IPV6_ID),
113         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
114         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
115                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
116         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
117         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
118                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
119         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
120         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
121                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
122         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
123         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
124                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
125         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
126         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
127                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
128         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
129         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
130                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
131         /* Transport */
132         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
133         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
134         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
135         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
136         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
137         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
138         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
139         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
140         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
141         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
142         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
143         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
144         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
145         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
146         /* ICE_FLOW_FIELD_IDX_TCP_CHKSUM */
147         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 16, ICE_FLOW_FLD_SZ_TCP_CHKSUM),
148         /* ICE_FLOW_FIELD_IDX_UDP_CHKSUM */
149         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 6, ICE_FLOW_FLD_SZ_UDP_CHKSUM),
150         /* ICE_FLOW_FIELD_IDX_SCTP_CHKSUM */
151         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 8,
152                           ICE_FLOW_FLD_SZ_SCTP_CHKSUM),
153         /* ARP */
154         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
155         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
156         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
157         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
158         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
159         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
160         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
161         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
162         /* ICE_FLOW_FIELD_IDX_ARP_OP */
163         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
164         /* ICMP */
165         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
166         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
167         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
168         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
169         /* GRE */
170         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
171         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
172         /* GTP */
173         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
174         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
175                           ICE_FLOW_FLD_SZ_GTP_TEID),
176         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
177         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
178                           ICE_FLOW_FLD_SZ_GTP_TEID),
179         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
180         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
181                           ICE_FLOW_FLD_SZ_GTP_TEID),
182         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
183         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
184                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
185         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
186         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
187                           ICE_FLOW_FLD_SZ_GTP_TEID),
188         /* ICE_FLOW_FIELD_IDX_GTPU_UP_QFI */
189         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_UP, 22,
190                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
191         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
192         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
193                           ICE_FLOW_FLD_SZ_GTP_TEID),
194         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI */
195         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_DWN, 22,
196                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
197         /* PPPOE */
198         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
199         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
200                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
201         /* PFCP */
202         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
203         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
204                           ICE_FLOW_FLD_SZ_PFCP_SEID),
205         /* L2TPV3 */
206         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
207         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
208                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
209         /* ESP */
210         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
211         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
212                           ICE_FLOW_FLD_SZ_ESP_SPI),
213         /* AH */
214         /* ICE_FLOW_FIELD_IDX_AH_SPI */
215         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
216                           ICE_FLOW_FLD_SZ_AH_SPI),
217         /* NAT_T_ESP */
218         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
219         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
220                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
221         /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
222         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
223                           ICE_FLOW_FLD_SZ_VXLAN_VNI),
224         /* ECPRI_TP0 */
225         /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
226         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
227                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
228         /* UDP_ECPRI_TP0 */
229         /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
230         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
231                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
232 };
233
234 /* Bitmaps indicating relevant packet types for a particular protocol header
235  *
236  * Packet types for packets with an Outer/First/Single MAC header
237  */
238 static const u32 ice_ptypes_mac_ofos[] = {
239         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
240         0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
241         0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
242         0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
243         0x00000000, 0x00000000, 0x00000000, 0x00000000,
244         0x00000000, 0x00000000, 0x00000000, 0x00000000,
245         0x00000000, 0x00000000, 0x00000000, 0x00000000,
246         0x00000000, 0x00000000, 0x00000000, 0x00000000,
247 };
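/* For reference: each of these ptype tables is a 1024-bit bitmap stored as
 * 32 32-bit words and indexed by packet type (PTYPE) - PTYPE n is bit
 * (n % 32) of word (n / 32), so word 10, for example, covers PTYPEs 320-351.
 * Profile creation narrows a working PTYPE bitmap by ANDing it with the
 * table for each protocol header requested in the flow's segments.
 */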
248
249 /* Packet types for packets with an Innermost/Last MAC VLAN header */
250 static const u32 ice_ptypes_macvlan_il[] = {
251         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
252         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
253         0x00000000, 0x00000000, 0x00000000, 0x00000000,
254         0x00000000, 0x00000000, 0x00000000, 0x00000000,
255         0x00000000, 0x00000000, 0x00000000, 0x00000000,
256         0x00000000, 0x00000000, 0x00000000, 0x00000000,
257         0x00000000, 0x00000000, 0x00000000, 0x00000000,
258         0x00000000, 0x00000000, 0x00000000, 0x00000000,
259 };
260
261 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
262  * does NOT include IPV4 other PTYPEs
263  */
264 static const u32 ice_ptypes_ipv4_ofos[] = {
265         0x1D800000, 0x24000800, 0x00000000, 0x00000000,
266         0x00000000, 0x00000155, 0x00000000, 0x00000000,
267         0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
268         0x00001500, 0x00000000, 0x00000000, 0x00000000,
269         0x00000000, 0x00000000, 0x00000000, 0x00000000,
270         0x00000000, 0x00000000, 0x00000000, 0x00000000,
271         0x00000000, 0x00000000, 0x00000000, 0x00000000,
272         0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 };
274
275 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
276  * includes IPV4 other PTYPEs
277  */
278 static const u32 ice_ptypes_ipv4_ofos_all[] = {
279         0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
280         0x00000000, 0x00000155, 0x00000000, 0x00000000,
281         0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
282         0x03FFD000, 0x00000000, 0x02FBEFBC, 0x00000000,
283         0x00000000, 0x00000000, 0x00000000, 0x00000000,
284         0x00000000, 0x00000000, 0x00000000, 0x00000000,
285         0x00000000, 0x00000000, 0x00000000, 0x00000000,
286         0x00000000, 0x00000000, 0x00000000, 0x00000000,
287 };
288
289 /* Packet types for packets with an Innermost/Last IPv4 header */
290 static const u32 ice_ptypes_ipv4_il[] = {
291         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
292         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
293         0x00000000, 0x00000000, 0x001FF800, 0x00100000,
294         0xFC0FC000, 0x00000000, 0xBC0BC0BC, 0x00000BC0,
295         0x00000000, 0x00000000, 0x00000000, 0x00000000,
296         0x00000000, 0x00000000, 0x00000000, 0x00000000,
297         0x00000000, 0x00000000, 0x00000000, 0x00000000,
298         0x00000000, 0x00000000, 0x00000000, 0x00000000,
299 };
300
301 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
302  * does NOT include IPV6 other PTYPEs
303  */
304 static const u32 ice_ptypes_ipv6_ofos[] = {
305         0x00000000, 0x00000000, 0x76000000, 0x10002000,
306         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
307         0x00000000, 0x03F00000, 0x00000540, 0x00000000,
308         0x00002A00, 0x00000000, 0x00000000, 0x00000000,
309         0x00000000, 0x00000000, 0x00000000, 0x00000000,
310         0x00000000, 0x00000000, 0x00000000, 0x00000000,
311         0x00000000, 0x00000000, 0x00000000, 0x00000000,
312         0x00000000, 0x00000000, 0x00000000, 0x00000000,
313 };
314
315 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
316  * includes IPV6 other PTYPEs
317  */
318 static const u32 ice_ptypes_ipv6_ofos_all[] = {
319         0x00000000, 0x00000000, 0x76000000, 0x1EFDE000,
320         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
321         0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
322         0xFC002000, 0x0000003F, 0xBC000000, 0x0002FBEF,
323         0x00000000, 0x00000000, 0x00000000, 0x00000000,
324         0x00000000, 0x00000000, 0x00000000, 0x00000000,
325         0x00000000, 0x00000000, 0x00000000, 0x00000000,
326         0x00000000, 0x00000000, 0x00000000, 0x00000000,
327 };
328
329 /* Packet types for packets with an Innermost/Last IPv6 header */
330 static const u32 ice_ptypes_ipv6_il[] = {
331         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
332         0x00000770, 0x00000000, 0x00000000, 0x00000000,
333         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
334         0x03F00000, 0x0000003F, 0x02F02F00, 0x0002F02F,
335         0x00000000, 0x00000000, 0x00000000, 0x00000000,
336         0x00000000, 0x00000000, 0x00000000, 0x00000000,
337         0x00000000, 0x00000000, 0x00000000, 0x00000000,
338         0x00000000, 0x00000000, 0x00000000, 0x00000000,
339 };
340
341 /* Packet types for packets with an Outer/First/Single
342  * non-frag IPv4 header - no L4
343  */
344 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
345         0x10800000, 0x04000800, 0x00000000, 0x00000000,
346         0x00000000, 0x00000000, 0x00000000, 0x00000000,
347         0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
348         0x00001500, 0x00000000, 0x00000000, 0x00000000,
349         0x00000000, 0x00000000, 0x00000000, 0x00000000,
350         0x00000000, 0x00000000, 0x00000000, 0x00000000,
351         0x00000000, 0x00000000, 0x00000000, 0x00000000,
352         0x00000000, 0x00000000, 0x00000000, 0x00000000,
353 };
354
355 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
356 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
357         0x60000000, 0x18043008, 0x80000002, 0x6010c021,
358         0x00000008, 0x00000000, 0x00000000, 0x00000000,
359         0x00000000, 0x00000000, 0x00139800, 0x00000000,
360         0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
361         0x00000000, 0x00000000, 0x00000000, 0x00000000,
362         0x00000000, 0x00000000, 0x00000000, 0x00000000,
363         0x00000000, 0x00000000, 0x00000000, 0x00000000,
364         0x00000000, 0x00000000, 0x00000000, 0x00000000,
365 };
366
367 /* Packet types for packets with an Outer/First/Single
368  * non-frag IPv6 header - no L4
369  */
370 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
371         0x00000000, 0x00000000, 0x42000000, 0x10002000,
372         0x00000000, 0x00000000, 0x00000000, 0x00000000,
373         0x00000000, 0x02300000, 0x00000540, 0x00000000,
374         0x00002A00, 0x00000000, 0x00000000, 0x00000000,
375         0x00000000, 0x00000000, 0x00000000, 0x00000000,
376         0x00000000, 0x00000000, 0x00000000, 0x00000000,
377         0x00000000, 0x00000000, 0x00000000, 0x00000000,
378         0x00000000, 0x00000000, 0x00000000, 0x00000000,
379 };
380
381 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
382 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
383         0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
384         0x00000430, 0x00000000, 0x00000000, 0x00000000,
385         0x00000000, 0x00000000, 0x4e600000, 0x00000000,
386         0x02300000, 0x00000023, 0x00000000, 0x00000000,
387         0x00000000, 0x00000000, 0x00000000, 0x00000000,
388         0x00000000, 0x00000000, 0x00000000, 0x00000000,
389         0x00000000, 0x00000000, 0x00000000, 0x00000000,
390         0x00000000, 0x00000000, 0x00000000, 0x00000000,
391 };
392
393 /* Packet types for packets with an Outermost/First ARP header */
394 static const u32 ice_ptypes_arp_of[] = {
395         0x00000800, 0x00000000, 0x00000000, 0x00000000,
396         0x00000000, 0x00000000, 0x00000000, 0x00000000,
397         0x00000000, 0x00000000, 0x00000000, 0x00000000,
398         0x00000000, 0x00000000, 0x00000000, 0x00000000,
399         0x00000000, 0x00000000, 0x00000000, 0x00000000,
400         0x00000000, 0x00000000, 0x00000000, 0x00000000,
401         0x00000000, 0x00000000, 0x00000000, 0x00000000,
402         0x00000000, 0x00000000, 0x00000000, 0x00000000,
403 };
404
405 /* UDP Packet types for non-tunneled packets or tunneled
406  * packets with inner UDP.
407  */
408 static const u32 ice_ptypes_udp_il[] = {
409         0x81000000, 0x20204040, 0x04000010, 0x80810102,
410         0x00000040, 0x00000000, 0x00000000, 0x00000000,
411         0x00000000, 0x00410000, 0x908427E0, 0x00100007,
412         0x10410000, 0x00000004, 0x10410410, 0x00004104,
413         0x00000000, 0x00000000, 0x00000000, 0x00000000,
414         0x00000000, 0x00000000, 0x00000000, 0x00000000,
415         0x00000000, 0x00000000, 0x00000000, 0x00000000,
416         0x00000000, 0x00000000, 0x00000000, 0x00000000,
417 };
418
419 /* Packet types for packets with an Innermost/Last TCP header */
420 static const u32 ice_ptypes_tcp_il[] = {
421         0x04000000, 0x80810102, 0x10000040, 0x02040408,
422         0x00000102, 0x00000000, 0x00000000, 0x00000000,
423         0x00000000, 0x00820000, 0x21084000, 0x00000000,
424         0x20820000, 0x00000008, 0x20820820, 0x00008208,
425         0x00000000, 0x00000000, 0x00000000, 0x00000000,
426         0x00000000, 0x00000000, 0x00000000, 0x00000000,
427         0x00000000, 0x00000000, 0x00000000, 0x00000000,
428         0x00000000, 0x00000000, 0x00000000, 0x00000000,
429 };
430
431 /* Packet types for packets with an Innermost/Last SCTP header */
432 static const u32 ice_ptypes_sctp_il[] = {
433         0x08000000, 0x01020204, 0x20000081, 0x04080810,
434         0x00000204, 0x00000000, 0x00000000, 0x00000000,
435         0x00000000, 0x01040000, 0x00000000, 0x00000000,
436         0x41040000, 0x00000010, 0x00000000, 0x00000000,
437         0x00000000, 0x00000000, 0x00000000, 0x00000000,
438         0x00000000, 0x00000000, 0x00000000, 0x00000000,
439         0x00000000, 0x00000000, 0x00000000, 0x00000000,
440         0x00000000, 0x00000000, 0x00000000, 0x00000000,
441 };
442
443 /* Packet types for packets with an Outermost/First ICMP header */
444 static const u32 ice_ptypes_icmp_of[] = {
445         0x10000000, 0x00000000, 0x00000000, 0x00000000,
446         0x00000000, 0x00000000, 0x00000000, 0x00000000,
447         0x00000000, 0x00000000, 0x00000000, 0x00000000,
448         0x00000000, 0x00000000, 0x00000000, 0x00000000,
449         0x00000000, 0x00000000, 0x00000000, 0x00000000,
450         0x00000000, 0x00000000, 0x00000000, 0x00000000,
451         0x00000000, 0x00000000, 0x00000000, 0x00000000,
452         0x00000000, 0x00000000, 0x00000000, 0x00000000,
453 };
454
455 /* Packet types for packets with an Innermost/Last ICMP header */
456 static const u32 ice_ptypes_icmp_il[] = {
457         0x00000000, 0x02040408, 0x40000102, 0x08101020,
458         0x00000408, 0x00000000, 0x00000000, 0x00000000,
459         0x00000000, 0x00000000, 0x42108000, 0x00000000,
460         0x82080000, 0x00000020, 0x00000000, 0x00000000,
461         0x00000000, 0x00000000, 0x00000000, 0x00000000,
462         0x00000000, 0x00000000, 0x00000000, 0x00000000,
463         0x00000000, 0x00000000, 0x00000000, 0x00000000,
464         0x00000000, 0x00000000, 0x00000000, 0x00000000,
465 };
466
467 /* Packet types for packets with an Outermost/First GRE header */
468 static const u32 ice_ptypes_gre_of[] = {
469         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
470         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
471         0x00000000, 0x00000000, 0x00000000, 0x00000000,
472         0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF,
473         0x00000000, 0x00000000, 0x00000000, 0x00000000,
474         0x00000000, 0x00000000, 0x00000000, 0x00000000,
475         0x00000000, 0x00000000, 0x00000000, 0x00000000,
476         0x00000000, 0x00000000, 0x00000000, 0x00000000,
477 };
478
479 /* Packet types for packets with an Innermost/Last MAC header */
480 static const u32 ice_ptypes_mac_il[] = {
481         0x00000000, 0x20000000, 0x00000000, 0x00000000,
482         0x00000000, 0x00000000, 0x00000000, 0x00000000,
483         0x00000000, 0x00000000, 0x00000000, 0x00000000,
484         0x00000000, 0x00000000, 0x00000000, 0x00000000,
485         0x00000000, 0x00000000, 0x00000000, 0x00000000,
486         0x00000000, 0x00000000, 0x00000000, 0x00000000,
487         0x00000000, 0x00000000, 0x00000000, 0x00000000,
488         0x00000000, 0x00000000, 0x00000000, 0x00000000,
489 };
490
491 /* Packet types for GTPC */
492 static const u32 ice_ptypes_gtpc[] = {
493         0x00000000, 0x00000000, 0x00000000, 0x00000000,
494         0x00000000, 0x00000000, 0x00000000, 0x00000000,
495         0x00000000, 0x00000000, 0x000001E0, 0x00000000,
496         0x00000000, 0x00000000, 0x00000000, 0x00000000,
497         0x00000000, 0x00000000, 0x00000000, 0x00000000,
498         0x00000000, 0x00000000, 0x00000000, 0x00000000,
499         0x00000000, 0x00000000, 0x00000000, 0x00000000,
500         0x00000000, 0x00000000, 0x00000000, 0x00000000,
501 };
502
503 /* Packet types for VXLAN with VNI */
504 static const u32 ice_ptypes_vxlan_vni[] = {
505         0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
506         0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
507         0x00000000, 0x00000000, 0x00000000, 0x00000000,
508         0x00000000, 0x00000000, 0x00000000, 0x00000000,
509         0x00000000, 0x00000000, 0x00000000, 0x00000000,
510         0x00000000, 0x00000000, 0x00000000, 0x00000000,
511         0x00000000, 0x00000000, 0x00000000, 0x00000000,
512         0x00000000, 0x00000000, 0x00000000, 0x00000000,
513 };
514
515 /* Packet types for GTPC with TEID */
516 static const u32 ice_ptypes_gtpc_tid[] = {
517         0x00000000, 0x00000000, 0x00000000, 0x00000000,
518         0x00000000, 0x00000000, 0x00000000, 0x00000000,
519         0x00000000, 0x00000000, 0x00000060, 0x00000000,
520         0x00000000, 0x00000000, 0x00000000, 0x00000000,
521         0x00000000, 0x00000000, 0x00000000, 0x00000000,
522         0x00000000, 0x00000000, 0x00000000, 0x00000000,
523         0x00000000, 0x00000000, 0x00000000, 0x00000000,
524         0x00000000, 0x00000000, 0x00000000, 0x00000000,
525 };
526
527 /* Packet types for GTPU */
528 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
529         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
530         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
531         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
532         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
533         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
534         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
535         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
536         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
537         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
538         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
539         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
540         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
541         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
542         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
543         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
544         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
545         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
546         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
547         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
548         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
549         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
550         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
551         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
552         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
553         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
554         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
555         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
556         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
557         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
558         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
559         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
560         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
561         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
562         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
563         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
564         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
565         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
566         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
567         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
568         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
569         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
570         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
571         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
572         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
573         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
574         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
575         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
576         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
577         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
578         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
579         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
580         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
581         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
582         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
583         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
584         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
585         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
586         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
587         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
588         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
589 };
590
591 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
592         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
593         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
594         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
595         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
596         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
597         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
598         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
599         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
600         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
601         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
602         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
603         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
604         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
605         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
606         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
607         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
608         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
609         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
610         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
611         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
612         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
613         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
614         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
615         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
616         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
617         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
618         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
619         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
620         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
621         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
622         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
623         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
624         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
625         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
626         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
627         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
628         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
629         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
630         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
631         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
632         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
633         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
634         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
635         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
636         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
637         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
638         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
639         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
640         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
641         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
642         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
643         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
644         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
645         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
646         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
647         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
648         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
649         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
650         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
651         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
652 };
653
654 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
655         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
656         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
657         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
658         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
659         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
660         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
661         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
662         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
663         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
664         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
665         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
666         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
667         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
668         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
669         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
670         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
671         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
672         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
673         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
674         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
675         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
676         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
677         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
678         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
679         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
680         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
681         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
682         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
683         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
684         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
685         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
686         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
687         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
688         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
689         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
690         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
691         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
692         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
693         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
694         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
695         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
696         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
697         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
698         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
699         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
700         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
701         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
702         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
703         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
704         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
705         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
706         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
707         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
708         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
709         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
710         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
711         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
712         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
713         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
714         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
715 };
716
717 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
718         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
719         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
720         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
721         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
722         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
723         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
724         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
725         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
726         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
727         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
728         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
729         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
730         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
731         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
732         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
733         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
734         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
735         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
736         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
737         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
738         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
739         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
740         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
741         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
742         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
743         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
744         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
745         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
746         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
747         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
748         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
749         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
750         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
751         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
752         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
753         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
754         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
755         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
756         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
757         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
758         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
759         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
760         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
761         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
762         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
763         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
764         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
765         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
766         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
767         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
768         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
769         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
770         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
771         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
772         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
773         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
774         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
775         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
776         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
777         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
778 };
779
780 static const u32 ice_ptypes_gtpu[] = {
781         0x00000000, 0x00000000, 0x00000000, 0x00000000,
782         0x00000000, 0x00000000, 0x00000000, 0x00000000,
783         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
784         0x00000000, 0x0000003F, 0xBEFBEFBC, 0x0002FBEF,
785         0x00000000, 0x00000000, 0x00000000, 0x00000000,
786         0x00000000, 0x00000000, 0x00000000, 0x00000000,
787         0x00000000, 0x00000000, 0x00000000, 0x00000000,
788         0x00000000, 0x00000000, 0x00000000, 0x00000000,
789 };
790
791 /* Packet types for pppoe */
792 static const u32 ice_ptypes_pppoe[] = {
793         0x00000000, 0x00000000, 0x00000000, 0x00000000,
794         0x00000000, 0x00000000, 0x00000000, 0x00000000,
795         0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
796         0x00000000, 0x00000000, 0x00000000, 0x00000000,
797         0x00000000, 0x00000000, 0x00000000, 0x00000000,
798         0x00000000, 0x00000000, 0x00000000, 0x00000000,
799         0x00000000, 0x00000000, 0x00000000, 0x00000000,
800         0x00000000, 0x00000000, 0x00000000, 0x00000000,
801 };
802
803 /* Packet types for packets with PFCP NODE header */
804 static const u32 ice_ptypes_pfcp_node[] = {
805         0x00000000, 0x00000000, 0x00000000, 0x00000000,
806         0x00000000, 0x00000000, 0x00000000, 0x00000000,
807         0x00000000, 0x00000000, 0x80000000, 0x00000002,
808         0x00000000, 0x00000000, 0x00000000, 0x00000000,
809         0x00000000, 0x00000000, 0x00000000, 0x00000000,
810         0x00000000, 0x00000000, 0x00000000, 0x00000000,
811         0x00000000, 0x00000000, 0x00000000, 0x00000000,
812         0x00000000, 0x00000000, 0x00000000, 0x00000000,
813 };
814
815 /* Packet types for packets with PFCP SESSION header */
816 static const u32 ice_ptypes_pfcp_session[] = {
817         0x00000000, 0x00000000, 0x00000000, 0x00000000,
818         0x00000000, 0x00000000, 0x00000000, 0x00000000,
819         0x00000000, 0x00000000, 0x00000000, 0x00000005,
820         0x00000000, 0x00000000, 0x00000000, 0x00000000,
821         0x00000000, 0x00000000, 0x00000000, 0x00000000,
822         0x00000000, 0x00000000, 0x00000000, 0x00000000,
823         0x00000000, 0x00000000, 0x00000000, 0x00000000,
824         0x00000000, 0x00000000, 0x00000000, 0x00000000,
825 };
826
827 /* Packet types for l2tpv3 */
828 static const u32 ice_ptypes_l2tpv3[] = {
829         0x00000000, 0x00000000, 0x00000000, 0x00000000,
830         0x00000000, 0x00000000, 0x00000000, 0x00000000,
831         0x00000000, 0x00000000, 0x00000000, 0x00000300,
832         0x00000000, 0x00000000, 0x00000000, 0x00000000,
833         0x00000000, 0x00000000, 0x00000000, 0x00000000,
834         0x00000000, 0x00000000, 0x00000000, 0x00000000,
835         0x00000000, 0x00000000, 0x00000000, 0x00000000,
836         0x00000000, 0x00000000, 0x00000000, 0x00000000,
837 };
838
839 /* Packet types for esp */
840 static const u32 ice_ptypes_esp[] = {
841         0x00000000, 0x00000000, 0x00000000, 0x00000000,
842         0x00000000, 0x00000003, 0x00000000, 0x00000000,
843         0x00000000, 0x00000000, 0x00000000, 0x00000000,
844         0x00000000, 0x00000000, 0x00000000, 0x00000000,
845         0x00000000, 0x00000000, 0x00000000, 0x00000000,
846         0x00000000, 0x00000000, 0x00000000, 0x00000000,
847         0x00000000, 0x00000000, 0x00000000, 0x00000000,
848         0x00000000, 0x00000000, 0x00000000, 0x00000000,
849 };
850
851 /* Packet types for ah */
852 static const u32 ice_ptypes_ah[] = {
853         0x00000000, 0x00000000, 0x00000000, 0x00000000,
854         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
855         0x00000000, 0x00000000, 0x00000000, 0x00000000,
856         0x00000000, 0x00000000, 0x00000000, 0x00000000,
857         0x00000000, 0x00000000, 0x00000000, 0x00000000,
858         0x00000000, 0x00000000, 0x00000000, 0x00000000,
859         0x00000000, 0x00000000, 0x00000000, 0x00000000,
860         0x00000000, 0x00000000, 0x00000000, 0x00000000,
861 };
862
863 /* Packet types for packets with NAT_T ESP header */
864 static const u32 ice_ptypes_nat_t_esp[] = {
865         0x00000000, 0x00000000, 0x00000000, 0x00000000,
866         0x00000000, 0x00000030, 0x00000000, 0x00000000,
867         0x00000000, 0x00000000, 0x00000000, 0x00000000,
868         0x00000000, 0x00000000, 0x00000000, 0x00000000,
869         0x00000000, 0x00000000, 0x00000000, 0x00000000,
870         0x00000000, 0x00000000, 0x00000000, 0x00000000,
871         0x00000000, 0x00000000, 0x00000000, 0x00000000,
872         0x00000000, 0x00000000, 0x00000000, 0x00000000,
873 };
874
875 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
876         0x00000846, 0x00000000, 0x00000000, 0x00000000,
877         0x00000000, 0x00000000, 0x00000000, 0x00000000,
878         0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
879         0x00000000, 0x00000000, 0x00000000, 0x00000000,
880         0x00000000, 0x00000000, 0x00000000, 0x00000000,
881         0x00000000, 0x00000000, 0x00000000, 0x00000000,
882         0x00000000, 0x00000000, 0x00000000, 0x00000000,
883         0x00000000, 0x00000000, 0x00000000, 0x00000000,
884 };
885
886 static const u32 ice_ptypes_gtpu_no_ip[] = {
887         0x00000000, 0x00000000, 0x00000000, 0x00000000,
888         0x00000000, 0x00000000, 0x00000000, 0x00000000,
889         0x00000000, 0x00000000, 0x00000600, 0x00000000,
890         0x00000000, 0x00000000, 0x00000000, 0x00000000,
891         0x00000000, 0x00000000, 0x00000000, 0x00000000,
892         0x00000000, 0x00000000, 0x00000000, 0x00000000,
893         0x00000000, 0x00000000, 0x00000000, 0x00000000,
894         0x00000000, 0x00000000, 0x00000000, 0x00000000,
895 };
896
897 static const u32 ice_ptypes_ecpri_tp0[] = {
898         0x00000000, 0x00000000, 0x00000000, 0x00000000,
899         0x00000000, 0x00000000, 0x00000000, 0x00000000,
900         0x00000000, 0x00000000, 0x00000000, 0x00000400,
901         0x00000000, 0x00000000, 0x00000000, 0x00000000,
902         0x00000000, 0x00000000, 0x00000000, 0x00000000,
903         0x00000000, 0x00000000, 0x00000000, 0x00000000,
904         0x00000000, 0x00000000, 0x00000000, 0x00000000,
905         0x00000000, 0x00000000, 0x00000000, 0x00000000,
906 };
907
908 static const u32 ice_ptypes_udp_ecpri_tp0[] = {
909         0x00000000, 0x00000000, 0x00000000, 0x00000000,
910         0x00000000, 0x00000000, 0x00000000, 0x00000000,
911         0x00000000, 0x00000000, 0x00000000, 0x00100000,
912         0x00000000, 0x00000000, 0x00000000, 0x00000000,
913         0x00000000, 0x00000000, 0x00000000, 0x00000000,
914         0x00000000, 0x00000000, 0x00000000, 0x00000000,
915         0x00000000, 0x00000000, 0x00000000, 0x00000000,
916         0x00000000, 0x00000000, 0x00000000, 0x00000000,
917 };
918
919 static const u32 ice_ptypes_l2tpv2[] = {
920         0x00000000, 0x00000000, 0x00000000, 0x00000000,
921         0x00000000, 0x00000000, 0x00000000, 0x00000000,
922         0x00000000, 0x00000000, 0x00000000, 0x00000000,
923         0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
924         0x00000000, 0x00000000, 0x00000000, 0x00000000,
925         0x00000000, 0x00000000, 0x00000000, 0x00000000,
926         0x00000000, 0x00000000, 0x00000000, 0x00000000,
927         0x00000000, 0x00000000, 0x00000000, 0x00000000,
928 };
929
930 static const u32 ice_ptypes_ppp[] = {
931         0x00000000, 0x00000000, 0x00000000, 0x00000000,
932         0x00000000, 0x00000000, 0x00000000, 0x00000000,
933         0x00000000, 0x00000000, 0x00000000, 0x00000000,
934         0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
935         0x00000000, 0x00000000, 0x00000000, 0x00000000,
936         0x00000000, 0x00000000, 0x00000000, 0x00000000,
937         0x00000000, 0x00000000, 0x00000000, 0x00000000,
938         0x00000000, 0x00000000, 0x00000000, 0x00000000,
939 };
940
941 static const u32 ice_ptypes_ipv4_frag[] = {
942         0x00400000, 0x00000000, 0x00000000, 0x00000000,
943         0x00000000, 0x00000000, 0x00000000, 0x00000000,
944         0x00000000, 0x00000000, 0x00000000, 0x00000000,
945         0x00000000, 0x00000000, 0x00000000, 0x00000000,
946         0x00000000, 0x00000000, 0x00000000, 0x00000000,
947         0x00000000, 0x00000000, 0x00000000, 0x00000000,
948         0x00000000, 0x00000000, 0x00000000, 0x00000000,
949         0x00000000, 0x00000000, 0x00000000, 0x00000000,
950 };
951
952 static const u32 ice_ptypes_ipv6_frag[] = {
953         0x00000000, 0x00000000, 0x01000000, 0x00000000,
954         0x00000000, 0x00000000, 0x00000000, 0x00000000,
955         0x00000000, 0x00000000, 0x00000000, 0x00000000,
956         0x00000000, 0x00000000, 0x00000000, 0x00000000,
957         0x00000000, 0x00000000, 0x00000000, 0x00000000,
958         0x00000000, 0x00000000, 0x00000000, 0x00000000,
959         0x00000000, 0x00000000, 0x00000000, 0x00000000,
960         0x00000000, 0x00000000, 0x00000000, 0x00000000,
961 };
962
963 /* Manage parameters and info used during the creation of a flow profile */
964 struct ice_flow_prof_params {
965         enum ice_block blk;
966         u16 entry_length; /* # of bytes a formatted entry will require */
967         u8 es_cnt;
968         struct ice_flow_prof *prof;
969
970         /* For ACL, es[0] will hold the data of ICE_RX_MDID_PKT_FLAGS_15_0,
971          * which provides the direction flags.
972          */
973         struct ice_fv_word es[ICE_MAX_FV_WORDS];
974         /* attr can be used to attach attributes to a particular PTYPE */
975         const struct ice_ptype_attributes *attr;
976         u16 attr_cnt;
977
978         u16 mask[ICE_MAX_FV_WORDS];
979         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
980 };
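/* Roughly speaking, each ice_fv_word in es[] above selects one 16-bit
 * extraction word (a protocol ID plus an offset within that protocol),
 * es_cnt counts the entries in use, and mask[] carries an optional per-word
 * match mask for the entry at the same index.
 */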
981
982 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
983         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
984         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
985         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
986         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
987         ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
988         ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
989         ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP | ICE_FLOW_SEG_HDR_GRE)
990
991 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
992         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
993 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
994         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
995          ICE_FLOW_SEG_HDR_ARP)
996 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
997         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
998          ICE_FLOW_SEG_HDR_SCTP)
999 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
1000 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
1001         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
1002
1003 /**
1004  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
1005  * @segs: array of one or more packet segments that describe the flow
1006  * @segs_cnt: number of packet segments provided
1007  */
1008 static enum ice_status
1009 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
1010 {
1011         u8 i;
1012
1013         for (i = 0; i < segs_cnt; i++) {
1014                 /* Multiple L3 headers */
1015                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
1016                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
1017                         return ICE_ERR_PARAM;
1018
1019                 /* Multiple L4 headers */
1020                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
1021                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
1022                         return ICE_ERR_PARAM;
1023         }
1024
1025         return ICE_SUCCESS;
1026 }
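/* Illustrative sketch (not part of the driver build): how the L3/L4
 * exclusivity check above behaves for a caller-built segment. The example
 * function name and values are hypothetical.
 */
#if 0
static void ice_flow_val_hdrs_example(void)
{
        struct ice_flow_seg_info seg = { 0 };

        /* One L3 header plus one L4 header per segment is accepted */
        seg.hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP;
        (void)ice_flow_val_hdrs(&seg, 1);       /* ICE_SUCCESS */

        /* Two L3 headers in one segment fail the power-of-two check */
        seg.hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6;
        (void)ice_flow_val_hdrs(&seg, 1);       /* ICE_ERR_PARAM */
}
#endif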
1027
1028 /* Sizes of fixed known protocol headers without header options */
1029 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
1030 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
1031 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
1032 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
1033 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
1034 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
1035 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
1036 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
1037 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
1038
1039 /**
1040  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
1041  * @params: information about the flow to be processed
1042  * @seg: index of packet segment whose header size is to be determined
1043  */
1044 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
1045 {
1046         u16 sz;
1047
1048         /* L2 headers */
1049         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
1050                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
1051
1052         /* L3 headers */
1053         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
1054                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
1055         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
1056                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
1057         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
1058                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
1059         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
1060                 /* An L3 header is required if L4 is specified */
1061                 return 0;
1062
1063         /* L4 headers */
1064         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
1065                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
1066         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
1067                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
1068         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
1069                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
1070         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
1071                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
1072
1073         return sz;
1074 }
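/* Illustrative worked example (not part of the driver build): for a segment
 * whose hdrs select VLAN + IPv4 + TCP, the fixed header size computed above is
 * 16 (MAC + VLAN) + 20 (IPv4) + 20 (TCP) = 56 bytes; a segment requesting an
 * L4 header with no L3 header yields 0. The example function is hypothetical.
 */
#if 0
static void ice_flow_calc_seg_sz_example(struct ice_flow_prof_params *params)
{
        u16 sz;

        params->prof->segs[0].hdrs = ICE_FLOW_SEG_HDR_VLAN |
                                     ICE_FLOW_SEG_HDR_IPV4 |
                                     ICE_FLOW_SEG_HDR_TCP;
        sz = ice_flow_calc_seg_sz(params, 0);   /* sz == 56 */
        (void)sz;
}
#endif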
1075
1076 /**
1077  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
1078  * @params: information about the flow to be processed
1079  *
1080  * This function identifies the packet types associated with the protocol
1081  * headers being present in packet segments of the specified flow profile.
1082  */
1083 static enum ice_status
1084 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
1085 {
1086         struct ice_flow_prof *prof;
1087         u8 i;
1088
1089         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
1090                    ICE_NONDMA_MEM);
1091
1092         prof = params->prof;
1093
1094         for (i = 0; i < params->prof->segs_cnt; i++) {
1095                 const ice_bitmap_t *src;
1096                 u32 hdrs;
1097
1098                 hdrs = prof->segs[i].hdrs;
1099
1100                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
1101                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
1102                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
1103                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1104                                        ICE_FLOW_PTYPE_MAX);
1105                 }
1106
1107                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
1108                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
1109                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1110                                        ICE_FLOW_PTYPE_MAX);
1111                 }
1112
1113                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
1114                         ice_and_bitmap(params->ptypes, params->ptypes,
1115                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
1116                                        ICE_FLOW_PTYPE_MAX);
1117                 }
1118
1119                 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
1120                         src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
1121                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1122                                        ICE_FLOW_PTYPE_MAX);
1123                 }
1124                 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1125                     (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1126                         src = i ?
1127                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
1128                                 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
1129                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1130                                        ICE_FLOW_PTYPE_MAX);
1131                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1132                            (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1133                         src = i ?
1134                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
1135                                 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
1136                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1137                                        ICE_FLOW_PTYPE_MAX);
1138                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1139                                 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1140                         src = (const ice_bitmap_t *)ice_ptypes_ipv4_frag;
1141                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1142                                        ICE_FLOW_PTYPE_MAX);
1143                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1144                                 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1145                         src = (const ice_bitmap_t *)ice_ptypes_ipv6_frag;
1146                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1147                                        ICE_FLOW_PTYPE_MAX);
1148                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1149                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1150                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
1151                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
1152                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1153                                        ICE_FLOW_PTYPE_MAX);
1154                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
1155                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
1156                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
1157                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1158                                        ICE_FLOW_PTYPE_MAX);
1159                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1160                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1161                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
1162                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
1163                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1164                                        ICE_FLOW_PTYPE_MAX);
1165                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
1166                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
1167                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
1168                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1169                                        ICE_FLOW_PTYPE_MAX);
1170                 }
1171
1172                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
1173                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
1174                         ice_and_bitmap(params->ptypes, params->ptypes,
1175                                        src, ICE_FLOW_PTYPE_MAX);
1176                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
1177                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1178                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1179                                        ICE_FLOW_PTYPE_MAX);
1180                 } else {
1181                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1182                         ice_andnot_bitmap(params->ptypes, params->ptypes, src,
1183                                           ICE_FLOW_PTYPE_MAX);
1184                 }
1185
1186                 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
1187                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
1188                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1189                                        ICE_FLOW_PTYPE_MAX);
1190                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
1191                         ice_and_bitmap(params->ptypes, params->ptypes,
1192                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
1193                                        ICE_FLOW_PTYPE_MAX);
1194                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
1195                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
1196                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1197                                        ICE_FLOW_PTYPE_MAX);
1198                 }
1199
1200                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
1201                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
1202                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
1203                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1204                                        ICE_FLOW_PTYPE_MAX);
1205                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
1206                         src = (const ice_bitmap_t *)ice_ptypes_gre_of;
1207                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1208                                        ICE_FLOW_PTYPE_MAX);
1209                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
1210                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
1211                         ice_and_bitmap(params->ptypes, params->ptypes,
1212                                        src, ICE_FLOW_PTYPE_MAX);
1213                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
1214                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
1215                         ice_and_bitmap(params->ptypes, params->ptypes,
1216                                        src, ICE_FLOW_PTYPE_MAX);
1217                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
1218                         src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
1219                         ice_and_bitmap(params->ptypes, params->ptypes,
1220                                        src, ICE_FLOW_PTYPE_MAX);
1221                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
1222                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1223                         ice_and_bitmap(params->ptypes, params->ptypes,
1224                                        src, ICE_FLOW_PTYPE_MAX);
1225
1226                         /* Attributes for GTP packet with downlink */
1227                         params->attr = ice_attr_gtpu_down;
1228                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1229                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
1230                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1231                         ice_and_bitmap(params->ptypes, params->ptypes,
1232                                        src, ICE_FLOW_PTYPE_MAX);
1233
1234                         /* Attributes for GTP packet with uplink */
1235                         params->attr = ice_attr_gtpu_up;
1236                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1237                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
1238                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1239                         ice_and_bitmap(params->ptypes, params->ptypes,
1240                                        src, ICE_FLOW_PTYPE_MAX);
1241
1242                         /* Attributes for GTP packet with Extension Header */
1243                         params->attr = ice_attr_gtpu_eh;
1244                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1245                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1246                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1247                         ice_and_bitmap(params->ptypes, params->ptypes,
1248                                        src, ICE_FLOW_PTYPE_MAX);
1249
1250                         /* Attributes for GTP packet without Extension Header */
1251                         params->attr = ice_attr_gtpu_session;
1252                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1253                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
1254                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
1255                         ice_and_bitmap(params->ptypes, params->ptypes,
1256                                        src, ICE_FLOW_PTYPE_MAX);
1257                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1258                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1259                         ice_and_bitmap(params->ptypes, params->ptypes,
1260                                        src, ICE_FLOW_PTYPE_MAX);
1261                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1262                         src = (const ice_bitmap_t *)ice_ptypes_esp;
1263                         ice_and_bitmap(params->ptypes, params->ptypes,
1264                                        src, ICE_FLOW_PTYPE_MAX);
1265                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1266                         src = (const ice_bitmap_t *)ice_ptypes_ah;
1267                         ice_and_bitmap(params->ptypes, params->ptypes,
1268                                        src, ICE_FLOW_PTYPE_MAX);
1269                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1270                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1271                         ice_and_bitmap(params->ptypes, params->ptypes,
1272                                        src, ICE_FLOW_PTYPE_MAX);
1273                 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1274                         src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1275                         ice_and_bitmap(params->ptypes, params->ptypes,
1276                                        src, ICE_FLOW_PTYPE_MAX);
1277                 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1278                         src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1279                         ice_and_bitmap(params->ptypes, params->ptypes,
1280                                        src, ICE_FLOW_PTYPE_MAX);
1281                 }
1282
1283                 if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
1284                         src = (const ice_bitmap_t *)ice_ptypes_ppp;
1285                         ice_and_bitmap(params->ptypes, params->ptypes,
1286                                        src, ICE_FLOW_PTYPE_MAX);
1287                 }
1288
1289                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1290                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1291                                 src =
1292                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1293                         else
1294                                 src =
1295                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1296
1297                         ice_and_bitmap(params->ptypes, params->ptypes,
1298                                        src, ICE_FLOW_PTYPE_MAX);
1299                 } else {
1300                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1301                         ice_andnot_bitmap(params->ptypes, params->ptypes,
1302                                           src, ICE_FLOW_PTYPE_MAX);
1303
1304                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1305                         ice_andnot_bitmap(params->ptypes, params->ptypes,
1306                                           src, ICE_FLOW_PTYPE_MAX);
1307                 }
1308         }
1309
1310         return ICE_SUCCESS;
1311 }
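/* Illustrative sketch (not part of the driver build): the bitmap narrowing
 * performed above. Starting from an all-ones PTYPE set, each selected header
 * ANDs in the PTYPEs that carry that header, so the final set is the
 * intersection over all headers selected for the segment. The example
 * function name is hypothetical.
 */
#if 0
static void ice_flow_ptype_narrowing_example(void)
{
        ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);

        ice_memset(ptypes, 0xff, sizeof(ptypes), ICE_NONDMA_MEM);

        /* Keep only PTYPEs that have an outer IPv4 header ... */
        ice_and_bitmap(ptypes, ptypes,
                       (const ice_bitmap_t *)ice_ptypes_ipv4_ofos,
                       ICE_FLOW_PTYPE_MAX);

        /* ... and of those, only the ones that also carry UDP */
        ice_and_bitmap(ptypes, ptypes,
                       (const ice_bitmap_t *)ice_ptypes_udp_il,
                       ICE_FLOW_PTYPE_MAX);
}
#endif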
1312
1313 /**
1314  * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
1315  * @hw: pointer to the HW struct
1316  * @params: information about the flow to be processed
1317  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1318  *
1319  * This function allocates an extraction sequence entry for a DWORD-sized
1320  * chunk of the packet flags.
1321  */
1322 static enum ice_status
1323 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1324                           struct ice_flow_prof_params *params,
1325                           enum ice_flex_mdid_pkt_flags flags)
1326 {
1327         u8 fv_words = hw->blk[params->blk].es.fvw;
1328         u8 idx;
1329
1330         /* Make sure the number of extraction sequence entries required does not
1331          * exceed the block's capacity.
1332          */
1333         if (params->es_cnt >= fv_words)
1334                 return ICE_ERR_MAX_LIMIT;
1335
1336         /* some blocks require a reversed field vector layout */
1337         if (hw->blk[params->blk].es.reverse)
1338                 idx = fv_words - params->es_cnt - 1;
1339         else
1340                 idx = params->es_cnt;
1341
1342         params->es[idx].prot_id = ICE_PROT_META_ID;
1343         params->es[idx].off = flags;
1344         params->es_cnt++;
1345
1346         return ICE_SUCCESS;
1347 }
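/* Illustrative note (not part of the driver build): with a reversed field
 * vector layout and, for example, fv_words == 48, the first extraction entry
 * (es_cnt == 0) is written at index 47, the next at 46, and so on; a
 * non-reversed layout fills indices 0, 1, 2, ... instead.
 */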
1348
1349 /**
1350  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1351  * @hw: pointer to the HW struct
1352  * @params: information about the flow to be processed
1353  * @seg: packet segment index of the field to be extracted
1354  * @fld: ID of field to be extracted
1355  * @match: bitfield of all fields
1356  *
1357  * This function determines the protocol ID, offset, and size of the given
1358  * field. It then allocates one or more extraction sequence entries for the
1359  * given field and fills the entries with protocol ID and offset information.
1360  */
1361 static enum ice_status
1362 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1363                     u8 seg, enum ice_flow_field fld, u64 match)
1364 {
1365         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1366         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1367         u8 fv_words = hw->blk[params->blk].es.fvw;
1368         struct ice_flow_fld_info *flds;
1369         u16 cnt, ese_bits, i;
1370         u16 sib_mask = 0;
1371         u16 mask;
1372         u16 off;
1373
1374         flds = params->prof->segs[seg].fields;
1375
1376         switch (fld) {
1377         case ICE_FLOW_FIELD_IDX_ETH_DA:
1378         case ICE_FLOW_FIELD_IDX_ETH_SA:
1379         case ICE_FLOW_FIELD_IDX_S_VLAN:
1380         case ICE_FLOW_FIELD_IDX_C_VLAN:
1381                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1382                 break;
1383         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1384                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1385                 break;
1386         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1387                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1388                 break;
1389         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1390                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1391                 break;
1392         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1393         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1394                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1395
1396                 /* TTL and PROT share the same extraction sequence entry.
1397                  * Each is therefore treated as the other's sibling when
1398                  * building the extraction sequence.
1399                  */
1400                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1401                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1402                 else
1403                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1404
1405                 /* If the sibling field is also included, that field's
1406                  * mask needs to be included.
1407                  */
1408                 if (match & BIT(sib))
1409                         sib_mask = ice_flds_info[sib].mask;
1410                 break;
1411         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1412         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1413                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1414
1415                 /* TTL and PROT share the same extraction sequence entry.
1416                  * Each is therefore treated as the other's sibling when
1417                  * building the extraction sequence.
1418                  */
1419                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1420                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1421                 else
1422                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1423
1424                 /* If the sibling field is also included, that field's
1425                  * mask needs to be included.
1426                  */
1427                 if (match & BIT(sib))
1428                         sib_mask = ice_flds_info[sib].mask;
1429                 break;
1430         case ICE_FLOW_FIELD_IDX_IPV4_SA:
1431         case ICE_FLOW_FIELD_IDX_IPV4_DA:
1432         case ICE_FLOW_FIELD_IDX_IPV4_CHKSUM:
1433                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1434                 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1435                     params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1436                     seg == 1)
1437                         prot_id = ICE_PROT_IPV4_IL_IL;
1438                 break;
1439         case ICE_FLOW_FIELD_IDX_IPV4_ID:
1440                 prot_id = ICE_PROT_IPV4_OF_OR_S;
1441                 break;
1442         case ICE_FLOW_FIELD_IDX_IPV6_SA:
1443         case ICE_FLOW_FIELD_IDX_IPV6_DA:
1444         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1445         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1446         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1447         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1448         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1449         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1450                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1451                 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1452                     params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1453                     seg == 1)
1454                         prot_id = ICE_PROT_IPV6_IL_IL;
1455                 break;
1456         case ICE_FLOW_FIELD_IDX_IPV6_ID:
1457                 prot_id = ICE_PROT_IPV6_FRAG;
1458                 break;
1459         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1460         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1461         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1462         case ICE_FLOW_FIELD_IDX_TCP_CHKSUM:
1463                 prot_id = ICE_PROT_TCP_IL;
1464                 break;
1465         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1466         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1467         case ICE_FLOW_FIELD_IDX_UDP_CHKSUM:
1468                 prot_id = ICE_PROT_UDP_IL_OR_S;
1469                 break;
1470         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1471         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1472         case ICE_FLOW_FIELD_IDX_SCTP_CHKSUM:
1473                 prot_id = ICE_PROT_SCTP_IL;
1474                 break;
1475         case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1476         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1477         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1478         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1479         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1480         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1481         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1482         case ICE_FLOW_FIELD_IDX_GTPU_UP_QFI:
1483         case ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI:
1484                 /* GTP is accessed through UDP OF protocol */
1485                 prot_id = ICE_PROT_UDP_OF;
1486                 break;
1487         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1488                 prot_id = ICE_PROT_PPPOE;
1489                 break;
1490         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1491                 prot_id = ICE_PROT_UDP_IL_OR_S;
1492                 break;
1493         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1494                 prot_id = ICE_PROT_L2TPV3;
1495                 break;
1496         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1497                 prot_id = ICE_PROT_ESP_F;
1498                 break;
1499         case ICE_FLOW_FIELD_IDX_AH_SPI:
1500                 prot_id = ICE_PROT_ESP_2;
1501                 break;
1502         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1503                 prot_id = ICE_PROT_UDP_IL_OR_S;
1504                 break;
1505         case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1506                 prot_id = ICE_PROT_ECPRI;
1507                 break;
1508         case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1509                 prot_id = ICE_PROT_UDP_IL_OR_S;
1510                 break;
1511         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1512         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1513         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1514         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1515         case ICE_FLOW_FIELD_IDX_ARP_OP:
1516                 prot_id = ICE_PROT_ARP_OF;
1517                 break;
1518         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1519         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1520                 /* ICMP type and code share the same extraction seq. entry */
1521                 prot_id = (params->prof->segs[seg].hdrs &
1522                            ICE_FLOW_SEG_HDR_IPV4) ?
1523                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1524                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1525                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1526                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1527                 break;
1528         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1529                 prot_id = ICE_PROT_GRE_OF;
1530                 break;
1531         default:
1532                 return ICE_ERR_NOT_IMPL;
1533         }
1534
1535         /* Each extraction sequence entry is a word in size and extracts a
1536          * word-aligned chunk from a protocol header.
1537          */
1538         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1539
1540         flds[fld].xtrct.prot_id = prot_id;
1541         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1542                 ICE_FLOW_FV_EXTRACT_SZ;
1543         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1544         flds[fld].xtrct.idx = params->es_cnt;
1545         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1546
1547         /* Adjust the next field-entry index after accommodating the number of
1548          * entries this field consumes
1549          */
1550         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1551                                   ice_flds_info[fld].size, ese_bits);
1552
1553         /* Fill in the extraction sequence entries needed for this field */
1554         off = flds[fld].xtrct.off;
1555         mask = flds[fld].xtrct.mask;
1556         for (i = 0; i < cnt; i++) {
1557                 /* Only consume an extraction sequence entry if there is no
1558                  * sibling field for this field, or if the sibling's entry
1559                  * does not already extract the word shared with this field.
1560                  */
1561                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1562                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1563                     flds[sib].xtrct.off != off) {
1564                         u8 idx;
1565
1566                         /* Make sure the number of extraction sequence entries
1567                          * required does not exceed the block's capacity
1568                          */
1569                         if (params->es_cnt >= fv_words)
1570                                 return ICE_ERR_MAX_LIMIT;
1571
1572                         /* some blocks require a reversed field vector layout */
1573                         if (hw->blk[params->blk].es.reverse)
1574                                 idx = fv_words - params->es_cnt - 1;
1575                         else
1576                                 idx = params->es_cnt;
1577
1578                         params->es[idx].prot_id = prot_id;
1579                         params->es[idx].off = off;
1580                         params->mask[idx] = mask | sib_mask;
1581                         params->es_cnt++;
1582                 }
1583
1584                 off += ICE_FLOW_FV_EXTRACT_SZ;
1585         }
1586
1587         return ICE_SUCCESS;
1588 }
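/* Illustrative worked example (not part of the driver build), assuming the
 * field table places ICE_FLOW_FIELD_IDX_IPV4_SA at byte offset 12 with a
 * 4-byte size. With 2-byte extraction words (ese_bits == 16):
 *   xtrct.off  = (96 / 16) * 2                    = 12 bytes (word-aligned)
 *   xtrct.disp = 96 % 16                          = 0 bits of displacement
 *   cnt        = DIVIDE_AND_ROUND_UP(0 + 32, 16)  = 2 FV entries consumed
 */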
1589
1590 /**
1591  * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1592  * @hw: pointer to the HW struct
1593  * @params: information about the flow to be processed
1594  * @seg: index of packet segment whose raw fields are to be extracted
1595  */
1596 static enum ice_status
1597 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1598                      u8 seg)
1599 {
1600         u16 fv_words;
1601         u16 hdrs_sz;
1602         u8 i;
1603
1604         if (!params->prof->segs[seg].raws_cnt)
1605                 return ICE_SUCCESS;
1606
1607         if (params->prof->segs[seg].raws_cnt >
1608             ARRAY_SIZE(params->prof->segs[seg].raws))
1609                 return ICE_ERR_MAX_LIMIT;
1610
1611         /* Offsets within the segment headers are not supported */
1612         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1613         if (!hdrs_sz)
1614                 return ICE_ERR_PARAM;
1615
1616         fv_words = hw->blk[params->blk].es.fvw;
1617
1618         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1619                 struct ice_flow_seg_fld_raw *raw;
1620                 u16 off, cnt, j;
1621
1622                 raw = &params->prof->segs[seg].raws[i];
1623
1624                 /* Storing extraction information */
1625                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1626                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1627                         ICE_FLOW_FV_EXTRACT_SZ;
1628                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1629                         BITS_PER_BYTE;
1630                 raw->info.xtrct.idx = params->es_cnt;
1631
1632                 /* Determine the number of field vector entries this raw field
1633                  * consumes.
1634                  */
1635                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1636                                           (raw->info.src.last * BITS_PER_BYTE),
1637                                           (ICE_FLOW_FV_EXTRACT_SZ *
1638                                            BITS_PER_BYTE));
1639                 off = raw->info.xtrct.off;
1640                 for (j = 0; j < cnt; j++) {
1641                         u16 idx;
1642
1643                         /* Make sure the number of extraction sequence entries
1644                          * required does not exceed the block's capacity
1645                          */
1646                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1647                             params->es_cnt >= ICE_MAX_FV_WORDS)
1648                                 return ICE_ERR_MAX_LIMIT;
1649
1650                         /* some blocks require a reversed field vector layout */
1651                         if (hw->blk[params->blk].es.reverse)
1652                                 idx = fv_words - params->es_cnt - 1;
1653                         else
1654                                 idx = params->es_cnt;
1655
1656                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1657                         params->es[idx].off = off;
1658                         params->es_cnt++;
1659                         off += ICE_FLOW_FV_EXTRACT_SZ;
1660                 }
1661         }
1662
1663         return ICE_SUCCESS;
1664 }
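/* Illustrative worked example (not part of the driver build): a raw field at
 * byte offset 3 with a 5-byte match length yields
 *   xtrct.off  = (3 / 2) * 2                      = 2 bytes
 *   xtrct.disp = (3 % 2) * 8                      = 8 bits
 *   cnt        = DIVIDE_AND_ROUND_UP(8 + 40, 16)  = 3 FV entries consumed
 */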
1665
1666 /**
1667  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1668  * @hw: pointer to the HW struct
1669  * @params: information about the flow to be processed
1670  *
1671  * This function iterates through all matched fields in the given segments, and
1672  * creates an extraction sequence for the fields.
1673  */
1674 static enum ice_status
1675 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1676                           struct ice_flow_prof_params *params)
1677 {
1678         enum ice_status status = ICE_SUCCESS;
1679         u8 i;
1680
1681         /* For ACL, we also need to extract the direction bit (Rx/Tx) from
1682          * the packet flags
1683          */
1684         if (params->blk == ICE_BLK_ACL) {
1685                 status = ice_flow_xtract_pkt_flags(hw, params,
1686                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1687                 if (status)
1688                         return status;
1689         }
1690
1691         for (i = 0; i < params->prof->segs_cnt; i++) {
1692                 u64 match = params->prof->segs[i].match;
1693                 enum ice_flow_field j;
1694
1695                 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1696                                      ICE_FLOW_FIELD_IDX_MAX) {
1697                         status = ice_flow_xtract_fld(hw, params, i, j, match);
1698                         if (status)
1699                                 return status;
1700                         ice_clear_bit(j, (ice_bitmap_t *)&match);
1701                 }
1702
1703                 /* Process raw matching bytes */
1704                 status = ice_flow_xtract_raws(hw, params, i);
1705                 if (status)
1706                         return status;
1707         }
1708
1709         return status;
1710 }
1711
1712 /**
1713  * ice_flow_sel_acl_scen - select the ACL scenario for a flow profile
1714  * @hw: pointer to the hardware structure
1715  * @params: information about the flow to be processed
1716  *
1717  * This function selects the narrowest ACL scenario whose effective width
1718  * can accommodate the entry length required by the flow profile.
1719  */
1720 static enum ice_status
1721 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1722 {
1723         /* Find the best-fit scenario for the provided match width */
1724         struct ice_acl_scen *cand_scen = NULL, *scen;
1725
1726         if (!hw->acl_tbl)
1727                 return ICE_ERR_DOES_NOT_EXIST;
1728
1729         /* Loop through each scenario and match against the scenario width
1730          * to select the specific scenario
1731          */
1732         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1733                 if (scen->eff_width >= params->entry_length &&
1734                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1735                         cand_scen = scen;
1736         if (!cand_scen)
1737                 return ICE_ERR_DOES_NOT_EXIST;
1738
1739         params->prof->cfg.scen = cand_scen;
1740
1741         return ICE_SUCCESS;
1742 }
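/* Illustrative example (not part of the driver build): if the ACL table
 * exposes scenarios with effective widths of 20, 30 and 60 bytes and the
 * profile needs entry_length == 25, the loop above selects the 30-byte
 * scenario, i.e. the narrowest one that still fits the entry.
 */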
1743
1744 /**
1745  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1746  * @params: information about the flow to be processed
1747  */
1748 static enum ice_status
1749 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1750 {
1751         u16 index, i, range_idx = 0;
1752
1753         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1754
1755         for (i = 0; i < params->prof->segs_cnt; i++) {
1756                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1757                 u8 j;
1758
1759                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1760                                      ICE_FLOW_FIELD_IDX_MAX) {
1761                         struct ice_flow_fld_info *fld = &seg->fields[j];
1762
1763                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1764
1765                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1766                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1767
1768                                 /* Range checking only supported for single
1769                                  * words
1770                                  */
1771                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1772                                                         fld->xtrct.disp,
1773                                                         BITS_PER_BYTE * 2) > 1)
1774                                         return ICE_ERR_PARAM;
1775
1776                                 /* Ranges must define low and high values */
1777                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1778                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1779                                         return ICE_ERR_PARAM;
1780
1781                                 fld->entry.val = range_idx++;
1782                         } else {
1783                                 /* Store adjusted byte-length of field for later
1784                                  * use, taking into account potential
1785                                  * non-byte-aligned displacement
1786                                  */
1787                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1788                                         (ice_flds_info[j].size +
1789                                          (fld->xtrct.disp % BITS_PER_BYTE),
1790                                          BITS_PER_BYTE);
1791                                 fld->entry.val = index;
1792                                 index += fld->entry.last;
1793                         }
1794                 }
1795
1796                 for (j = 0; j < seg->raws_cnt; j++) {
1797                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1798
1799                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1800                         raw->info.entry.val = index;
1801                         raw->info.entry.last = raw->info.src.last;
1802                         index += raw->info.entry.last;
1803                 }
1804         }
1805
1806         /* Currently, only the byte selection base is supported, which
1807          * limits the effective entry size to 30 bytes. Reject anything
1808          * larger.
1809          */
1810         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1811                 return ICE_ERR_PARAM;
1812
1813         /* Only 8 range checkers are available per profile; reject anything
1814          * that tries to use more
1815          */
1816         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1817                 return ICE_ERR_PARAM;
1818
1819         /* Store # bytes required for entry for later use */
1820         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1821
1822         return ICE_SUCCESS;
1823 }
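/* Illustrative worked example (not part of the driver build): a profile with
 * one 2-byte field, one 4-byte field and one single-word range field lays out
 * as follows. The range field takes range checker 0 and no byte-selection
 * space; the two byte fields are placed back to back starting at
 * ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX, so the resulting entry_length is
 * 2 + 4 = 6 bytes.
 */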
1824
1825 /**
1826  * ice_flow_proc_segs - process all packet segments associated with a profile
1827  * @hw: pointer to the HW struct
1828  * @params: information about the flow to be processed
1829  */
1830 static enum ice_status
1831 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1832 {
1833         enum ice_status status;
1834
1835         status = ice_flow_proc_seg_hdrs(params);
1836         if (status)
1837                 return status;
1838
1839         status = ice_flow_create_xtrct_seq(hw, params);
1840         if (status)
1841                 return status;
1842
1843         switch (params->blk) {
1844         case ICE_BLK_FD:
1845         case ICE_BLK_RSS:
1846                 status = ICE_SUCCESS;
1847                 break;
1848         case ICE_BLK_ACL:
1849                 status = ice_flow_acl_def_entry_frmt(params);
1850                 if (status)
1851                         return status;
1852                 status = ice_flow_sel_acl_scen(hw, params);
1853                 if (status)
1854                         return status;
1855                 break;
1856         default:
1857                 return ICE_ERR_NOT_IMPL;
1858         }
1859
1860         return status;
1861 }
1862
1863 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1864 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1865 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1866
1867 /**
1868  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1869  * @hw: pointer to the HW struct
1870  * @blk: classification stage
1871  * @dir: flow direction
1872  * @segs: array of one or more packet segments that describe the flow
1873  * @segs_cnt: number of packet segments provided
1874  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1875  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1876  */
1877 static struct ice_flow_prof *
1878 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1879                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1880                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1881 {
1882         struct ice_flow_prof *p, *prof = NULL;
1883
1884         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1885         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1886                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1887                     segs_cnt && segs_cnt == p->segs_cnt) {
1888                         u8 i;
1889
1890                         /* Check for profile-VSI association if specified */
1891                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1892                             ice_is_vsi_valid(hw, vsi_handle) &&
1893                             !ice_is_bit_set(p->vsis, vsi_handle))
1894                                 continue;
1895
1896                         /* Protocol headers must be checked. Matched fields are
1897                          * checked if specified.
1898                          */
1899                         for (i = 0; i < segs_cnt; i++)
1900                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1901                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1902                                      segs[i].match != p->segs[i].match))
1903                                         break;
1904
1905                         /* A match is found if all segments are matched */
1906                         if (i == segs_cnt) {
1907                                 prof = p;
1908                                 break;
1909                         }
1910                 }
1911         ice_release_lock(&hw->fl_profs_locks[blk]);
1912
1913         return prof;
1914 }
1915
1916 /**
1917  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1918  * @hw: pointer to the HW struct
1919  * @blk: classification stage
1920  * @dir: flow direction
1921  * @segs: array of one or more packet segments that describe the flow
1922  * @segs_cnt: number of packet segments provided
1923  */
1924 u64
1925 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1926                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1927 {
1928         struct ice_flow_prof *p;
1929
1930         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1931                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1932
1933         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1934 }
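/* Illustrative usage sketch (not part of the driver build): looking up an RSS
 * profile for outer IPv4/UDP traffic. The example function name is
 * hypothetical, and ICE_FLOW_RX is assumed to be the Rx direction enumerator
 * used elsewhere in this driver.
 */
#if 0
static bool ice_flow_prof_exists_example(struct ice_hw *hw)
{
        struct ice_flow_seg_info seg = { 0 };

        seg.hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP;

        return ice_flow_find_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, &seg, 1) !=
                ICE_FLOW_PROF_ID_INVAL;
}
#endif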
1935
1936 /**
1937  * ice_flow_find_prof_id - Look up a profile with given profile ID
1938  * @hw: pointer to the HW struct
1939  * @blk: classification stage
1940  * @prof_id: unique ID to identify this flow profile
1941  */
1942 static struct ice_flow_prof *
1943 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1944 {
1945         struct ice_flow_prof *p;
1946
1947         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1948                 if (p->id == prof_id)
1949                         return p;
1950
1951         return NULL;
1952 }
1953
1954 /**
1955  * ice_dealloc_flow_entry - Deallocate flow entry memory
1956  * @hw: pointer to the HW struct
1957  * @entry: flow entry to be removed
1958  */
1959 static void
1960 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1961 {
1962         if (!entry)
1963                 return;
1964
1965         if (entry->entry)
1966                 ice_free(hw, entry->entry);
1967
1968         if (entry->range_buf) {
1969                 ice_free(hw, entry->range_buf);
1970                 entry->range_buf = NULL;
1971         }
1972
1973         if (entry->acts) {
1974                 ice_free(hw, entry->acts);
1975                 entry->acts = NULL;
1976                 entry->acts_cnt = 0;
1977         }
1978
1979         ice_free(hw, entry);
1980 }
1981
1982 /**
1983  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1984  * @hw: pointer to the HW struct
1985  * @blk: classification stage
1986  * @prof_id: the profile ID handle
1987  * @hw_prof_id: pointer to variable to receive the HW profile ID
1988  */
1989 enum ice_status
1990 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1991                      u8 *hw_prof_id)
1992 {
1993         enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1994         struct ice_prof_map *map;
1995
1996         ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1997         map = ice_search_prof_id(hw, blk, prof_id);
1998         if (map) {
1999                 *hw_prof_id = map->prof_id;
2000                 status = ICE_SUCCESS;
2001         }
2002         ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2003         return status;
2004 }
2005
2006 #define ICE_ACL_INVALID_SCEN    0x3f
2007
2008 /**
2009  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
2010  * @hw: pointer to the hardware structure
2011  * @prof: pointer to flow profile
2012  * @buf: destination buffer the function writes the partial extraction sequence to
2013  *
2014  * Returns ICE_SUCCESS if no PF is associated with the given profile,
2015  * ICE_ERR_IN_USE if at least one PF is associated with the given profile,
2016  * and another error code if the underlying queries fail.
2017  */
2018 static enum ice_status
2019 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
2020                             struct ice_aqc_acl_prof_generic_frmt *buf)
2021 {
2022         enum ice_status status;
2023         u8 prof_id = 0;
2024
2025         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2026         if (status)
2027                 return status;
2028
2029         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
2030         if (status)
2031                 return status;
2032
2033         /* If the scenarios associated with the PFs are either all 0 or all
2034          * ICE_ACL_INVALID_SCEN (63) for the given profile, then the profile
2035          * has not been configured yet.
2036          */
2037         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
2038             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
2039             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
2040             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
2041                 return ICE_SUCCESS;
2042
2043         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
2044             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
2045             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
2046             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
2047             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
2048             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
2049             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
2050             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
2051                 return ICE_SUCCESS;
2052
2053         return ICE_ERR_IN_USE;
2054 }
2055
2056 /**
2057  * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
2058  * @hw: pointer to the hardware structure
2059  * @acts: array of actions to be performed on a match
2060  * @acts_cnt: number of actions
2061  */
2062 static enum ice_status
2063 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
2064                            u8 acts_cnt)
2065 {
2066         int i;
2067
2068         for (i = 0; i < acts_cnt; i++) {
2069                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2070                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2071                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2072                         struct ice_acl_cntrs cntrs = { 0 };
2073                         enum ice_status status;
2074
2075                         /* amount is unused in the dealloc path but the common
2076                          * parameter check routine wants a value set, as zero
2077                          * is invalid for the check. Just set it.
2078                          */
2079                         cntrs.amount = 1;
2080                         cntrs.bank = 0; /* Only bank0 for the moment */
2081                         cntrs.first_cntr =
2082                                         LE16_TO_CPU(acts[i].data.acl_act.value);
2083                         cntrs.last_cntr =
2084                                         LE16_TO_CPU(acts[i].data.acl_act.value);
2085
2086                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2087                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2088                         else
2089                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2090
2091                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
2092                         if (status)
2093                                 return status;
2094                 }
2095         }
2096         return ICE_SUCCESS;
2097 }
2098
2099 /**
2100  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
2101  * @hw: pointer to the hardware structure
2102  * @prof: pointer to flow profile
2103  *
2104  * Disassociate the scenario from the profile for the current PF.
2105  */
2106 static enum ice_status
2107 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
2108 {
2109         struct ice_aqc_acl_prof_generic_frmt buf;
2110         enum ice_status status = ICE_SUCCESS;
2111         u8 prof_id = 0;
2112
2113         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2114
2115         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2116         if (status)
2117                 return status;
2118
2119         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
2120         if (status)
2121                 return status;
2122
2123         /* Clear scenario for this PF */
2124         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
2125         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2126
2127         return status;
2128 }
2129
2130 /**
2131  * ice_flow_rem_entry_sync - Remove a flow entry
2132  * @hw: pointer to the HW struct
2133  * @blk: classification stage
2134  * @entry: flow entry to be removed
2135  */
2136 static enum ice_status
2137 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
2138                         struct ice_flow_entry *entry)
2139 {
2140         if (!entry)
2141                 return ICE_ERR_BAD_PTR;
2142
2143         if (blk == ICE_BLK_ACL) {
2144                 enum ice_status status;
2145
2146                 if (!entry->prof)
2147                         return ICE_ERR_BAD_PTR;
2148
2149                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
2150                                            entry->scen_entry_idx);
2151                 if (status)
2152                         return status;
2153
2154                 /* Check whether an ACL counter needs to be released. */
2155                 if (entry->acts_cnt && entry->acts)
2156                         ice_flow_acl_free_act_cntr(hw, entry->acts,
2157                                                    entry->acts_cnt);
2158         }
2159
2160         LIST_DEL(&entry->l_entry);
2161
2162         ice_dealloc_flow_entry(hw, entry);
2163
2164         return ICE_SUCCESS;
2165 }
2166
2167 /**
2168  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
2169  * @hw: pointer to the HW struct
2170  * @blk: classification stage
2171  * @dir: flow direction
2172  * @prof_id: unique ID to identify this flow profile
2173  * @segs: array of one or more packet segments that describe the flow
2174  * @segs_cnt: number of packet segments provided
2175  * @acts: array of default actions
2176  * @acts_cnt: number of default actions
2177  * @prof: stores the returned flow profile added
2178  *
2179  * Assumption: the caller has acquired the lock to the profile list
2180  */
2181 static enum ice_status
2182 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
2183                        enum ice_flow_dir dir, u64 prof_id,
2184                        struct ice_flow_seg_info *segs, u8 segs_cnt,
2185                        struct ice_flow_action *acts, u8 acts_cnt,
2186                        struct ice_flow_prof **prof)
2187 {
2188         struct ice_flow_prof_params *params;
2189         enum ice_status status;
2190         u8 i;
2191
2192         if (!prof || (acts_cnt && !acts))
2193                 return ICE_ERR_BAD_PTR;
2194
2195         params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2196         if (!params)
2197                 return ICE_ERR_NO_MEMORY;
2198
2199         params->prof = (struct ice_flow_prof *)
2200                 ice_malloc(hw, sizeof(*params->prof));
2201         if (!params->prof) {
2202                 status = ICE_ERR_NO_MEMORY;
2203                 goto free_params;
2204         }
2205
2206         /* initialize extraction sequence to all invalid (0xff) */
2207         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2208                 params->es[i].prot_id = ICE_PROT_INVALID;
2209                 params->es[i].off = ICE_FV_OFFSET_INVAL;
2210         }
2211
2212         params->blk = blk;
2213         params->prof->id = prof_id;
2214         params->prof->dir = dir;
2215         params->prof->segs_cnt = segs_cnt;
2216
2217         /* Make a copy of the segments that need to be persistent in the flow
2218          * profile instance
2219          */
2220         for (i = 0; i < segs_cnt; i++)
2221                 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
2222                            ICE_NONDMA_TO_NONDMA);
2223
2224         /* Make a copy of the actions that need to be persistent in the flow
2225          * profile instance.
2226          */
2227         if (acts_cnt) {
2228                 params->prof->acts = (struct ice_flow_action *)
2229                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2230                                    ICE_NONDMA_TO_NONDMA);
2231
2232                 if (!params->prof->acts) {
2233                         status = ICE_ERR_NO_MEMORY;
2234                         goto out;
2235                 }
2236         }
2237
2238         status = ice_flow_proc_segs(hw, params);
2239         if (status) {
2240                 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2241                 goto out;
2242         }
2243
2244         /* Add a HW profile for this flow profile */
2245         status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2246                               params->attr, params->attr_cnt, params->es,
2247                               params->mask);
2248         if (status) {
2249                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
2250                 goto out;
2251         }
2252
2253         INIT_LIST_HEAD(&params->prof->entries);
2254         ice_init_lock(&params->prof->entries_lock);
2255         *prof = params->prof;
2256
2257 out:
2258         if (status) {
2259                 if (params->prof->acts)
2260                         ice_free(hw, params->prof->acts);
2261                 ice_free(hw, params->prof);
2262         }
2263 free_params:
2264         ice_free(hw, params);
2265
2266         return status;
2267 }
2268
2269 /**
2270  * ice_flow_rem_prof_sync - remove a flow profile
2271  * @hw: pointer to the hardware structure
2272  * @blk: classification stage
2273  * @prof: pointer to flow profile to remove
2274  *
2275  * Assumption: the caller has acquired the lock to the profile list
2276  */
2277 static enum ice_status
2278 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2279                        struct ice_flow_prof *prof)
2280 {
2281         enum ice_status status;
2282
2283         /* Remove all remaining flow entries before removing the flow profile */
2284         if (!LIST_EMPTY(&prof->entries)) {
2285                 struct ice_flow_entry *e, *t;
2286
2287                 ice_acquire_lock(&prof->entries_lock);
2288
2289                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2290                                          l_entry) {
2291                         status = ice_flow_rem_entry_sync(hw, blk, e);
2292                         if (status)
2293                                 break;
2294                 }
2295
2296                 ice_release_lock(&prof->entries_lock);
2297         }
2298
2299         if (blk == ICE_BLK_ACL) {
2300                 struct ice_aqc_acl_profile_ranges query_rng_buf;
2301                 struct ice_aqc_acl_prof_generic_frmt buf;
2302                 u8 prof_id = 0;
2303
2304                 /* Disassociate the scenario from the profile for the PF */
2305                 status = ice_flow_acl_disassoc_scen(hw, prof);
2306                 if (status)
2307                         return status;
2308
2309                 /* Clear the range-checker if the profile ID is no longer
2310                  * used by any PF
2311                  */
2312                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2313                 if (status && status != ICE_ERR_IN_USE) {
2314                         return status;
2315                 } else if (!status) {
2316                         /* Clear the range-checker value for profile ID */
2317                         ice_memset(&query_rng_buf, 0,
2318                                    sizeof(struct ice_aqc_acl_profile_ranges),
2319                                    ICE_NONDMA_MEM);
2320
2321                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
2322                                                       &prof_id);
2323                         if (status)
2324                                 return status;
2325
2326                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2327                                                           &query_rng_buf, NULL);
2328                         if (status)
2329                                 return status;
2330                 }
2331         }
2332
2333         /* Remove all hardware profiles associated with this flow profile */
2334         status = ice_rem_prof(hw, blk, prof->id);
2335         if (!status) {
2336                 LIST_DEL(&prof->l_entry);
2337                 ice_destroy_lock(&prof->entries_lock);
2338                 if (prof->acts)
2339                         ice_free(hw, prof->acts);
2340                 ice_free(hw, prof);
2341         }
2342
2343         return status;
2344 }
2345
2346 /**
2347  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2348  * @buf: Destination buffer function writes partial xtrct sequence to
2349  * @info: Info about field
2350  */
2351 static void
2352 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2353                                struct ice_flow_fld_info *info)
2354 {
2355         u16 dst, i;
2356         u8 src;
2357
2358         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2359                 info->xtrct.disp / BITS_PER_BYTE;
2360         dst = info->entry.val;
2361         for (i = 0; i < info->entry.last; i++)
2362                 /* HW stores field vector words in LE, convert words back to BE
2363                  * so constructed entries will end up in network order
2364                  */
2365                 buf->byte_selection[dst++] = src++ ^ 1;
2366 }
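
/* Worked example (illustrative only, assuming ICE_FLOW_FV_EXTRACT_SZ is 2,
 * i.e. one field-vector word holds two bytes): a two-byte field extracted
 * into FV word index 3 with zero displacement starts at src = 3 * 2 = 6 and
 * has info->entry.last == 2, so the loop emits the byte selections
 * 6 ^ 1 = 7 followed by 7 ^ 1 = 6. XOR-ing with 1 swaps the two bytes of
 * each little-endian FV word so the constructed ACL key ends up in network
 * (big-endian) order.
 */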
2367
2368 /**
2369  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2370  * @hw: pointer to the hardware structure
2371  * @prof: pointer to flow profile
2372  */
2373 static enum ice_status
2374 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2375 {
2376         struct ice_aqc_acl_prof_generic_frmt buf;
2377         struct ice_flow_fld_info *info;
2378         enum ice_status status;
2379         u8 prof_id = 0;
2380         u16 i;
2381
2382         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2383
2384         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2385         if (status)
2386                 return status;
2387
2388         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2389         if (status && status != ICE_ERR_IN_USE)
2390                 return status;
2391
2392         if (!status) {
2393                 /* Program the profile dependent configuration. This is done
2394                  * only once regardless of the number of PFs using that profile
2395                  */
2396                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2397
2398                 for (i = 0; i < prof->segs_cnt; i++) {
2399                         struct ice_flow_seg_info *seg = &prof->segs[i];
2400                         u16 j;
2401
2402                         ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2403                                              ICE_FLOW_FIELD_IDX_MAX) {
2404                                 info = &seg->fields[j];
2405
2406                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2407                                         buf.word_selection[info->entry.val] =
2408                                                 info->xtrct.idx;
2409                                 else
2410                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
2411                                                                        info);
2412                         }
2413
2414                         for (j = 0; j < seg->raws_cnt; j++) {
2415                                 info = &seg->raws[j].info;
2416                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2417                         }
2418                 }
2419
2420                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2421                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2422                            ICE_NONDMA_MEM);
2423         }
2424
2425         /* Update the current PF */
2426         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2427         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2428
2429         return status;
2430 }
2431
2432 /**
2433  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2434  * @hw: pointer to the hardware structure
2435  * @blk: classification stage
2436  * @vsi_handle: software VSI handle
2437  * @vsig: target VSI group
2438  *
2439  * Assumption: the caller has already verified that the VSI to
2440  * be added has the same characteristics as the VSIG and will
2441  * thereby have access to all resources added to that VSIG.
2442  */
2443 enum ice_status
2444 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2445                         u16 vsig)
2446 {
2447         enum ice_status status;
2448
2449         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2450                 return ICE_ERR_PARAM;
2451
2452         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2453         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2454                                   vsig);
2455         ice_release_lock(&hw->fl_profs_locks[blk]);
2456
2457         return status;
2458 }
2459
2460 /**
2461  * ice_flow_assoc_prof - associate a VSI with a flow profile
2462  * @hw: pointer to the hardware structure
2463  * @blk: classification stage
2464  * @prof: pointer to flow profile
2465  * @vsi_handle: software VSI handle
2466  *
2467  * Assumption: the caller has acquired the lock to the profile list
2468  * and the software VSI handle has been validated
2469  */
2470 enum ice_status
2471 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2472                     struct ice_flow_prof *prof, u16 vsi_handle)
2473 {
2474         enum ice_status status = ICE_SUCCESS;
2475
2476         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2477                 if (blk == ICE_BLK_ACL) {
2478                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2479                         if (status)
2480                                 return status;
2481                 }
2482                 status = ice_add_prof_id_flow(hw, blk,
2483                                               ice_get_hw_vsi_num(hw,
2484                                                                  vsi_handle),
2485                                               prof->id);
2486                 if (!status)
2487                         ice_set_bit(vsi_handle, prof->vsis);
2488                 else
2489                         ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2490                                   status);
2491         }
2492
2493         return status;
2494 }
2495
2496 /**
2497  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2498  * @hw: pointer to the hardware structure
2499  * @blk: classification stage
2500  * @prof: pointer to flow profile
2501  * @vsi_handle: software VSI handle
2502  *
2503  * Assumption: the caller has acquired the lock to the profile list
2504  * and the software VSI handle has been validated
2505  */
2506 static enum ice_status
2507 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2508                        struct ice_flow_prof *prof, u16 vsi_handle)
2509 {
2510         enum ice_status status = ICE_SUCCESS;
2511
2512         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2513                 status = ice_rem_prof_id_flow(hw, blk,
2514                                               ice_get_hw_vsi_num(hw,
2515                                                                  vsi_handle),
2516                                               prof->id);
2517                 if (!status)
2518                         ice_clear_bit(vsi_handle, prof->vsis);
2519                 else
2520                         ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2521                                   status);
2522         }
2523
2524         return status;
2525 }
2526
2527 /**
2528  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2529  * @hw: pointer to the HW struct
2530  * @blk: classification stage
2531  * @dir: flow direction
2532  * @prof_id: unique ID to identify this flow profile
2533  * @segs: array of one or more packet segments that describe the flow
2534  * @segs_cnt: number of packet segments provided
2535  * @acts: array of default actions
2536  * @acts_cnt: number of default actions
2537  * @prof: stores the returned flow profile added
2538  */
2539 enum ice_status
2540 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2541                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2542                   struct ice_flow_action *acts, u8 acts_cnt,
2543                   struct ice_flow_prof **prof)
2544 {
2545         enum ice_status status;
2546
2547         if (segs_cnt > ICE_FLOW_SEG_MAX)
2548                 return ICE_ERR_MAX_LIMIT;
2549
2550         if (!segs_cnt)
2551                 return ICE_ERR_PARAM;
2552
2553         if (!segs)
2554                 return ICE_ERR_BAD_PTR;
2555
2556         status = ice_flow_val_hdrs(segs, segs_cnt);
2557         if (status)
2558                 return status;
2559
2560         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2561
2562         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2563                                         acts, acts_cnt, prof);
2564         if (!status)
2565                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2566
2567         ice_release_lock(&hw->fl_profs_locks[blk]);
2568
2569         return status;
2570 }
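
/* Usage sketch (illustrative only; the block, profile ID and segment
 * contents below are assumptions for the example, not values used by the
 * driver):
 *
 *	struct ice_flow_seg_info segs[1] = { 0 };
 *	struct ice_flow_prof *prof;
 *	enum ice_status status;
 *
 *	ICE_FLOW_SET_HDRS(&segs[0], ICE_FLOW_SEG_HDR_IPV4 |
 *				    ICE_FLOW_SEG_HDR_UDP);
 *	status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, 0x1234,
 *				   segs, 1, NULL, 0, &prof);
 *
 * On success the new profile is linked into hw->fl_profs[blk], and its
 * profile ID can later be passed to ice_flow_add_entry().
 */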
2571
2572 /**
2573  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2574  * @hw: pointer to the HW struct
2575  * @blk: the block for which the flow profile is to be removed
2576  * @prof_id: unique ID of the flow profile to be removed
2577  */
2578 enum ice_status
2579 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2580 {
2581         struct ice_flow_prof *prof;
2582         enum ice_status status;
2583
2584         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2585
2586         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2587         if (!prof) {
2588                 status = ICE_ERR_DOES_NOT_EXIST;
2589                 goto out;
2590         }
2591
2592         /* prof becomes invalid after the call */
2593         status = ice_flow_rem_prof_sync(hw, blk, prof);
2594
2595 out:
2596         ice_release_lock(&hw->fl_profs_locks[blk]);
2597
2598         return status;
2599 }
2600
2601 /**
2602  * ice_flow_find_entry - look for a flow entry using its unique ID
2603  * @hw: pointer to the HW struct
2604  * @blk: classification stage
2605  * @entry_id: unique ID to identify this flow entry
2606  *
2607  * This function looks for the flow entry with the specified unique ID in all
2608  * flow profiles of the specified classification stage. If the entry is found,
2609  * this function returns the handle to the flow entry. Otherwise, it returns
2610  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2611  */
2612 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2613 {
2614         struct ice_flow_entry *found = NULL;
2615         struct ice_flow_prof *p;
2616
2617         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2618
2619         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2620                 struct ice_flow_entry *e;
2621
2622                 ice_acquire_lock(&p->entries_lock);
2623                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2624                         if (e->id == entry_id) {
2625                                 found = e;
2626                                 break;
2627                         }
2628                 ice_release_lock(&p->entries_lock);
2629
2630                 if (found)
2631                         break;
2632         }
2633
2634         ice_release_lock(&hw->fl_profs_locks[blk]);
2635
2636         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2637 }
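
/* Usage sketch (illustrative only; the entry ID below is an assumption):
 *
 *	u64 hndl = ice_flow_find_entry(hw, ICE_BLK_FD, 42);
 *
 *	if (hndl != ICE_FLOW_ENTRY_HANDLE_INVAL)
 *		ice_flow_rem_entry(hw, ICE_BLK_FD, hndl);
 *
 * The returned value is an opaque handle (effectively the entry pointer),
 * not the caller-supplied entry ID, so it must be compared against
 * ICE_FLOW_ENTRY_HANDLE_INVAL rather than against the ID itself.
 */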
2638
2639 /**
2640  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2641  * @hw: pointer to the hardware structure
2642  * @acts: array of actions to be performed on a match
2643  * @acts_cnt: number of actions
2644  * @cnt_alloc: indicates if an ACL counter has been allocated.
2645  */
2646 static enum ice_status
2647 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2648                            u8 acts_cnt, bool *cnt_alloc)
2649 {
2650         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2651         int i;
2652
2653         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2654         *cnt_alloc = false;
2655
2656         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2657                 return ICE_ERR_OUT_OF_RANGE;
2658
2659         for (i = 0; i < acts_cnt; i++) {
2660                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2661                     acts[i].type != ICE_FLOW_ACT_DROP &&
2662                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2663                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2664                         return ICE_ERR_CFG;
2665
2666                 /* If the caller wants to add two actions of the same type, then
2667                  * it is considered an invalid configuration.
2668                  */
2669                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2670                         return ICE_ERR_PARAM;
2671         }
2672
2673         /* Checks if ACL counters are needed. */
2674         for (i = 0; i < acts_cnt; i++) {
2675                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2676                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2677                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2678                         struct ice_acl_cntrs cntrs = { 0 };
2679                         enum ice_status status;
2680
2681                         cntrs.amount = 1;
2682                         cntrs.bank = 0; /* Only bank0 for the moment */
2683
2684                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2685                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2686                         else
2687                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2688
2689                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2690                         if (status)
2691                                 return status;
2692                         /* Counter index within the bank */
2693                         acts[i].data.acl_act.value =
2694                                                 CPU_TO_LE16(cntrs.first_cntr);
2695                         *cnt_alloc = true;
2696                 }
2697         }
2698
2699         return ICE_SUCCESS;
2700 }
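
/* Illustrative example of an action array this check accepts (the contents
 * are an assumption for the example): one forward-to-queue action plus one
 * packet counter. Adding the same action type twice in one array is
 * rejected with ICE_ERR_PARAM, and for a counter action the function
 * allocates an ACL counter and stores its index in data.acl_act.value,
 * setting *cnt_alloc so the caller can release it on failure.
 *
 *	struct ice_flow_action acts[2] = {
 *		{ .type = ICE_FLOW_ACT_FWD_QUEUE },
 *		{ .type = ICE_FLOW_ACT_CNTR_PKT },
 *	};
 */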
2701
2702 /**
2703  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2704  * @fld: number of the given field
2705  * @info: info about field
2706  * @range_buf: range checker configuration buffer
2707  * @data: pointer to a data buffer containing flow entry's match values/masks
2708  * @range: Input/output param indicating which range checkers are being used
2709  */
2710 static void
2711 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2712                               struct ice_aqc_acl_profile_ranges *range_buf,
2713                               u8 *data, u8 *range)
2714 {
2715         u16 new_mask;
2716
2717         /* If not specified, default mask is all bits in field */
2718         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2719                     BIT(ice_flds_info[fld].size) - 1 :
2720                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2721
2722         /* If the mask is 0, then we don't need to worry about this input
2723          * range checker value.
2724          */
2725         if (new_mask) {
2726                 u16 new_high =
2727                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2728                 u16 new_low =
2729                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2730                 u8 range_idx = info->entry.val;
2731
2732                 range_buf->checker_cfg[range_idx].low_boundary =
2733                         CPU_TO_BE16(new_low);
2734                 range_buf->checker_cfg[range_idx].high_boundary =
2735                         CPU_TO_BE16(new_high);
2736                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2737
2738                 /* Indicate which range checker is being used */
2739                 *range |= BIT(range_idx);
2740         }
2741 }
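
/* Worked example (illustrative numbers, assuming a 16-bit port field with
 * xtrct.disp == 0 and no explicit mask location): with the 16-bit value
 * 1024 stored at src.val and 5000 stored at src.last in the entry's data
 * buffer, new_mask becomes BIT(16) - 1 = 0xffff, so the range checker
 * selected by entry.val is programmed with low = 1024, high = 5000 and
 * mask = 0xffff (all stored big-endian), and its bit is set in *range.
 */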
2742
2743 /**
2744  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2745  * @fld: number of the given field
2746  * @info: info about the field
2747  * @buf: buffer containing the entry
2748  * @dontcare: buffer containing don't care mask for entry
2749  * @data: pointer to a data buffer containing flow entry's match values/masks
2750  */
2751 static void
2752 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2753                             u8 *dontcare, u8 *data)
2754 {
2755         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2756         bool use_mask = false;
2757         u8 disp;
2758
2759         src = info->src.val;
2760         mask = info->src.mask;
2761         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2762         disp = info->xtrct.disp % BITS_PER_BYTE;
2763
2764         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2765                 use_mask = true;
2766
2767         for (k = 0; k < info->entry.last; k++, dst++) {
2768                 /* Add overflow bits from previous byte */
2769                 buf[dst] = (tmp_s & 0xff00) >> 8;
2770
2771                 /* If mask is not valid, tmp_m is always zero, so this just sets
2772                  * dontcare to 0 (no masked bits). If mask is valid, this pulls in
2773                  * the overflow bits of the mask from the previous byte.
2774                  */
2775                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2776
2777                 /* If there is displacement, last byte will only contain
2778                  * displaced data, but there is no more data to read from user
2779                  * buffer, so skip so as not to potentially read beyond end of
2780                  * user buffer
2781                  */
2782                 if (!disp || k < info->entry.last - 1) {
2783                         /* Store shifted data to use in next byte */
2784                         tmp_s = data[src++] << disp;
2785
2786                         /* Add current (shifted) byte */
2787                         buf[dst] |= tmp_s & 0xff;
2788
2789                         /* Handle mask if valid */
2790                         if (use_mask) {
2791                                 tmp_m = (~data[mask++] & 0xff) << disp;
2792                                 dontcare[dst] |= tmp_m & 0xff;
2793                         }
2794                 }
2795         }
2796
2797         /* Fill in don't care bits at beginning of field */
2798         if (disp) {
2799                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2800                 for (k = 0; k < disp; k++)
2801                         dontcare[dst] |= BIT(k);
2802         }
2803
2804         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2805
2806         /* Fill in don't care bits at end of field */
2807         if (end_disp) {
2808                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2809                       info->entry.last - 1;
2810                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2811                         dontcare[dst] |= BIT(k);
2812         }
2813 }
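
/* Worked example (illustrative only): assume a one-byte field whose
 * extracted bits start four bits into the key byte (disp == 4), for which
 * info->entry.last is 2. The user value 0xAB is shifted left by four, so
 * the first key byte receives 0xB0 and the next key byte receives the
 * overflow nibble 0x0A. The four displaced bits at the start of the field
 * and the high bits of the final byte beyond (disp + size) are then marked
 * as don't care.
 */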
2814
2815 /**
2816  * ice_flow_acl_frmt_entry - Format ACL entry
2817  * @hw: pointer to the hardware structure
2818  * @prof: pointer to flow profile
2819  * @e: pointer to the flow entry
2820  * @data: pointer to a data buffer containing flow entry's match values/masks
2821  * @acts: array of actions to be performed on a match
2822  * @acts_cnt: number of actions
2823  *
2824  * Formats the key (and key_inverse) to be matched from the data passed in,
2825  * along with data from the flow profile. This key/key_inverse pair makes up
2826  * the 'entry' for an ACL flow entry.
2827  */
2828 static enum ice_status
2829 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2830                         struct ice_flow_entry *e, u8 *data,
2831                         struct ice_flow_action *acts, u8 acts_cnt)
2832 {
2833         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2834         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2835         enum ice_status status;
2836         bool cnt_alloc;
2837         u8 prof_id = 0;
2838         u16 i, buf_sz;
2839
2840         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2841         if (status)
2842                 return status;
2843
2844         /* Format the result action */
2845
2846         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2847         if (status)
2848                 return status;
2849
2850         status = ICE_ERR_NO_MEMORY;
2851
2852         e->acts = (struct ice_flow_action *)
2853                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2854                            ICE_NONDMA_TO_NONDMA);
2855         if (!e->acts)
2856                 goto out;
2857
2858         e->acts_cnt = acts_cnt;
2859
2860         /* Format the matching data */
2861         buf_sz = prof->cfg.scen->width;
2862         buf = (u8 *)ice_malloc(hw, buf_sz);
2863         if (!buf)
2864                 goto out;
2865
2866         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2867         if (!dontcare)
2868                 goto out;
2869
2870         /* The 'key' buffer will store both key and key_inverse, so it must be
2871          * twice the size of buf
2872          */
2873         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2874         if (!key)
2875                 goto out;
2876
2877         range_buf = (struct ice_aqc_acl_profile_ranges *)
2878                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2879         if (!range_buf)
2880                 goto out;
2881
2882         /* Set don't care mask to all 1's to start, will zero out used bytes */
2883         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2884
2885         for (i = 0; i < prof->segs_cnt; i++) {
2886                 struct ice_flow_seg_info *seg = &prof->segs[i];
2887                 u8 j;
2888
2889                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2890                                      ICE_FLOW_FIELD_IDX_MAX) {
2891                         struct ice_flow_fld_info *info = &seg->fields[j];
2892
2893                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2894                                 ice_flow_acl_frmt_entry_range(j, info,
2895                                                               range_buf, data,
2896                                                               &range);
2897                         else
2898                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2899                                                             dontcare, data);
2900                 }
2901
2902                 for (j = 0; j < seg->raws_cnt; j++) {
2903                         struct ice_flow_fld_info *info = &seg->raws[j].info;
2904                         u16 dst, src, mask, k;
2905                         bool use_mask = false;
2906
2907                         src = info->src.val;
2908                         dst = info->entry.val -
2909                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2910                         mask = info->src.mask;
2911
2912                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2913                                 use_mask = true;
2914
2915                         for (k = 0; k < info->entry.last; k++, dst++) {
2916                                 buf[dst] = data[src++];
2917                                 if (use_mask)
2918                                         dontcare[dst] = ~data[mask++];
2919                                 else
2920                                         dontcare[dst] = 0;
2921                         }
2922                 }
2923         }
2924
2925         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2926         dontcare[prof->cfg.scen->pid_idx] = 0;
2927
2928         /* Format the buffer for direction flags */
2929         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2930
2931         if (prof->dir == ICE_FLOW_RX)
2932                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2933
2934         if (range) {
2935                 buf[prof->cfg.scen->rng_chk_idx] = range;
2936                 /* Mark any unused range checkers as don't care */
2937                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2938                 e->range_buf = range_buf;
2939         } else {
2940                 ice_free(hw, range_buf);
2941         }
2942
2943         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2944                              buf_sz);
2945         if (status)
2946                 goto out;
2947
2948         e->entry = key;
2949         e->entry_sz = buf_sz * 2;
2950
2951 out:
2952         if (buf)
2953                 ice_free(hw, buf);
2954
2955         if (dontcare)
2956                 ice_free(hw, dontcare);
2957
2958         if (status && key)
2959                 ice_free(hw, key);
2960
2961         if (status && range_buf) {
2962                 ice_free(hw, range_buf);
2963                 e->range_buf = NULL;
2964         }
2965
2966         if (status && e->acts) {
2967                 ice_free(hw, e->acts);
2968                 e->acts = NULL;
2969                 e->acts_cnt = 0;
2970         }
2971
2972         if (status && cnt_alloc)
2973                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2974
2975         return status;
2976 }
2977
2978 /**
2979  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2980  *                                     the compared data.
2981  * @prof: pointer to flow profile
2982  * @e: pointer to the comparing flow entry
2983  * @do_chg_action: decide if we want to change the ACL action
2984  * @do_add_entry: decide if we want to add the new ACL entry
2985  * @do_rem_entry: decide if we want to remove the current ACL entry
2986  *
2987  * Find an ACL scenario entry that matches the compared data. At the same time,
2988  * this function also figures out:
2989  * a/ If we want to change the ACL action
2990  * b/ If we want to add the new ACL entry
2991  * c/ If we want to remove the current ACL entry
2992  */
2993 static struct ice_flow_entry *
2994 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2995                                   struct ice_flow_entry *e, bool *do_chg_action,
2996                                   bool *do_add_entry, bool *do_rem_entry)
2997 {
2998         struct ice_flow_entry *p, *return_entry = NULL;
2999         u8 i, j;
3000
3001         /* Check if:
3002          * a/ There exists an entry with the same matching data but a different
3003          *    priority, then we remove the existing ACL entry and add the new
3004          *    entry to the ACL scenario.
3005          * b/ There exists an entry with the same matching data, priority, and
3006          *    result action, then we do nothing.
3007          * c/ There exists an entry with the same matching data and priority, but
3008          *    a different action, then we only change the existing entry's action.
3009          * d/ Else, we add this new entry to the ACL scenario.
3010          */
3011         *do_chg_action = false;
3012         *do_add_entry = true;
3013         *do_rem_entry = false;
3014         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
3015                 if (memcmp(p->entry, e->entry, p->entry_sz))
3016                         continue;
3017
3018                 /* From this point, we have the same matching_data. */
3019                 *do_add_entry = false;
3020                 return_entry = p;
3021
3022                 if (p->priority != e->priority) {
3023                         /* matching data && !priority */
3024                         *do_add_entry = true;
3025                         *do_rem_entry = true;
3026                         break;
3027                 }
3028
3029                 /* From this point, we will have matching_data && priority */
3030                 if (p->acts_cnt != e->acts_cnt)
3031                         *do_chg_action = true;
3032                 for (i = 0; i < p->acts_cnt; i++) {
3033                         bool found_not_match = false;
3034
3035                         for (j = 0; j < e->acts_cnt; j++)
3036                                 if (memcmp(&p->acts[i], &e->acts[j],
3037                                            sizeof(struct ice_flow_action))) {
3038                                         found_not_match = true;
3039                                         break;
3040                                 }
3041
3042                         if (found_not_match) {
3043                                 *do_chg_action = true;
3044                                 break;
3045                         }
3046                 }
3047
3048                 /* (do_chg_action = true) means :
3049                  *    matching_data && priority && !result_action
3050                  * (do_chg_action = false) means :
3051                  *    matching_data && priority && result_action
3052                  */
3053                 break;
3054         }
3055
3056         return return_entry;
3057 }
3058
3059 /**
3060  * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
3061  * @p: flow priority
3062  */
3063 static enum ice_acl_entry_prio
3064 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
3065 {
3066         enum ice_acl_entry_prio acl_prio;
3067
3068         switch (p) {
3069         case ICE_FLOW_PRIO_LOW:
3070                 acl_prio = ICE_ACL_PRIO_LOW;
3071                 break;
3072         case ICE_FLOW_PRIO_NORMAL:
3073                 acl_prio = ICE_ACL_PRIO_NORMAL;
3074                 break;
3075         case ICE_FLOW_PRIO_HIGH:
3076                 acl_prio = ICE_ACL_PRIO_HIGH;
3077                 break;
3078         default:
3079                 acl_prio = ICE_ACL_PRIO_NORMAL;
3080                 break;
3081         }
3082
3083         return acl_prio;
3084 }
3085
3086 /**
3087  * ice_flow_acl_union_rng_chk - Perform union operation between two
3088  *                              range checker buffers
3089  * @dst_buf: pointer to destination range checker buffer
3090  * @src_buf: pointer to source range checker buffer
3091  *
3092  * This function performs the union of the dst_buf and src_buf range
3093  * checker buffers and saves the result back to dst_buf.
3094  */
3095 static enum ice_status
3096 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
3097                            struct ice_aqc_acl_profile_ranges *src_buf)
3098 {
3099         u8 i, j;
3100
3101         if (!dst_buf || !src_buf)
3102                 return ICE_ERR_BAD_PTR;
3103
3104         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
3105                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
3106                 bool will_populate = false;
3107
3108                 in_data = &src_buf->checker_cfg[i];
3109
3110                 if (!in_data->mask)
3111                         break;
3112
3113                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
3114                         cfg_data = &dst_buf->checker_cfg[j];
3115
3116                         if (!cfg_data->mask ||
3117                             !memcmp(cfg_data, in_data,
3118                                     sizeof(struct ice_acl_rng_data))) {
3119                                 will_populate = true;
3120                                 break;
3121                         }
3122                 }
3123
3124                 if (will_populate) {
3125                         ice_memcpy(cfg_data, in_data,
3126                                    sizeof(struct ice_acl_rng_data),
3127                                    ICE_NONDMA_TO_NONDMA);
3128                 } else {
3129                         /* No available slot left to program range checker */
3130                         return ICE_ERR_MAX_LIMIT;
3131                 }
3132         }
3133
3134         return ICE_SUCCESS;
3135 }
3136
3137 /**
3138  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
3139  * @hw: pointer to the hardware structure
3140  * @prof: pointer to flow profile
3141  * @entry: double pointer to the flow entry
3142  *
3143  * This function looks at the entries currently added to the
3144  * corresponding ACL scenario, then applies matching logic to decide
3145  * whether to add, modify, or do nothing with this new entry.
3146  */
3147 static enum ice_status
3148 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
3149                                  struct ice_flow_entry **entry)
3150 {
3151         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
3152         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
3153         struct ice_acl_act_entry *acts = NULL;
3154         struct ice_flow_entry *exist;
3155         enum ice_status status = ICE_SUCCESS;
3156         struct ice_flow_entry *e;
3157         u8 i;
3158
3159         if (!entry || !(*entry) || !prof)
3160                 return ICE_ERR_BAD_PTR;
3161
3162         e = *entry;
3163
3164         do_chg_rng_chk = false;
3165         if (e->range_buf) {
3166                 u8 prof_id = 0;
3167
3168                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
3169                                               &prof_id);
3170                 if (status)
3171                         return status;
3172
3173                 /* Query the current range-checker value in FW */
3174                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
3175                                                    NULL);
3176                 if (status)
3177                         return status;
3178                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
3179                            sizeof(struct ice_aqc_acl_profile_ranges),
3180                            ICE_NONDMA_TO_NONDMA);
3181
3182                 /* Generate the new range-checker value */
3183                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
3184                 if (status)
3185                         return status;
3186
3187                 /* Reconfigure the range check if the buffer is changed. */
3188                 do_chg_rng_chk = false;
3189                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
3190                            sizeof(struct ice_aqc_acl_profile_ranges))) {
3191                         status = ice_prog_acl_prof_ranges(hw, prof_id,
3192                                                           &cfg_rng_buf, NULL);
3193                         if (status)
3194                                 return status;
3195
3196                         do_chg_rng_chk = true;
3197                 }
3198         }
3199
3200         /* Figure out if we want to (change the ACL action) and/or
3201          * (Add the new ACL entry) and/or (Remove the current ACL entry)
3202          */
3203         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
3204                                                   &do_add_entry, &do_rem_entry);
3205         if (do_rem_entry) {
3206                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
3207                 if (status)
3208                         return status;
3209         }
3210
3211         /* Prepare the result action buffer */
3212         acts = (struct ice_acl_act_entry *)
3213                 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
3214         if (!acts)
3215                 return ICE_ERR_NO_MEMORY;
3216
3217         for (i = 0; i < e->acts_cnt; i++)
3218                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
3219                            sizeof(struct ice_acl_act_entry),
3220                            ICE_NONDMA_TO_NONDMA);
3221
3222         if (do_add_entry) {
3223                 enum ice_acl_entry_prio prio;
3224                 u8 *keys, *inverts;
3225                 u16 entry_idx;
3226
3227                 keys = (u8 *)e->entry;
3228                 inverts = keys + (e->entry_sz / 2);
3229                 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
3230
3231                 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
3232                                            inverts, acts, e->acts_cnt,
3233                                            &entry_idx);
3234                 if (status)
3235                         goto out;
3236
3237                 e->scen_entry_idx = entry_idx;
3238                 LIST_ADD(&e->l_entry, &prof->entries);
3239         } else {
3240                 if (do_chg_action) {
3241                         /* For the action memory info, update the SW's copy of
3242                          * the existing entry with e's action memory info
3243                          */
3244                         ice_free(hw, exist->acts);
3245                         exist->acts_cnt = e->acts_cnt;
3246                         exist->acts = (struct ice_flow_action *)
3247                                 ice_calloc(hw, exist->acts_cnt,
3248                                            sizeof(struct ice_flow_action));
3249                         if (!exist->acts) {
3250                                 status = ICE_ERR_NO_MEMORY;
3251                                 goto out;
3252                         }
3253
3254                         ice_memcpy(exist->acts, e->acts,
3255                                    sizeof(struct ice_flow_action) * e->acts_cnt,
3256                                    ICE_NONDMA_TO_NONDMA);
3257
3258                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3259                                                   e->acts_cnt,
3260                                                   exist->scen_entry_idx);
3261                         if (status)
3262                                 goto out;
3263                 }
3264
3265                 if (do_chg_rng_chk) {
3266                         /* In this case, we want to update the range checker
3267                          * information of the existing entry
3268                          */
3269                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
3270                                                             e->range_buf);
3271                         if (status)
3272                                 goto out;
3273                 }
3274
3275                 /* As we don't add the new entry to our SW DB, deallocate its
3276                  * memory, and return the existing entry to the caller
3277                  */
3278                 ice_dealloc_flow_entry(hw, e);
3279                 *(entry) = exist;
3280         }
3281 out:
3282         ice_free(hw, acts);
3283
3284         return status;
3285 }
3286
3287 /**
3288  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3289  * @hw: pointer to the hardware structure
3290  * @prof: pointer to flow profile
3291  * @e: double pointer to the flow entry
3292  */
3293 static enum ice_status
3294 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3295                             struct ice_flow_entry **e)
3296 {
3297         enum ice_status status;
3298
3299         ice_acquire_lock(&prof->entries_lock);
3300         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3301         ice_release_lock(&prof->entries_lock);
3302
3303         return status;
3304 }
3305
3306 /**
3307  * ice_flow_add_entry - Add a flow entry
3308  * @hw: pointer to the HW struct
3309  * @blk: classification stage
3310  * @prof_id: ID of the profile to add a new flow entry to
3311  * @entry_id: unique ID to identify this flow entry
3312  * @vsi_handle: software VSI handle for the flow entry
3313  * @prio: priority of the flow entry
3314  * @data: pointer to a data buffer containing flow entry's match values/masks
3315  * @acts: array of actions to be performed on a match
3316  * @acts_cnt: number of actions
3317  * @entry_h: pointer to buffer that receives the new flow entry's handle
3318  */
3319 enum ice_status
3320 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3321                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3322                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
3323                    u64 *entry_h)
3324 {
3325         struct ice_flow_entry *e = NULL;
3326         struct ice_flow_prof *prof;
3327         enum ice_status status = ICE_SUCCESS;
3328
3329         /* ACL entries must indicate an action */
3330         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3331                 return ICE_ERR_PARAM;
3332
3333         /* No flow entry data is expected for RSS */
3334         if (!entry_h || (!data && blk != ICE_BLK_RSS))
3335                 return ICE_ERR_BAD_PTR;
3336
3337         if (!ice_is_vsi_valid(hw, vsi_handle))
3338                 return ICE_ERR_PARAM;
3339
3340         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3341
3342         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3343         if (!prof) {
3344                 status = ICE_ERR_DOES_NOT_EXIST;
3345         } else {
3346                 /* Allocate memory for the entry being added and associate
3347                  * the VSI to the found flow profile
3348                  */
3349                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3350                 if (!e)
3351                         status = ICE_ERR_NO_MEMORY;
3352                 else
3353                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3354         }
3355
3356         ice_release_lock(&hw->fl_profs_locks[blk]);
3357         if (status)
3358                 goto out;
3359
3360         e->id = entry_id;
3361         e->vsi_handle = vsi_handle;
3362         e->prof = prof;
3363         e->priority = prio;
3364
3365         switch (blk) {
3366         case ICE_BLK_FD:
3367         case ICE_BLK_RSS:
3368                 break;
3369         case ICE_BLK_ACL:
3370                 /* ACL will handle the entry management */
3371                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3372                                                  acts_cnt);
3373                 if (status)
3374                         goto out;
3375
3376                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3377                 if (status)
3378                         goto out;
3379
3380                 break;
3381         default:
3382                 status = ICE_ERR_NOT_IMPL;
3383                 goto out;
3384         }
3385
3386         if (blk != ICE_BLK_ACL) {
3387                 /* ACL will handle the entry management */
3388                 ice_acquire_lock(&prof->entries_lock);
3389                 LIST_ADD(&e->l_entry, &prof->entries);
3390                 ice_release_lock(&prof->entries_lock);
3391         }
3392
3393         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3394
3395 out:
3396         if (status && e) {
3397                 if (e->entry)
3398                         ice_free(hw, e->entry);
3399                 ice_free(hw, e);
3400         }
3401
3402         return status;
3403 }
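
/* Usage sketch (illustrative only; the profile ID, entry ID, match
 * structure and VSI handle are assumptions for the example):
 *
 *	struct my_match { u32 dst_ip; } match = { .dst_ip = 0x0a000001 };
 *	u64 entry_h;
 *	enum ice_status status;
 *
 *	status = ice_flow_add_entry(hw, ICE_BLK_FD, 0x1234, 1, vsi_handle,
 *				    ICE_FLOW_PRIO_NORMAL, &match, NULL, 0,
 *				    &entry_h);
 *
 * The profile identified by prof_id must already exist (see
 * ice_flow_add_prof()), and the layout of the match buffer must agree with
 * the val/mask/last offsets registered via ice_flow_set_fld().
 */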
3404
3405 /**
3406  * ice_flow_rem_entry - Remove a flow entry
3407  * @hw: pointer to the HW struct
3408  * @blk: classification stage
3409  * @entry_h: handle to the flow entry to be removed
3410  */
3411 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3412                                    u64 entry_h)
3413 {
3414         struct ice_flow_entry *entry;
3415         struct ice_flow_prof *prof;
3416         enum ice_status status = ICE_SUCCESS;
3417
3418         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3419                 return ICE_ERR_PARAM;
3420
3421         entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
3422
3423         /* Retain the pointer to the flow profile as the entry will be freed */
3424         prof = entry->prof;
3425
3426         if (prof) {
3427                 ice_acquire_lock(&prof->entries_lock);
3428                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3429                 ice_release_lock(&prof->entries_lock);
3430         }
3431
3432         return status;
3433 }
3434
3435 /**
3436  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3437  * @seg: packet segment the field being set belongs to
3438  * @fld: field to be set
3439  * @field_type: type of the field
3440  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3441  *           entry's input buffer
3442  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3443  *            input buffer
3444  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3445  *            entry's input buffer
3446  *
3447  * This helper function stores information of a field being matched, including
3448  * the type of the field and the locations of the value to match, the mask, and
3449  * the upper-bound value in the start of the input buffer for a flow entry.
3450  * This function should only be used for fixed-size data structures.
3451  *
3452  * This function also opportunistically determines the protocol headers to be
3453  * present based on the fields being set. Some fields cannot be used alone to
3454  * determine the protocol headers present. Sometimes, fields for particular
3455  * protocol headers are not matched. In those cases, the protocol headers
3456  * must be explicitly set.
3457  */
3458 static void
3459 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3460                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3461                      u16 mask_loc, u16 last_loc)
3462 {
3463         u64 bit = BIT_ULL(fld);
3464
3465         seg->match |= bit;
3466         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3467                 seg->range |= bit;
3468
3469         seg->fields[fld].type = field_type;
3470         seg->fields[fld].src.val = val_loc;
3471         seg->fields[fld].src.mask = mask_loc;
3472         seg->fields[fld].src.last = last_loc;
3473
3474         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3475 }
3476
3477 /**
3478  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3479  * @seg: packet segment the field being set belongs to
3480  * @fld: field to be set
3481  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3482  *           entry's input buffer
3483  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3484  *            input buffer
3485  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3486  *            entry's input buffer
3487  * @range: indicate if field being matched is to be in a range
3488  *
3489  * This function specifies the locations, in the form of byte offsets from the
3490  * start of the input buffer for a flow entry, from where the value to match,
3491  * the mask value, and upper value can be extracted. These locations are then
3492  * stored in the flow profile. When adding a flow entry associated with the
3493  * flow profile, these locations will be used to quickly extract the values and
3494  * create the content of a match entry. This function should only be used for
3495  * fixed-size data structures.
3496  */
3497 void
3498 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3499                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3500 {
3501         enum ice_flow_fld_match_type t = range ?
3502                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3503
3504         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3505 }
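
/* Usage sketch (illustrative only; "struct my_entry_buf" and its layout are
 * assumptions, not a structure defined by the driver):
 *
 *	struct my_entry_buf {
 *		u16 dst_port;
 *		u16 dst_port_mask;
 *	};
 *
 *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
 *			 offsetof(struct my_entry_buf, dst_port),
 *			 offsetof(struct my_entry_buf, dst_port_mask),
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 *
 * Later, ice_flow_add_entry() reads the port value and mask from these
 * byte offsets within the caller-supplied data buffer.
 */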
3506
3507 /**
3508  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3509  * @seg: packet segment the field being set belongs to
3510  * @fld: field to be set
3511  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3512  *           entry's input buffer
3513  * @pref_loc: location of prefix value from entry's input buffer
3514  * @pref_sz: size of the location holding the prefix value
3515  *
3516  * This function specifies the locations, in the form of byte offsets from the
3517  * start of the input buffer for a flow entry, from where the value to match
3518  * and the IPv4 prefix value can be extracted. These locations are then stored
3519  * in the flow profile. When adding flow entries to the associated flow profile,
3520  * these locations can be used to quickly extract the values to create the
3521  * content of a match entry. This function should only be used for fixed-size
3522  * data structures.
3523  */
3524 void
3525 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3526                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3527 {
3528         /* For this type of field, the "mask" location is for the prefix value's
3529          * location and the "last" location is for the size of the location of
3530          * the prefix value.
3531          */
3532         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3533                              pref_loc, (u16)pref_sz);
3534 }
3535
3536 /**
3537  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3538  * @seg: packet segment the field being set belongs to
3539  * @off: offset of the raw field from the beginning of the segment in bytes
3540  * @len: length of the raw pattern to be matched
3541  * @val_loc: location of the value to match from entry's input buffer
3542  * @mask_loc: location of mask value from entry's input buffer
3543  *
3544  * This function specifies the offset of the raw field to be matched from the
3545  * beginning of the specified packet segment, and the locations, in the form of
3546  * byte offsets from the start of the input buffer for a flow entry, from where
3547  * the value to match and the mask value can be extracted. These locations are
3548  * then stored in the flow profile. When adding flow entries to the associated
3549  * flow profile, these locations can be used to quickly extract the values to
3550  * create the content of a match entry. This function should only be used for
3551  * fixed-size data structures.
3552  */
3553 void
3554 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3555                      u16 val_loc, u16 mask_loc)
3556 {
3557         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3558                 seg->raws[seg->raws_cnt].off = off;
3559                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3560                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3561                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3562                 /* The "last" field is used to store the length of the field */
3563                 seg->raws[seg->raws_cnt].info.src.last = len;
3564         }
3565
3566         /* Overflows of "raws" will be handled as an error condition later in
3567          * the flow when this information is processed.
3568          */
3569         seg->raws_cnt++;
3570 }
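
/* Usage sketch (illustrative only; the offsets and length are assumptions):
 * match four raw bytes located 12 bytes into the segment, taking the
 * pattern and its mask from offsets 0 and 4 of the entry's data buffer:
 *
 *	ice_flow_add_fld_raw(seg, 12, 4, 0, 4);
 *
 * A raws_cnt exceeding ICE_FLOW_SEG_RAW_FLD_MAX is only reported later,
 * when the profile's segments are processed, not at this call.
 */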
3571
3572 /**
3573  * ice_flow_rem_vsi_prof - remove VSI from flow profile
3574  * @hw: pointer to the hardware structure
3575  * @blk: classification stage
3576  * @vsi_handle: software VSI handle
3577  * @prof_id: unique ID to identify this flow profile
3578  *
3579  * This function removes the flow entries associated with the input
3580  * VSI handle and disassociates the VSI from the flow profile.
3581  */
3582 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3583                                       u64 prof_id)
3584 {
3585         struct ice_flow_prof *prof = NULL;
3586         enum ice_status status = ICE_SUCCESS;
3587
3588         if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3589                 return ICE_ERR_PARAM;
3590
3591         /* find flow profile pointer with input package block and profile id */
3592         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3593         if (!prof) {
3594                 ice_debug(hw, ICE_DBG_PKG,
3595                           "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3596                 return ICE_ERR_DOES_NOT_EXIST;
3597         }
3598
3599         /* Remove all remaining flow entries before removing the flow profile */
3600         if (!LIST_EMPTY(&prof->entries)) {
3601                 struct ice_flow_entry *e, *t;
3602
3603                 ice_acquire_lock(&prof->entries_lock);
3604                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
3605                                          l_entry) {
3606                         if (e->vsi_handle != vsi_handle)
3607                                 continue;
3608
3609                         status = ice_flow_rem_entry_sync(hw, blk, e);
3610                         if (status)
3611                                 break;
3612                 }
3613                 ice_release_lock(&prof->entries_lock);
3614         }
3615         if (status)
3616                 return status;
3617
3618         /* disassociate the flow profile from the SW VSI handle */
3619         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3620         if (status)
3621                 ice_debug(hw, ICE_DBG_PKG,
3622                           "ice_flow_disassoc_prof() failed with status=%d\n",
3623                           status);
3624         return status;
3625 }
3626
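/* Illustrative usage sketch (editorial addition, not part of the driver):
 * remove a VSI from a flow director profile; "vsi_handle" and "prof_id"
 * stand in for values the caller already tracks.
 *
 *	status = ice_flow_rem_vsi_prof(hw, ICE_BLK_FD, vsi_handle, prof_id);
 */
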
3627 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3628 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_ETH_NON_IP | ICE_FLOW_SEG_HDR_VLAN)
3629
3630 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3631         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3632
3633 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3634         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3635
3636 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3637         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3638          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3639          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3640
3641 /**
3642  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3643  * @segs: pointer to the flow field segment(s)
3644  * @seg_cnt: segment count
3645  * @cfg: configuration parameters
3646  *
3647  * Helper function to extract fields from the hash bitmap and use the flow
3648  * header values to set up the flow field segment(s) for later use when
3649  * adding or removing a flow profile entry.
3650  */
3651 static enum ice_status
3652 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3653                           const struct ice_rss_hash_cfg *cfg)
3654 {
3655         struct ice_flow_seg_info *seg;
3656         u64 val;
3657         u8 i;
3658
3659         /* set innermost segment */
3660         seg = &segs[seg_cnt - 1];
3661
3662         ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3663                              ICE_FLOW_FIELD_IDX_MAX)
3664                 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3665                                  ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3666                                  ICE_FLOW_FLD_OFF_INVAL, false);
3667
3668         ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3669
3670         /* set outermost header */
3671         if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3672                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3673                                                    ICE_FLOW_SEG_HDR_IPV_FRAG |
3674                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3675         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3676                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3677                                                    ICE_FLOW_SEG_HDR_IPV_FRAG |
3678                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3679         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE)
3680                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3681                                                    ICE_FLOW_SEG_HDR_GRE |
3682                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3683         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE)
3684                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3685                                                    ICE_FLOW_SEG_HDR_GRE |
3686                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3687
3688         if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3689             ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
3690             ~ICE_FLOW_SEG_HDR_IPV_FRAG)
3691                 return ICE_ERR_PARAM;
3692
3693         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3694         if (val && !ice_is_pow2(val))
3695                 return ICE_ERR_CFG;
3696
3697         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3698         if (val && !ice_is_pow2(val))
3699                 return ICE_ERR_CFG;
3700
3701         return ICE_SUCCESS;
3702 }
3703
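/* For reference (editorial note): the power-of-two checks above mean a hash
 * configuration may select at most one L3 and at most one L4 header per
 * segment. Illustrative values only:
 *
 *	ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP   -> accepted
 *	ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6  -> ICE_ERR_CFG
 */
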
3704 /**
3705  * ice_rem_vsi_rss_list - remove VSI from RSS list
3706  * @hw: pointer to the hardware structure
3707  * @vsi_handle: software VSI handle
3708  *
3709  * Remove the VSI from all RSS configurations in the list.
3710  */
3711 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3712 {
3713         struct ice_rss_cfg *r, *tmp;
3714
3715         if (LIST_EMPTY(&hw->rss_list_head))
3716                 return;
3717
3718         ice_acquire_lock(&hw->rss_locks);
3719         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3720                                  ice_rss_cfg, l_entry)
3721                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3722                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3723                                 LIST_DEL(&r->l_entry);
3724                                 ice_free(hw, r);
3725                         }
3726         ice_release_lock(&hw->rss_locks);
3727 }
3728
3729 /**
3730  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3731  * @hw: pointer to the hardware structure
3732  * @vsi_handle: software VSI handle
3733  *
3734  * This function will iterate through all flow profiles and disassociate
3735  * the VSI from each profile it is associated with. If a flow profile has
3736  * no VSIs left it will be removed.
3737  */
3738 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3739 {
3740         const enum ice_block blk = ICE_BLK_RSS;
3741         struct ice_flow_prof *p, *t;
3742         enum ice_status status = ICE_SUCCESS;
3743
3744         if (!ice_is_vsi_valid(hw, vsi_handle))
3745                 return ICE_ERR_PARAM;
3746
3747         if (LIST_EMPTY(&hw->fl_profs[blk]))
3748                 return ICE_SUCCESS;
3749
3750         ice_acquire_lock(&hw->rss_locks);
3751         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3752                                  l_entry)
3753                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3754                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3755                         if (status)
3756                                 break;
3757
3758                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3759                                 status = ice_flow_rem_prof(hw, blk, p->id);
3760                                 if (status)
3761                                         break;
3762                         }
3763                 }
3764         ice_release_lock(&hw->rss_locks);
3765
3766         return status;
3767 }
3768
3769 /**
3770  * ice_get_rss_hdr_type - get an RSS profile's header type
3771  * @prof: RSS flow profile
3772  */
3773 static enum ice_rss_cfg_hdr_type
3774 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3775 {
3776         enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3777
3778         if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3779                 hdr_type = ICE_RSS_OUTER_HEADERS;
3780         } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3781                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3782                         hdr_type = ICE_RSS_INNER_HEADERS;
3783                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3784                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3785                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3786                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3787         }
3788
3789         return hdr_type;
3790 }
3791
3792 /**
3793  * ice_rem_rss_list - remove RSS configuration from list
3794  * @hw: pointer to the hardware structure
3795  * @vsi_handle: software VSI handle
3796  * @prof: pointer to flow profile
3797  *
3798  * Assumption: lock has already been acquired for RSS list
3799  */
3800 static void
3801 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3802 {
3803         enum ice_rss_cfg_hdr_type hdr_type;
3804         struct ice_rss_cfg *r, *tmp;
3805
3806         /* Search for RSS hash fields associated with the VSI that match the
3807          * hash configuration associated with the flow profile. If found,
3808          * remove the entry from the VSI's RSS entry list and delete it.
3809          */
3810         hdr_type = ice_get_rss_hdr_type(prof);
3811         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3812                                  ice_rss_cfg, l_entry)
3813                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3814                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3815                     r->hash.hdr_type == hdr_type) {
3816                         ice_clear_bit(vsi_handle, r->vsis);
3817                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3818                                 LIST_DEL(&r->l_entry);
3819                                 ice_free(hw, r);
3820                         }
3821                         return;
3822                 }
3823 }
3824
3825 /**
3826  * ice_add_rss_list - add RSS configuration to list
3827  * @hw: pointer to the hardware structure
3828  * @vsi_handle: software VSI handle
3829  * @prof: pointer to flow profile
3830  *
3831  * Assumption: lock has already been acquired for RSS list
3832  */
3833 static enum ice_status
3834 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3835 {
3836         enum ice_rss_cfg_hdr_type hdr_type;
3837         struct ice_rss_cfg *r, *rss_cfg;
3838
3839         hdr_type = ice_get_rss_hdr_type(prof);
3840         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3841                             ice_rss_cfg, l_entry)
3842                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3843                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3844                     r->hash.hdr_type == hdr_type) {
3845                         ice_set_bit(vsi_handle, r->vsis);
3846                         return ICE_SUCCESS;
3847                 }
3848
3849         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3850         if (!rss_cfg)
3851                 return ICE_ERR_NO_MEMORY;
3852
3853         rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3854         rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3855         rss_cfg->hash.hdr_type = hdr_type;
3856         rss_cfg->hash.symm = prof->cfg.symm;
3857         ice_set_bit(vsi_handle, rss_cfg->vsis);
3858
3859         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3860
3861         return ICE_SUCCESS;
3862 }
3863
3864 #define ICE_FLOW_PROF_HASH_S    0
3865 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3866 #define ICE_FLOW_PROF_HDR_S     32
3867 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3868 #define ICE_FLOW_PROF_ENCAP_S   62
3869 #define ICE_FLOW_PROF_ENCAP_M   (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3870
3871 /* Flow profile ID format:
3872  * [0:31]  - Packet match fields
3873  * [32:61] - Protocol header
3874  * [62:63] - Encapsulation flag:
3875  *           0 if non-tunneled
3876  *           1 if tunneled
3877  *           2 if tunneled with outer IPv4
3878  *           3 if tunneled with outer IPv6
3879  */
3880 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3881         ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3882                (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3883                (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
3884
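/* Worked example (editorial addition, illustrative values only): for
 * hash == 0x3, hdr == 0x4 and the outer-IPv4 encapsulation flag (2),
 *
 *	ICE_FLOW_GEN_PROFID(0x3, 0x4, 2)
 *		== 0x0000000000000003ULL	(hash,        bits  0..31)
 *		 | 0x0000000400000000ULL	(hdr << 32,   bits 32..61)
 *		 | 0x8000000000000000ULL	(encap << 62, bits 62..63)
 *		== 0x8000000400000003ULL
 */
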
3885 static void
3886 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3887 {
3888         u32 s = ((src % 4) << 3); /* byte shift */
3889         u32 v = dst | 0x80; /* value to program */
3890         u8 i = src / 4; /* register index */
3891         u32 reg;
3892
3893         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3894         reg = (reg & ~(0xff << s)) | (v << s);
3895         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3896 }
3897
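/* Worked example (editorial addition, illustrative values only): for
 * src == 5 and dst == 6, ice_rss_config_xor_word() above resolves to
 * register index i == 5 / 4 == 1, bit shift s == (5 % 4) * 8 == 8 and
 * value v == 6 | 0x80 == 0x86, i.e. byte 1 of GLQF_HSYMM(prof_id, 1)
 * is programmed to 0x86.
 */
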
3898 static void
3899 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3900 {
3901         int fv_last_word =
3902                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3903         int i;
3904
3905         for (i = 0; i < len; i++) {
3906                 ice_rss_config_xor_word(hw, prof_id,
3907                                         /* Yes, field vector in GLQF_HSYMM and
3908                                          * GLQF_HINSET is reversed!
3909                                          */
3910                                         fv_last_word - (src + i),
3911                                         fv_last_word - (dst + i));
3912                 ice_rss_config_xor_word(hw, prof_id,
3913                                         fv_last_word - (dst + i),
3914                                         fv_last_word - (src + i));
3915         }
3916 }
3917
3918 static void
3919 ice_rss_update_symm(struct ice_hw *hw,
3920                     struct ice_flow_prof *prof)
3921 {
3922         struct ice_prof_map *map;
3923         u8 prof_id, m;
3924
3925         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3926         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3927         if (map)
3928                 prof_id = map->prof_id;
3929         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3930         if (!map)
3931                 return;
3932         /* clear to default */
3933         for (m = 0; m < 6; m++)
3934                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3935         if (prof->cfg.symm) {
3936                 struct ice_flow_seg_info *seg =
3937                         &prof->segs[prof->segs_cnt - 1];
3938
3939                 struct ice_flow_seg_xtrct *ipv4_src =
3940                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3941                 struct ice_flow_seg_xtrct *ipv4_dst =
3942                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3943                 struct ice_flow_seg_xtrct *ipv6_src =
3944                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3945                 struct ice_flow_seg_xtrct *ipv6_dst =
3946                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3947
3948                 struct ice_flow_seg_xtrct *tcp_src =
3949                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3950                 struct ice_flow_seg_xtrct *tcp_dst =
3951                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3952
3953                 struct ice_flow_seg_xtrct *udp_src =
3954                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3955                 struct ice_flow_seg_xtrct *udp_dst =
3956                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3957
3958                 struct ice_flow_seg_xtrct *sctp_src =
3959                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3960                 struct ice_flow_seg_xtrct *sctp_dst =
3961                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3962
3963                 /* xor IPv4 */
3964                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3965                         ice_rss_config_xor(hw, prof_id,
3966                                            ipv4_src->idx, ipv4_dst->idx, 2);
3967
3968                 /* xor IPv6 */
3969                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3970                         ice_rss_config_xor(hw, prof_id,
3971                                            ipv6_src->idx, ipv6_dst->idx, 8);
3972
3973                 /* xor TCP */
3974                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3975                         ice_rss_config_xor(hw, prof_id,
3976                                            tcp_src->idx, tcp_dst->idx, 1);
3977
3978                 /* xor UDP */
3979                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3980                         ice_rss_config_xor(hw, prof_id,
3981                                            udp_src->idx, udp_dst->idx, 1);
3982
3983                 /* xor SCTP */
3984                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3985                         ice_rss_config_xor(hw, prof_id,
3986                                            sctp_src->idx, sctp_dst->idx, 1);
3987         }
3988 }
3989
3990 /**
3991  * ice_add_rss_cfg_sync - add an RSS configuration
3992  * @hw: pointer to the hardware structure
3993  * @vsi_handle: software VSI handle
3994  * @cfg: configuration parameters
3995  *
3996  * Assumption: lock has already been acquired for RSS list
3997  */
3998 static enum ice_status
3999 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
4000                      const struct ice_rss_hash_cfg *cfg)
4001 {
4002         const enum ice_block blk = ICE_BLK_RSS;
4003         struct ice_flow_prof *prof = NULL;
4004         struct ice_flow_seg_info *segs;
4005         enum ice_status status;
4006         u8 segs_cnt;
4007
4008         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
4009                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
4010
4011         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
4012                                                       sizeof(*segs));
4013         if (!segs)
4014                 return ICE_ERR_NO_MEMORY;
4015
4016         /* Construct the packet segment info from the hashed fields */
4017         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
4018         if (status)
4019                 goto exit;
4020
4021         /* Search for a flow profile that has matching headers, hash fields,
4022          * and has the input VSI associated with it. If found, no further
4023          * operations are required, so exit.
4024          */
4025         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4026                                         vsi_handle,
4027                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
4028                                         ICE_FLOW_FIND_PROF_CHK_VSI);
4029         if (prof) {
4030                 if (prof->cfg.symm == cfg->symm)
4031                         goto exit;
4032                 prof->cfg.symm = cfg->symm;
4033                 goto update_symm;
4034         }
4035
4036         /* Check if a flow profile exists with the same protocol headers and
4037          * associated with the input VSI. If so, disassociate the VSI from
4038          * this profile. The VSI will be added to a new profile created with
4039          * the protocol header and new hash field configuration.
4040          */
4041         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4042                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
4043         if (prof) {
4044                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
4045                 if (!status)
4046                         ice_rem_rss_list(hw, vsi_handle, prof);
4047                 else
4048                         goto exit;
4049
4050                 /* Remove profile if it has no VSIs associated */
4051                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
4052                         status = ice_flow_rem_prof(hw, blk, prof->id);
4053                         if (status)
4054                                 goto exit;
4055                 }
4056         }
4057
4058         /* Search for a profile that has the same match fields only. If one
4059          * exists, then associate the VSI with this profile.
4060          */
4061         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4062                                         vsi_handle,
4063                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
4064         if (prof) {
4065                 if (prof->cfg.symm == cfg->symm) {
4066                         status = ice_flow_assoc_prof(hw, blk, prof,
4067                                                      vsi_handle);
4068                         if (!status)
4069                                 status = ice_add_rss_list(hw, vsi_handle,
4070                                                           prof);
4071                 } else {
4072                         /* if a profile exists but with a different symmetric
4073                          * hashing requirement, just return an error.
4074                          */
4075                         status = ICE_ERR_NOT_SUPPORTED;
4076                 }
4077                 goto exit;
4078         }
4079
4080         /* Create a new flow profile with the generated profile ID and packet
4081          * segment information.
4082          */
4083         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
4084                                    ICE_FLOW_GEN_PROFID(cfg->hash_flds,
4085                                                        segs[segs_cnt - 1].hdrs,
4086                                                        cfg->hdr_type),
4087                                    segs, segs_cnt, NULL, 0, &prof);
4088         if (status)
4089                 goto exit;
4090
4091         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
4092         /* If association with the new flow profile failed, then this profile
4093          * can be removed.
4094          */
4095         if (status) {
4096                 ice_flow_rem_prof(hw, blk, prof->id);
4097                 goto exit;
4098         }
4099
4100         status = ice_add_rss_list(hw, vsi_handle, prof);
4101
4102         prof->cfg.symm = cfg->symm;
4103 update_symm:
4104         ice_rss_update_symm(hw, prof);
4105
4106 exit:
4107         ice_free(hw, segs);
4108         return status;
4109 }
4110
4111 /**
4112  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
4113  * @hw: pointer to the hardware structure
4114  * @vsi_handle: software VSI handle
4115  * @cfg: configuration parameters
4116  *
4117  * This function will generate a flow profile based on the input fields to
4118  * hash on and the flow header type, and will use the VSI number to add a
4119  * flow entry to the profile.
4120  */
4121 enum ice_status
4122 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4123                 const struct ice_rss_hash_cfg *cfg)
4124 {
4125         struct ice_rss_hash_cfg local_cfg;
4126         enum ice_status status;
4127
4128         if (!ice_is_vsi_valid(hw, vsi_handle) ||
4129             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4130             cfg->hash_flds == ICE_HASH_INVALID)
4131                 return ICE_ERR_PARAM;
4132
4133         local_cfg = *cfg;
4134         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4135                 ice_acquire_lock(&hw->rss_locks);
4136                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4137                 ice_release_lock(&hw->rss_locks);
4138         } else {
4139                 ice_acquire_lock(&hw->rss_locks);
4140                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4141                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4142                 if (!status) {
4143                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4144                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
4145                                                       &local_cfg);
4146                 }
4147                 ice_release_lock(&hw->rss_locks);
4148         }
4149
4150         return status;
4151 }
4152
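/* Illustrative usage sketch (editorial addition, not part of the driver):
 * enable symmetric RSS hashing on the IPv4 source/destination addresses for
 * both outer and inner headers. The bitmap is built from raw field indices
 * here; in-tree callers may also use the pre-built ICE_FLOW_HASH_* macros
 * where available.
 *
 *	struct ice_rss_hash_cfg cfg = { 0 };
 *
 *	cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4;
 *	cfg.hash_flds = (1ULL << ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *			(1ULL << ICE_FLOW_FIELD_IDX_IPV4_DA);
 *	cfg.hdr_type = ICE_RSS_ANY_HEADERS;
 *	cfg.symm = true;
 *	status = ice_add_rss_cfg(hw, vsi_handle, &cfg);
 */
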
4153 /**
4154  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
4155  * @hw: pointer to the hardware structure
4156  * @vsi_handle: software VSI handle
4157  * @cfg: configuration parameters
4158  *
4159  * Assumption: lock has already been acquired for RSS list
4160  */
4161 static enum ice_status
4162 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
4163                      const struct ice_rss_hash_cfg *cfg)
4164 {
4165         const enum ice_block blk = ICE_BLK_RSS;
4166         struct ice_flow_seg_info *segs;
4167         struct ice_flow_prof *prof;
4168         enum ice_status status;
4169         u8 segs_cnt;
4170
4171         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
4172                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
4173         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
4174                                                       sizeof(*segs));
4175         if (!segs)
4176                 return ICE_ERR_NO_MEMORY;
4177
4178         /* Construct the packet segment info from the hashed fields */
4179         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
4180         if (status)
4181                 goto out;
4182
4183         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4184                                         vsi_handle,
4185                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
4186         if (!prof) {
4187                 status = ICE_ERR_DOES_NOT_EXIST;
4188                 goto out;
4189         }
4190
4191         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
4192         if (status)
4193                 goto out;
4194
4195         /* Remove RSS configuration from VSI context before deleting
4196          * the flow profile.
4197          */
4198         ice_rem_rss_list(hw, vsi_handle, prof);
4199
4200         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
4201                 status = ice_flow_rem_prof(hw, blk, prof->id);
4202
4203 out:
4204         ice_free(hw, segs);
4205         return status;
4206 }
4207
4208 /**
4209  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
4210  * @hw: pointer to the hardware structure
4211  * @vsi_handle: software VSI handle
4212  * @cfg: configuration parameters
4213  *
4214  * This function will look up the flow profile based on the input
4215  * hash field bitmap, iterate through the profile's entry list, and
4216  * find the entry associated with the input VSI to be removed. Calls
4217  * are made to the underlying flow APIs, which in turn build or
4218  * update buffers for the RSS XLT1 section.
4219  */
4220 enum ice_status
4221 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4222                 const struct ice_rss_hash_cfg *cfg)
4223 {
4224         struct ice_rss_hash_cfg local_cfg;
4225         enum ice_status status;
4226
4227         if (!ice_is_vsi_valid(hw, vsi_handle) ||
4228             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4229             cfg->hash_flds == ICE_HASH_INVALID)
4230                 return ICE_ERR_PARAM;
4231
4232         ice_acquire_lock(&hw->rss_locks);
4233         local_cfg = *cfg;
4234         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4235                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4236         } else {
4237                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4238                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4239
4240                 if (!status) {
4241                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4242                         status = ice_rem_rss_cfg_sync(hw, vsi_handle,
4243                                                       &local_cfg);
4244                 }
4245         }
4246         ice_release_lock(&hw->rss_locks);
4247
4248         return status;
4249 }
4250
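/* Illustrative usage sketch (editorial addition, not part of the driver):
 * removal takes the same ice_rss_hash_cfg contents that were used when the
 * configuration was added, e.g. the IPv4 address hash sketched above.
 *
 *	status = ice_rem_rss_cfg(hw, vsi_handle, &cfg);
 */
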
4251 /**
4252  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
4253  * @hw: pointer to the hardware structure
4254  * @vsi_handle: software VSI handle
4255  */
4256 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
4257 {
4258         enum ice_status status = ICE_SUCCESS;
4259         struct ice_rss_cfg *r;
4260
4261         if (!ice_is_vsi_valid(hw, vsi_handle))
4262                 return ICE_ERR_PARAM;
4263
4264         ice_acquire_lock(&hw->rss_locks);
4265         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4266                             ice_rss_cfg, l_entry) {
4267                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
4268                         status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
4269                         if (status)
4270                                 break;
4271                 }
4272         }
4273         ice_release_lock(&hw->rss_locks);
4274
4275         return status;
4276 }
4277
4278 /**
4279  * ice_get_rss_cfg - returns hashed fields for the given header types
4280  * @hw: pointer to the hardware structure
4281  * @vsi_handle: software VSI handle
4282  * @hdrs: protocol header type
4283  *
4284  * This function will return the match fields of the first flow profile
4285  * instance that has the given header types and contains the input VSI.
4286  */
4287 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4288 {
4289         u64 rss_hash = ICE_HASH_INVALID;
4290         struct ice_rss_cfg *r;
4291
4292         /* verify that the protocol header is non-zero and the VSI is valid */
4293         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4294                 return ICE_HASH_INVALID;
4295
4296         ice_acquire_lock(&hw->rss_locks);
4297         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4298                             ice_rss_cfg, l_entry)
4299                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4300                     r->hash.addl_hdrs == hdrs) {
4301                         rss_hash = r->hash.hash_flds;
4302                         break;
4303                 }
4304         ice_release_lock(&hw->rss_locks);
4305
4306         return rss_hash;
4307 }
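
/* Illustrative usage sketch (editorial addition, not part of the driver):
 * query which fields are hashed for plain IPv4 traffic on a VSI.
 *
 *	u64 flds = ice_get_rss_cfg(hw, vsi_handle, ICE_FLOW_SEG_HDR_IPV4);
 *
 * ICE_HASH_INVALID is returned when no matching configuration exists.
 */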