dpdk.git: drivers/net/ice/base/ice_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2021 Intel Corporation
3  */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE        2
10 #define ICE_FLOW_FLD_SZ_VLAN            2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IPV4_ID         2
17 #define ICE_FLOW_FLD_SZ_IPV6_ID         4
18 #define ICE_FLOW_FLD_SZ_IP_CHKSUM       2
19 #define ICE_FLOW_FLD_SZ_TCP_CHKSUM      2
20 #define ICE_FLOW_FLD_SZ_UDP_CHKSUM      2
21 #define ICE_FLOW_FLD_SZ_SCTP_CHKSUM     4
22 #define ICE_FLOW_FLD_SZ_IP_DSCP         1
23 #define ICE_FLOW_FLD_SZ_IP_TTL          1
24 #define ICE_FLOW_FLD_SZ_IP_PROT         1
25 #define ICE_FLOW_FLD_SZ_PORT            2
26 #define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
27 #define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
28 #define ICE_FLOW_FLD_SZ_ICMP_CODE       1
29 #define ICE_FLOW_FLD_SZ_ARP_OPER        2
30 #define ICE_FLOW_FLD_SZ_GRE_KEYID       4
31 #define ICE_FLOW_FLD_SZ_GTP_TEID        4
32 #define ICE_FLOW_FLD_SZ_GTP_QFI         2
33 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
34 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
35 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
36 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
37 #define ICE_FLOW_FLD_SZ_AH_SPI  4
38 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
39 #define ICE_FLOW_FLD_SZ_VXLAN_VNI       4
40 #define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
41
42 /* Describe properties of a protocol header field */
43 struct ice_flow_field_info {
44         enum ice_flow_seg_hdr hdr;
45         s16 off;        /* Offset from start of a protocol header, in bits */
46         u16 size;       /* Size of field in bits */
47         u16 mask;       /* 16-bit mask for field */
48 };
49
50 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
51         .hdr = _hdr, \
52         .off = (_offset_bytes) * BITS_PER_BYTE, \
53         .size = (_size_bytes) * BITS_PER_BYTE, \
54         .mask = 0, \
55 }
56
57 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
58         .hdr = _hdr, \
59         .off = (_offset_bytes) * BITS_PER_BYTE, \
60         .size = (_size_bytes) * BITS_PER_BYTE, \
61         .mask = _mask, \
62 }
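/* Editor's note: the two macros above only convert byte-based offsets and
 * sizes into bit units. The sketch below is a hypothetical illustration
 * (the guard macro is an assumption and is never defined) of what a single
 * table entry expands to, assuming BITS_PER_BYTE == 8.
 */
#ifdef ICE_FLOW_FLD_INFO_EXAMPLE /* never defined; illustration only */
/* IPv4 source address: byte 12 of the IPv4 header, 4 bytes wide */
static const struct ice_flow_field_info ice_fld_example =
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR);
/* ...which is equivalent to writing the initializer by hand:
 * { .hdr = ICE_FLOW_SEG_HDR_IPV4, .off = 96, .size = 32, .mask = 0 }
 */
#endif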
63
64 /* Table containing properties of supported protocol header fields */
65 static const
66 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
67         /* Ether */
68         /* ICE_FLOW_FIELD_IDX_ETH_DA */
69         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
70         /* ICE_FLOW_FIELD_IDX_ETH_SA */
71         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
72         /* ICE_FLOW_FIELD_IDX_S_VLAN */
73         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
74         /* ICE_FLOW_FIELD_IDX_C_VLAN */
75         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
76         /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
77         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
78         /* IPv4 / IPv6 */
79         /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
80         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
81                               0x00fc),
82         /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
83         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
84                               0x0ff0),
85         /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
86         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
87                               ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
88         /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
89         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
90                               ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
91         /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
92         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
93                               ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
94         /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
95         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
96                               ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
97         /* ICE_FLOW_FIELD_IDX_IPV4_SA */
98         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
99         /* ICE_FLOW_FIELD_IDX_IPV4_DA */
100         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
101         /* ICE_FLOW_FIELD_IDX_IPV6_SA */
102         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
103         /* ICE_FLOW_FIELD_IDX_IPV6_DA */
104         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
105         /* ICE_FLOW_FIELD_IDX_IPV4_CHKSUM */
106         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 10, ICE_FLOW_FLD_SZ_IP_CHKSUM),
107         /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
108         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
109                           ICE_FLOW_FLD_SZ_IPV4_ID),
110         /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
111         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
112                           ICE_FLOW_FLD_SZ_IPV6_ID),
113         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
114         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
115                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
116         /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
117         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
118                           ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
119         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
120         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
121                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
122         /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
123         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
124                           ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
125         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
126         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
127                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
128         /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
129         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
130                           ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
131         /* Transport */
132         /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
133         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
134         /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
135         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
136         /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
137         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
138         /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
139         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
140         /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
141         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
142         /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
143         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
144         /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
145         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
146         /* ICE_FLOW_FIELD_IDX_TCP_CHKSUM */
147         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 16, ICE_FLOW_FLD_SZ_TCP_CHKSUM),
148         /* ICE_FLOW_FIELD_IDX_UDP_CHKSUM */
149         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 6, ICE_FLOW_FLD_SZ_UDP_CHKSUM),
150         /* ICE_FLOW_FIELD_IDX_SCTP_CHKSUM */
151         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 8,
152                           ICE_FLOW_FLD_SZ_SCTP_CHKSUM),
153         /* ARP */
154         /* ICE_FLOW_FIELD_IDX_ARP_SIP */
155         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
156         /* ICE_FLOW_FIELD_IDX_ARP_DIP */
157         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
158         /* ICE_FLOW_FIELD_IDX_ARP_SHA */
159         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
160         /* ICE_FLOW_FIELD_IDX_ARP_DHA */
161         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
162         /* ICE_FLOW_FIELD_IDX_ARP_OP */
163         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
164         /* ICMP */
165         /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
166         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
167         /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
168         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
169         /* GRE */
170         /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
171         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
172         /* GTP */
173         /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
174         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
175                           ICE_FLOW_FLD_SZ_GTP_TEID),
176         /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
177         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
178                           ICE_FLOW_FLD_SZ_GTP_TEID),
179         /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
180         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
181                           ICE_FLOW_FLD_SZ_GTP_TEID),
182         /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
183         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
184                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
185         /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
186         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
187                           ICE_FLOW_FLD_SZ_GTP_TEID),
188         /* ICE_FLOW_FIELD_IDX_GTPU_UP_QFI */
189         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_UP, 22,
190                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
191         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
192         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
193                           ICE_FLOW_FLD_SZ_GTP_TEID),
194         /* ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI */
195         ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_DWN, 22,
196                               ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
197         /* PPPOE */
198         /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
199         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
200                           ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
201         /* PFCP */
202         /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
203         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
204                           ICE_FLOW_FLD_SZ_PFCP_SEID),
205         /* L2TPV3 */
206         /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
207         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
208                           ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
209         /* ESP */
210         /* ICE_FLOW_FIELD_IDX_ESP_SPI */
211         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
212                           ICE_FLOW_FLD_SZ_ESP_SPI),
213         /* AH */
214         /* ICE_FLOW_FIELD_IDX_AH_SPI */
215         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
216                           ICE_FLOW_FLD_SZ_AH_SPI),
217         /* NAT_T_ESP */
218         /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
219         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
220                           ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
221         /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
222         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
223                           ICE_FLOW_FLD_SZ_VXLAN_VNI),
224         /* ECPRI_TP0 */
225         /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
226         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
227                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
228         /* UDP_ECPRI_TP0 */
229         /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
230         ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
231                           ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
232 };
233
234 /* Bitmaps indicating relevant packet types for a particular protocol header
235  *
236  * Packet types for packets with an Outer/First/Single MAC header
237  */
238 static const u32 ice_ptypes_mac_ofos[] = {
239         0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
240         0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
241         0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
242         0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
243         0x00000000, 0x00000000, 0x00000000, 0x00000000,
244         0x00000000, 0x00000000, 0x00000000, 0x00000000,
245         0x00000000, 0x00000000, 0x00000000, 0x00000000,
246         0x00000000, 0x00000000, 0x00000000, 0x00000000,
247 };
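/* Editor's note: each ice_ptypes_*[] table in this file is a 32-word
 * (1024-bit) map indexed by hardware packet type (PTYPE). The layout
 * appears to be LSB-first within each word, i.e. PTYPE N lives in bit
 * (N % 32) of word (N / 32); for example, ice_ptypes_arp_of below has only
 * bit 11 of word 0 set, consistent with the ARP PTYPE. The helper below is
 * a hypothetical illustration only, not a driver API.
 */
#ifdef ICE_PTYPE_BITMAP_EXAMPLE /* never defined; illustration only */
static bool ice_example_ptype_in_map(const u32 *map, u16 ptype)
{
        /* 32 PTYPEs per u32 word */
        return (map[ptype / 32] >> (ptype % 32)) & 0x1;
}
#endif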
248
249 /* Packet types for packets with an Innermost/Last MAC VLAN header */
250 static const u32 ice_ptypes_macvlan_il[] = {
251         0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
252         0x0000077E, 0x00000000, 0x00000000, 0x00000000,
253         0x00000000, 0x00000000, 0x00000000, 0x00000000,
254         0x00000000, 0x00000000, 0x00000000, 0x00000000,
255         0x00000000, 0x00000000, 0x00000000, 0x00000000,
256         0x00000000, 0x00000000, 0x00000000, 0x00000000,
257         0x00000000, 0x00000000, 0x00000000, 0x00000000,
258         0x00000000, 0x00000000, 0x00000000, 0x00000000,
259 };
260
261 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
262  * does NOT include IPV4 other PTYPEs
263  */
264 static const u32 ice_ptypes_ipv4_ofos[] = {
265         0x1D800000, 0x24000800, 0x00000000, 0x00000000,
266         0x00000000, 0x00000155, 0x00000000, 0x00000000,
267         0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
268         0x00001500, 0x00000000, 0x00000000, 0x00000000,
269         0x00000000, 0x00000000, 0x00000000, 0x00000000,
270         0x00000000, 0x00000000, 0x00000000, 0x00000000,
271         0x00000000, 0x00000000, 0x00000000, 0x00000000,
272         0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 };
274
275 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
276  * includes IPV4 other PTYPEs
277  */
278 static const u32 ice_ptypes_ipv4_ofos_all[] = {
279         0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
280         0x00000000, 0x00000155, 0x00000000, 0x00000000,
281         0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
282         0x03FFD000, 0x00000000, 0x02FBEFBC, 0x00000000,
283         0x00000000, 0x00000000, 0x00000000, 0x00000000,
284         0x00000000, 0x00000000, 0x00000000, 0x00000000,
285         0x00000000, 0x00000000, 0x00000000, 0x00000000,
286         0x00000000, 0x00000000, 0x00000000, 0x00000000,
287 };
288
289 /* Packet types for packets with an Innermost/Last IPv4 header */
290 static const u32 ice_ptypes_ipv4_il[] = {
291         0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
292         0x0000000E, 0x00000000, 0x00000000, 0x00000000,
293         0x00000000, 0x00000000, 0x001FF800, 0x00100000,
294         0xFC0FC000, 0x00000000, 0xBC0BC0BC, 0x00000BC0,
295         0x00000000, 0x00000000, 0x00000000, 0x00000000,
296         0x00000000, 0x00000000, 0x00000000, 0x00000000,
297         0x00000000, 0x00000000, 0x00000000, 0x00000000,
298         0x00000000, 0x00000000, 0x00000000, 0x00000000,
299 };
300
301 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
302  * does NOT include IPV6 other PTYPEs
303  */
304 static const u32 ice_ptypes_ipv6_ofos[] = {
305         0x00000000, 0x00000000, 0x76000000, 0x10002000,
306         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
307         0x00000000, 0x03F00000, 0x00000540, 0x00000000,
308         0x00002A00, 0x00000000, 0x00000000, 0x00000000,
309         0x00000000, 0x00000000, 0x00000000, 0x00000000,
310         0x00000000, 0x00000000, 0x00000000, 0x00000000,
311         0x00000000, 0x00000000, 0x00000000, 0x00000000,
312         0x00000000, 0x00000000, 0x00000000, 0x00000000,
313 };
314
315 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
316  * includes IPV6 other PTYPEs
317  */
318 static const u32 ice_ptypes_ipv6_ofos_all[] = {
319         0x00000000, 0x00000000, 0x76000000, 0x1EFDE000,
320         0x00000000, 0x000002AA, 0x00000000, 0x00000000,
321         0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
322         0xFC002000, 0x0000003F, 0xBC000000, 0x0002FBEF,
323         0x00000000, 0x00000000, 0x00000000, 0x00000000,
324         0x00000000, 0x00000000, 0x00000000, 0x00000000,
325         0x00000000, 0x00000000, 0x00000000, 0x00000000,
326         0x00000000, 0x00000000, 0x00000000, 0x00000000,
327 };
328
329 /* Packet types for packets with an Innermost/Last IPv6 header */
330 static const u32 ice_ptypes_ipv6_il[] = {
331         0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
332         0x00000770, 0x00000000, 0x00000000, 0x00000000,
333         0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
334         0x03F00000, 0x0000003F, 0x02F02F00, 0x0002F02F,
335         0x00000000, 0x00000000, 0x00000000, 0x00000000,
336         0x00000000, 0x00000000, 0x00000000, 0x00000000,
337         0x00000000, 0x00000000, 0x00000000, 0x00000000,
338         0x00000000, 0x00000000, 0x00000000, 0x00000000,
339 };
340
341 /* Packet types for packets with an Outer/First/Single
342  * non-frag IPv4 header - no L4
343  */
344 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
345         0x10800000, 0x04000800, 0x00000000, 0x00000000,
346         0x00000000, 0x00000000, 0x00000000, 0x00000000,
347         0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
348         0x00001500, 0x00000000, 0x00000000, 0x00000000,
349         0x00000000, 0x00000000, 0x00000000, 0x00000000,
350         0x00000000, 0x00000000, 0x00000000, 0x00000000,
351         0x00000000, 0x00000000, 0x00000000, 0x00000000,
352         0x00000000, 0x00000000, 0x00000000, 0x00000000,
353 };
354
355 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
356 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
357         0x60000000, 0x18043008, 0x80000002, 0x6010c021,
358         0x00000008, 0x00000000, 0x00000000, 0x00000000,
359         0x00000000, 0x00000000, 0x00139800, 0x00000000,
360         0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
361         0x00000000, 0x00000000, 0x00000000, 0x00000000,
362         0x00000000, 0x00000000, 0x00000000, 0x00000000,
363         0x00000000, 0x00000000, 0x00000000, 0x00000000,
364         0x00000000, 0x00000000, 0x00000000, 0x00000000,
365 };
366
367 /* Packet types for packets with an Outer/First/Single
368  * non-frag IPv6 header - no L4
369  */
370 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
371         0x00000000, 0x00000000, 0x42000000, 0x10002000,
372         0x00000000, 0x00000000, 0x00000000, 0x00000000,
373         0x00000000, 0x02300000, 0x00000540, 0x00000000,
374         0x00002A00, 0x00000000, 0x00000000, 0x00000000,
375         0x00000000, 0x00000000, 0x00000000, 0x00000000,
376         0x00000000, 0x00000000, 0x00000000, 0x00000000,
377         0x00000000, 0x00000000, 0x00000000, 0x00000000,
378         0x00000000, 0x00000000, 0x00000000, 0x00000000,
379 };
380
381 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
382 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
383         0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
384         0x00000430, 0x00000000, 0x00000000, 0x00000000,
385         0x00000000, 0x00000000, 0x4e600000, 0x00000000,
386         0x02300000, 0x00000023, 0x00000000, 0x00000000,
387         0x00000000, 0x00000000, 0x00000000, 0x00000000,
388         0x00000000, 0x00000000, 0x00000000, 0x00000000,
389         0x00000000, 0x00000000, 0x00000000, 0x00000000,
390         0x00000000, 0x00000000, 0x00000000, 0x00000000,
391 };
392
393 /* Packet types for packets with an Outermost/First ARP header */
394 static const u32 ice_ptypes_arp_of[] = {
395         0x00000800, 0x00000000, 0x00000000, 0x00000000,
396         0x00000000, 0x00000000, 0x00000000, 0x00000000,
397         0x00000000, 0x00000000, 0x00000000, 0x00000000,
398         0x00000000, 0x00000000, 0x00000000, 0x00000000,
399         0x00000000, 0x00000000, 0x00000000, 0x00000000,
400         0x00000000, 0x00000000, 0x00000000, 0x00000000,
401         0x00000000, 0x00000000, 0x00000000, 0x00000000,
402         0x00000000, 0x00000000, 0x00000000, 0x00000000,
403 };
404
405 /* UDP Packet types for non-tunneled packets or tunneled
406  * packets with inner UDP.
407  */
408 static const u32 ice_ptypes_udp_il[] = {
409         0x81000000, 0x20204040, 0x04000010, 0x80810102,
410         0x00000040, 0x00000000, 0x00000000, 0x00000000,
411         0x00000000, 0x00410000, 0x908427E0, 0x00100007,
412         0x10410000, 0x00000004, 0x10410410, 0x00004104,
413         0x00000000, 0x00000000, 0x00000000, 0x00000000,
414         0x00000000, 0x00000000, 0x00000000, 0x00000000,
415         0x00000000, 0x00000000, 0x00000000, 0x00000000,
416         0x00000000, 0x00000000, 0x00000000, 0x00000000,
417 };
418
419 /* Packet types for packets with an Innermost/Last TCP header */
420 static const u32 ice_ptypes_tcp_il[] = {
421         0x04000000, 0x80810102, 0x10000040, 0x02040408,
422         0x00000102, 0x00000000, 0x00000000, 0x00000000,
423         0x00000000, 0x00820000, 0x21084000, 0x00000000,
424         0x20820000, 0x00000008, 0x20820820, 0x00008208,
425         0x00000000, 0x00000000, 0x00000000, 0x00000000,
426         0x00000000, 0x00000000, 0x00000000, 0x00000000,
427         0x00000000, 0x00000000, 0x00000000, 0x00000000,
428         0x00000000, 0x00000000, 0x00000000, 0x00000000,
429 };
430
431 /* Packet types for packets with an Innermost/Last SCTP header */
432 static const u32 ice_ptypes_sctp_il[] = {
433         0x08000000, 0x01020204, 0x20000081, 0x04080810,
434         0x00000204, 0x00000000, 0x00000000, 0x00000000,
435         0x00000000, 0x01040000, 0x00000000, 0x00000000,
436         0x41040000, 0x00000010, 0x00000000, 0x00000000,
437         0x00000000, 0x00000000, 0x00000000, 0x00000000,
438         0x00000000, 0x00000000, 0x00000000, 0x00000000,
439         0x00000000, 0x00000000, 0x00000000, 0x00000000,
440         0x00000000, 0x00000000, 0x00000000, 0x00000000,
441 };
442
443 /* Packet types for packets with an Outermost/First ICMP header */
444 static const u32 ice_ptypes_icmp_of[] = {
445         0x10000000, 0x00000000, 0x00000000, 0x00000000,
446         0x00000000, 0x00000000, 0x00000000, 0x00000000,
447         0x00000000, 0x00000000, 0x00000000, 0x00000000,
448         0x00000000, 0x00000000, 0x00000000, 0x00000000,
449         0x00000000, 0x00000000, 0x00000000, 0x00000000,
450         0x00000000, 0x00000000, 0x00000000, 0x00000000,
451         0x00000000, 0x00000000, 0x00000000, 0x00000000,
452         0x00000000, 0x00000000, 0x00000000, 0x00000000,
453 };
454
455 /* Packet types for packets with an Innermost/Last ICMP header */
456 static const u32 ice_ptypes_icmp_il[] = {
457         0x00000000, 0x02040408, 0x40000102, 0x08101020,
458         0x00000408, 0x00000000, 0x00000000, 0x00000000,
459         0x00000000, 0x00000000, 0x42108000, 0x00000000,
460         0x82080000, 0x00000020, 0x00000000, 0x00000000,
461         0x00000000, 0x00000000, 0x00000000, 0x00000000,
462         0x00000000, 0x00000000, 0x00000000, 0x00000000,
463         0x00000000, 0x00000000, 0x00000000, 0x00000000,
464         0x00000000, 0x00000000, 0x00000000, 0x00000000,
465 };
466
467 /* Packet types for packets with an Outermost/First GRE header */
468 static const u32 ice_ptypes_gre_of[] = {
469         0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
470         0x0000017E, 0x00000000, 0x00000000, 0x00000000,
471         0x00000000, 0x00000000, 0x00000000, 0x00000000,
472         0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF,
473         0x00000000, 0x00000000, 0x00000000, 0x00000000,
474         0x00000000, 0x00000000, 0x00000000, 0x00000000,
475         0x00000000, 0x00000000, 0x00000000, 0x00000000,
476         0x00000000, 0x00000000, 0x00000000, 0x00000000,
477 };
478
479 /* Packet types for packets with an Innermost/Last MAC header */
480 static const u32 ice_ptypes_mac_il[] = {
481         0x00000000, 0x20000000, 0x00000000, 0x00000000,
482         0x00000000, 0x00000000, 0x00000000, 0x00000000,
483         0x00000000, 0x00000000, 0x00000000, 0x00000000,
484         0x00000000, 0x00000000, 0x00000000, 0x00000000,
485         0x00000000, 0x00000000, 0x00000000, 0x00000000,
486         0x00000000, 0x00000000, 0x00000000, 0x00000000,
487         0x00000000, 0x00000000, 0x00000000, 0x00000000,
488         0x00000000, 0x00000000, 0x00000000, 0x00000000,
489 };
490
491 /* Packet types for GTPC */
492 static const u32 ice_ptypes_gtpc[] = {
493         0x00000000, 0x00000000, 0x00000000, 0x00000000,
494         0x00000000, 0x00000000, 0x00000000, 0x00000000,
495         0x00000000, 0x00000000, 0x000001E0, 0x00000000,
496         0x00000000, 0x00000000, 0x00000000, 0x00000000,
497         0x00000000, 0x00000000, 0x00000000, 0x00000000,
498         0x00000000, 0x00000000, 0x00000000, 0x00000000,
499         0x00000000, 0x00000000, 0x00000000, 0x00000000,
500         0x00000000, 0x00000000, 0x00000000, 0x00000000,
501 };
502
503 /* Packet types for VXLAN with VNI */
504 static const u32 ice_ptypes_vxlan_vni[] = {
505         0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
506         0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
507         0x00000000, 0x00000000, 0x00000000, 0x00000000,
508         0x00000000, 0x00000000, 0x00000000, 0x00000000,
509         0x00000000, 0x00000000, 0x00000000, 0x00000000,
510         0x00000000, 0x00000000, 0x00000000, 0x00000000,
511         0x00000000, 0x00000000, 0x00000000, 0x00000000,
512         0x00000000, 0x00000000, 0x00000000, 0x00000000,
513 };
514
515 /* Packet types for GTPC with TEID */
516 static const u32 ice_ptypes_gtpc_tid[] = {
517         0x00000000, 0x00000000, 0x00000000, 0x00000000,
518         0x00000000, 0x00000000, 0x00000000, 0x00000000,
519         0x00000000, 0x00000000, 0x00000060, 0x00000000,
520         0x00000000, 0x00000000, 0x00000000, 0x00000000,
521         0x00000000, 0x00000000, 0x00000000, 0x00000000,
522         0x00000000, 0x00000000, 0x00000000, 0x00000000,
523         0x00000000, 0x00000000, 0x00000000, 0x00000000,
524         0x00000000, 0x00000000, 0x00000000, 0x00000000,
525 };
526
527 /* Packet types for GTPU */
528 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
529         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
530         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
531         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
532         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
533         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
534         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
535         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
536         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
537         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
538         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
539         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
540         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
541         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
542         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
543         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
544         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
545         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
546         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
547         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
548         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
549         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
550         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
551         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
552         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
553         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
554         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
555         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
556         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
557         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
558         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
559         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
560         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
561         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
562         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
563         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
564         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
565         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
566         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
567         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
568         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
569         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
570         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
571         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
572         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
573         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
574         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
575         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
576         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
577         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
578         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
579         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
580         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
581         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
582         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
583         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_SESSION },
584         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_SESSION },
585         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_SESSION },
586         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_SESSION },
587         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_SESSION },
588         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_SESSION },
589 };
590
591 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
592         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
593         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
594         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
595         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
596         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
597         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
598         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
599         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
600         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
601         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
602         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
603         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
604         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
605         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
606         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
607         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
608         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
609         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
610         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
611         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
612         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
613         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
614         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
615         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
616         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
617         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
618         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
619         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
620         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
621         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
622         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
623         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
624         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
625         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
626         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
627         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
628         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
629         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
630         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
631         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
632         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
633         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
634         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
635         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
636         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
637         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
638         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
639         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
640         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
641         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
642         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
643         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
644         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
645         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
646         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
647         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_PDU_EH },
648         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_PDU_EH },
649         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_PDU_EH },
650         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_PDU_EH },
651         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_PDU_EH },
652 };
653
654 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
655         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
656         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
657         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
658         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
659         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
660         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
661         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
662         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
663         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
664         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
665         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
666         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
667         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
668         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
669         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
670         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
671         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
672         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
673         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
674         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
675         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
676         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
677         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
678         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
679         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
680         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
681         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
682         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
683         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
684         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
685         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
686         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
687         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
688         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
689         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
690         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
691         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
692         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
693         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
694         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
695         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
696         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
697         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
698         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
699         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
700         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
701         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
702         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
703         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
704         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
705         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
706         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
707         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
708         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
709         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
710         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
711         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
712         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
713         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_DOWNLINK },
714         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_DOWNLINK },
715 };
716
717 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
718         { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
719         { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
720         { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
721         { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
722         { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
723         { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
724         { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
725         { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
726         { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
727         { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
728         { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
729         { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
730         { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
731         { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
732         { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
733         { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
734         { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
735         { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
736         { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
737         { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
738         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
739         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
740         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
741         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
742         { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
743         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
744         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
745         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
746         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
747         { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
748         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
749         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
750         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
751         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
752         { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
753         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
754         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
755         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
756         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
757         { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
758         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
759         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
760         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
761         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
762         { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
763         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
764         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
765         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
766         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
767         { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
768         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
769         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
770         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
771         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
772         { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,     ICE_PTYPE_ATTR_GTP_UPLINK },
773         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,     ICE_PTYPE_ATTR_GTP_UPLINK },
774         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,      ICE_PTYPE_ATTR_GTP_UPLINK },
775         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,  ICE_PTYPE_ATTR_GTP_UPLINK },
776         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,      ICE_PTYPE_ATTR_GTP_UPLINK },
777         { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,   ICE_PTYPE_ATTR_GTP_UPLINK },
778 };
779
780 static const u32 ice_ptypes_gtpu[] = {
781         0x00000000, 0x00000000, 0x00000000, 0x00000000,
782         0x00000000, 0x00000000, 0x00000000, 0x00000000,
783         0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
784         0x00000000, 0x0000003F, 0xBEFBEFBC, 0x0002FBEF,
785         0x00000000, 0x00000000, 0x00000000, 0x00000000,
786         0x00000000, 0x00000000, 0x00000000, 0x00000000,
787         0x00000000, 0x00000000, 0x00000000, 0x00000000,
788         0x00000000, 0x00000000, 0x00000000, 0x00000000,
789 };
790
791 /* Packet types for pppoe */
792 static const u32 ice_ptypes_pppoe[] = {
793         0x00000000, 0x00000000, 0x00000000, 0x00000000,
794         0x00000000, 0x00000000, 0x00000000, 0x00000000,
795         0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
796         0x00000000, 0x00000000, 0x00000000, 0x00000000,
797         0x00000000, 0x00000000, 0x00000000, 0x00000000,
798         0x00000000, 0x00000000, 0x00000000, 0x00000000,
799         0x00000000, 0x00000000, 0x00000000, 0x00000000,
800         0x00000000, 0x00000000, 0x00000000, 0x00000000,
801 };
802
803 /* Packet types for packets with PFCP NODE header */
804 static const u32 ice_ptypes_pfcp_node[] = {
805         0x00000000, 0x00000000, 0x00000000, 0x00000000,
806         0x00000000, 0x00000000, 0x00000000, 0x00000000,
807         0x00000000, 0x00000000, 0x80000000, 0x00000002,
808         0x00000000, 0x00000000, 0x00000000, 0x00000000,
809         0x00000000, 0x00000000, 0x00000000, 0x00000000,
810         0x00000000, 0x00000000, 0x00000000, 0x00000000,
811         0x00000000, 0x00000000, 0x00000000, 0x00000000,
812         0x00000000, 0x00000000, 0x00000000, 0x00000000,
813 };
814
815 /* Packet types for packets with PFCP SESSION header */
816 static const u32 ice_ptypes_pfcp_session[] = {
817         0x00000000, 0x00000000, 0x00000000, 0x00000000,
818         0x00000000, 0x00000000, 0x00000000, 0x00000000,
819         0x00000000, 0x00000000, 0x00000000, 0x00000005,
820         0x00000000, 0x00000000, 0x00000000, 0x00000000,
821         0x00000000, 0x00000000, 0x00000000, 0x00000000,
822         0x00000000, 0x00000000, 0x00000000, 0x00000000,
823         0x00000000, 0x00000000, 0x00000000, 0x00000000,
824         0x00000000, 0x00000000, 0x00000000, 0x00000000,
825 };
826
827 /* Packet types for l2tpv3 */
828 static const u32 ice_ptypes_l2tpv3[] = {
829         0x00000000, 0x00000000, 0x00000000, 0x00000000,
830         0x00000000, 0x00000000, 0x00000000, 0x00000000,
831         0x00000000, 0x00000000, 0x00000000, 0x00000300,
832         0x00000000, 0x00000000, 0x00000000, 0x00000000,
833         0x00000000, 0x00000000, 0x00000000, 0x00000000,
834         0x00000000, 0x00000000, 0x00000000, 0x00000000,
835         0x00000000, 0x00000000, 0x00000000, 0x00000000,
836         0x00000000, 0x00000000, 0x00000000, 0x00000000,
837 };
838
839 /* Packet types for esp */
840 static const u32 ice_ptypes_esp[] = {
841         0x00000000, 0x00000000, 0x00000000, 0x00000000,
842         0x00000000, 0x00000003, 0x00000000, 0x00000000,
843         0x00000000, 0x00000000, 0x00000000, 0x00000000,
844         0x00000000, 0x00000000, 0x00000000, 0x00000000,
845         0x00000000, 0x00000000, 0x00000000, 0x00000000,
846         0x00000000, 0x00000000, 0x00000000, 0x00000000,
847         0x00000000, 0x00000000, 0x00000000, 0x00000000,
848         0x00000000, 0x00000000, 0x00000000, 0x00000000,
849 };
850
851 /* Packet types for ah */
852 static const u32 ice_ptypes_ah[] = {
853         0x00000000, 0x00000000, 0x00000000, 0x00000000,
854         0x00000000, 0x0000000C, 0x00000000, 0x00000000,
855         0x00000000, 0x00000000, 0x00000000, 0x00000000,
856         0x00000000, 0x00000000, 0x00000000, 0x00000000,
857         0x00000000, 0x00000000, 0x00000000, 0x00000000,
858         0x00000000, 0x00000000, 0x00000000, 0x00000000,
859         0x00000000, 0x00000000, 0x00000000, 0x00000000,
860         0x00000000, 0x00000000, 0x00000000, 0x00000000,
861 };
862
863 /* Packet types for packets with NAT_T ESP header */
864 static const u32 ice_ptypes_nat_t_esp[] = {
865         0x00000000, 0x00000000, 0x00000000, 0x00000000,
866         0x00000000, 0x00000030, 0x00000000, 0x00000000,
867         0x00000000, 0x00000000, 0x00000000, 0x00000000,
868         0x00000000, 0x00000000, 0x00000000, 0x00000000,
869         0x00000000, 0x00000000, 0x00000000, 0x00000000,
870         0x00000000, 0x00000000, 0x00000000, 0x00000000,
871         0x00000000, 0x00000000, 0x00000000, 0x00000000,
872         0x00000000, 0x00000000, 0x00000000, 0x00000000,
873 };
874
875 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
876         0x00000846, 0x00000000, 0x00000000, 0x00000000,
877         0x00000000, 0x00000000, 0x00000000, 0x00000000,
878         0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
879         0x00000000, 0x00000000, 0x00000000, 0x00000000,
880         0x00000000, 0x00000000, 0x00000000, 0x00000000,
881         0x00000000, 0x00000000, 0x00000000, 0x00000000,
882         0x00000000, 0x00000000, 0x00000000, 0x00000000,
883         0x00000000, 0x00000000, 0x00000000, 0x00000000,
884 };
885
886 static const u32 ice_ptypes_gtpu_no_ip[] = {
887         0x00000000, 0x00000000, 0x00000000, 0x00000000,
888         0x00000000, 0x00000000, 0x00000000, 0x00000000,
889         0x00000000, 0x00000000, 0x00000600, 0x00000000,
890         0x00000000, 0x00000000, 0x00000000, 0x00000000,
891         0x00000000, 0x00000000, 0x00000000, 0x00000000,
892         0x00000000, 0x00000000, 0x00000000, 0x00000000,
893         0x00000000, 0x00000000, 0x00000000, 0x00000000,
894         0x00000000, 0x00000000, 0x00000000, 0x00000000,
895 };
896
897 static const u32 ice_ptypes_ecpri_tp0[] = {
898         0x00000000, 0x00000000, 0x00000000, 0x00000000,
899         0x00000000, 0x00000000, 0x00000000, 0x00000000,
900         0x00000000, 0x00000000, 0x00000000, 0x00000400,
901         0x00000000, 0x00000000, 0x00000000, 0x00000000,
902         0x00000000, 0x00000000, 0x00000000, 0x00000000,
903         0x00000000, 0x00000000, 0x00000000, 0x00000000,
904         0x00000000, 0x00000000, 0x00000000, 0x00000000,
905         0x00000000, 0x00000000, 0x00000000, 0x00000000,
906 };
907
908 static const u32 ice_ptypes_udp_ecpri_tp0[] = {
909         0x00000000, 0x00000000, 0x00000000, 0x00000000,
910         0x00000000, 0x00000000, 0x00000000, 0x00000000,
911         0x00000000, 0x00000000, 0x00000000, 0x00100000,
912         0x00000000, 0x00000000, 0x00000000, 0x00000000,
913         0x00000000, 0x00000000, 0x00000000, 0x00000000,
914         0x00000000, 0x00000000, 0x00000000, 0x00000000,
915         0x00000000, 0x00000000, 0x00000000, 0x00000000,
916         0x00000000, 0x00000000, 0x00000000, 0x00000000,
917 };
918
919 static const u32 ice_ptypes_l2tpv2[] = {
920         0x00000000, 0x00000000, 0x00000000, 0x00000000,
921         0x00000000, 0x00000000, 0x00000000, 0x00000000,
922         0x00000000, 0x00000000, 0x00000000, 0x00000000,
923         0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
924         0x00000000, 0x00000000, 0x00000000, 0x00000000,
925         0x00000000, 0x00000000, 0x00000000, 0x00000000,
926         0x00000000, 0x00000000, 0x00000000, 0x00000000,
927         0x00000000, 0x00000000, 0x00000000, 0x00000000,
928 };
929
930 static const u32 ice_ptypes_ppp[] = {
931         0x00000000, 0x00000000, 0x00000000, 0x00000000,
932         0x00000000, 0x00000000, 0x00000000, 0x00000000,
933         0x00000000, 0x00000000, 0x00000000, 0x00000000,
934         0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
935         0x00000000, 0x00000000, 0x00000000, 0x00000000,
936         0x00000000, 0x00000000, 0x00000000, 0x00000000,
937         0x00000000, 0x00000000, 0x00000000, 0x00000000,
938         0x00000000, 0x00000000, 0x00000000, 0x00000000,
939 };
940
941 static const u32 ice_ptypes_ipv4_frag[] = {
942         0x00400000, 0x00000000, 0x00000000, 0x00000000,
943         0x00000000, 0x00000000, 0x00000000, 0x00000000,
944         0x00000000, 0x00000000, 0x00000000, 0x00000000,
945         0x00000000, 0x00000000, 0x00000000, 0x00000000,
946         0x00000000, 0x00000000, 0x00000000, 0x00000000,
947         0x00000000, 0x00000000, 0x00000000, 0x00000000,
948         0x00000000, 0x00000000, 0x00000000, 0x00000000,
949         0x00000000, 0x00000000, 0x00000000, 0x00000000,
950 };
951
952 static const u32 ice_ptypes_ipv6_frag[] = {
953         0x00000000, 0x00000000, 0x01000000, 0x00000000,
954         0x00000000, 0x00000000, 0x00000000, 0x00000000,
955         0x00000000, 0x00000000, 0x00000000, 0x00000000,
956         0x00000000, 0x00000000, 0x00000000, 0x00000000,
957         0x00000000, 0x00000000, 0x00000000, 0x00000000,
958         0x00000000, 0x00000000, 0x00000000, 0x00000000,
959         0x00000000, 0x00000000, 0x00000000, 0x00000000,
960         0x00000000, 0x00000000, 0x00000000, 0x00000000,
961 };
962
963 /* Manage parameters and info used during the creation of a flow profile */
964 struct ice_flow_prof_params {
965         enum ice_block blk;
966         u16 entry_length; /* # of bytes a formatted entry will require */
967         u8 es_cnt;
968         struct ice_flow_prof *prof;
969
970         /* For ACL, es[0] will hold the data of ICE_RX_MDID_PKT_FLAGS_15_0,
971          * which gives us the direction flags.
972          */
973         struct ice_fv_word es[ICE_MAX_FV_WORDS];
974         /* attr/attr_cnt can be used to add attributes to a particular PTYPE */
975         const struct ice_ptype_attributes *attr;
976         u16 attr_cnt;
977
978         u16 mask[ICE_MAX_FV_WORDS];
979         ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
980 };
981
982 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
983         (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
984         ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
985         ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
986         ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
987         ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
988         ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
989         ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP | ICE_FLOW_SEG_HDR_GRE)
990
991 #define ICE_FLOW_SEG_HDRS_L2_MASK       \
992         (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
993 #define ICE_FLOW_SEG_HDRS_L3_MASK       \
994         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
995          ICE_FLOW_SEG_HDR_ARP)
996 #define ICE_FLOW_SEG_HDRS_L4_MASK       \
997         (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
998          ICE_FLOW_SEG_HDR_SCTP)
999 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
1000 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
1001         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
1002
1003 /**
1004  * ice_flow_val_hdrs - validates packet segments for valid protocol headers
1005  * @segs: array of one or more packet segments that describe the flow
1006  * @segs_cnt: number of packet segments provided
1007  */
1008 static enum ice_status
1009 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
1010 {
1011         u8 i;
1012
1013         for (i = 0; i < segs_cnt; i++) {
1014                 /* Multiple L3 headers */
1015                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
1016                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
1017                         return ICE_ERR_PARAM;
1018
1019                 /* Multiple L4 headers */
1020                 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
1021                     !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
1022                         return ICE_ERR_PARAM;
1023         }
1024
1025         return ICE_SUCCESS;
1026 }
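
/* Worked example (illustrative): a segment with
 * hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP passes validation
 * because each masked L3 and L4 portion has exactly one bit set, while
 * hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 fails the
 * power-of-two check against ICE_FLOW_SEG_HDRS_L3_MASK and returns
 * ICE_ERR_PARAM.
 */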
1027
1028 /* Sizes of fixed known protocol headers without header options */
1029 #define ICE_FLOW_PROT_HDR_SZ_MAC        14
1030 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
1031 #define ICE_FLOW_PROT_HDR_SZ_IPV4       20
1032 #define ICE_FLOW_PROT_HDR_SZ_IPV6       40
1033 #define ICE_FLOW_PROT_HDR_SZ_ARP        28
1034 #define ICE_FLOW_PROT_HDR_SZ_ICMP       8
1035 #define ICE_FLOW_PROT_HDR_SZ_TCP        20
1036 #define ICE_FLOW_PROT_HDR_SZ_UDP        8
1037 #define ICE_FLOW_PROT_HDR_SZ_SCTP       12
1038
1039 /**
1040  * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
1041  * @params: information about the flow to be processed
1042  * @seg: index of packet segment whose header size is to be determined
1043  */
1044 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
1045 {
1046         u16 sz;
1047
1048         /* L2 headers */
1049         sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
1050                 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
1051
1052         /* L3 headers */
1053         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
1054                 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
1055         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
1056                 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
1057         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
1058                 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
1059         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
1060                 /* An L3 header is required if an L4 header is specified */
1061                 return 0;
1062
1063         /* L4 headers */
1064         if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
1065                 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
1066         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
1067                 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
1068         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
1069                 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
1070         else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
1071                 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
1072
1073         return sz;
1074 }
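
/* Worked example (illustrative): a segment with
 * hdrs = ICE_FLOW_SEG_HDR_VLAN | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP
 * sizes to ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (16) + ICE_FLOW_PROT_HDR_SZ_IPV4
 * (20) + ICE_FLOW_PROT_HDR_SZ_TCP (20) = 56 bytes. An L4-only segment
 * (e.g. TCP with no IPv4/IPv6/ARP header) yields 0, which callers such as
 * ice_flow_xtract_raws() treat as an error.
 */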
1075
1076 /**
1077  * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
1078  * @params: information about the flow to be processed
1079  *
1080  * This function identifies the packet types associated with the protocol
1081  * headers present in the packet segments of the specified flow profile.
1082  */
1083 static enum ice_status
1084 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
1085 {
1086         struct ice_flow_prof *prof;
1087         u8 i;
1088
1089         ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
1090                    ICE_NONDMA_MEM);
1091
1092         prof = params->prof;
1093
1094         for (i = 0; i < params->prof->segs_cnt; i++) {
1095                 const ice_bitmap_t *src;
1096                 u32 hdrs;
1097
1098                 hdrs = prof->segs[i].hdrs;
1099
1100                 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
1101                         src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
1102                                 (const ice_bitmap_t *)ice_ptypes_mac_il;
1103                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1104                                        ICE_FLOW_PTYPE_MAX);
1105                 }
1106
1107                 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
1108                         src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
1109                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1110                                        ICE_FLOW_PTYPE_MAX);
1111                 }
1112
1113                 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
1114                         ice_and_bitmap(params->ptypes, params->ptypes,
1115                                        (const ice_bitmap_t *)ice_ptypes_arp_of,
1116                                        ICE_FLOW_PTYPE_MAX);
1117                 }
1118
1119                 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
1120                         src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
1121                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1122                                        ICE_FLOW_PTYPE_MAX);
1123                 }
1124                 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1125                     (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1126                         src = i ?
1127                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
1128                                 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
1129                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1130                                        ICE_FLOW_PTYPE_MAX);
1131                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1132                            (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1133                         src = i ?
1134                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
1135                                 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
1136                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1137                                        ICE_FLOW_PTYPE_MAX);
1138                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1139                                 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1140                         src = (const ice_bitmap_t *)ice_ptypes_ipv4_frag;
1141                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1142                                        ICE_FLOW_PTYPE_MAX);
1143                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1144                                 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1145                         src = (const ice_bitmap_t *)ice_ptypes_ipv6_frag;
1146                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1147                                        ICE_FLOW_PTYPE_MAX);
1148                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1149                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1150                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
1151                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
1152                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1153                                        ICE_FLOW_PTYPE_MAX);
1154                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
1155                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
1156                                 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
1157                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1158                                        ICE_FLOW_PTYPE_MAX);
1159                 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1160                            !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1161                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
1162                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
1163                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1164                                        ICE_FLOW_PTYPE_MAX);
1165                 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
1166                         src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
1167                                 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
1168                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1169                                        ICE_FLOW_PTYPE_MAX);
1170                 }
1171
1172                 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
1173                         src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
1174                         ice_and_bitmap(params->ptypes, params->ptypes,
1175                                        src, ICE_FLOW_PTYPE_MAX);
1176                 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
1177                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1178                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1179                                        ICE_FLOW_PTYPE_MAX);
1180                 } else {
1181                         src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1182                         ice_andnot_bitmap(params->ptypes, params->ptypes, src,
1183                                           ICE_FLOW_PTYPE_MAX);
1184                 }
1185
1186                 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
1187                         src = (const ice_bitmap_t *)ice_ptypes_udp_il;
1188                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1189                                        ICE_FLOW_PTYPE_MAX);
1190                 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
1191                         ice_and_bitmap(params->ptypes, params->ptypes,
1192                                        (const ice_bitmap_t *)ice_ptypes_tcp_il,
1193                                        ICE_FLOW_PTYPE_MAX);
1194                 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
1195                         src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
1196                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1197                                        ICE_FLOW_PTYPE_MAX);
1198                 }
1199
1200                 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
1201                         src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
1202                                 (const ice_bitmap_t *)ice_ptypes_icmp_il;
1203                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1204                                        ICE_FLOW_PTYPE_MAX);
1205                 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
1206                         src = (const ice_bitmap_t *)ice_ptypes_gre_of;
1207                         ice_and_bitmap(params->ptypes, params->ptypes, src,
1208                                        ICE_FLOW_PTYPE_MAX);
1209                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
1210                         src = (const ice_bitmap_t *)ice_ptypes_gtpc;
1211                         ice_and_bitmap(params->ptypes, params->ptypes,
1212                                        src, ICE_FLOW_PTYPE_MAX);
1213                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
1214                         src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
1215                         ice_and_bitmap(params->ptypes, params->ptypes,
1216                                        src, ICE_FLOW_PTYPE_MAX);
1217                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
1218                         src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
1219                         ice_and_bitmap(params->ptypes, params->ptypes,
1220                                        src, ICE_FLOW_PTYPE_MAX);
1221                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
1222                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1223                         ice_and_bitmap(params->ptypes, params->ptypes,
1224                                        src, ICE_FLOW_PTYPE_MAX);
1225
1226                         /* Attributes for GTP packet with downlink */
1227                         params->attr = ice_attr_gtpu_down;
1228                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1229                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
1230                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1231                         ice_and_bitmap(params->ptypes, params->ptypes,
1232                                        src, ICE_FLOW_PTYPE_MAX);
1233
1234                         /* Attributes for GTP packet with uplink */
1235                         params->attr = ice_attr_gtpu_up;
1236                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1237                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
1238                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1239                         ice_and_bitmap(params->ptypes, params->ptypes,
1240                                        src, ICE_FLOW_PTYPE_MAX);
1241
1242                         /* Attributes for GTP packet with Extension Header */
1243                         params->attr = ice_attr_gtpu_eh;
1244                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1245                 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1246                         src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1247                         ice_and_bitmap(params->ptypes, params->ptypes,
1248                                        src, ICE_FLOW_PTYPE_MAX);
1249
1250                         /* Attributes for GTP packet without Extension Header */
1251                         params->attr = ice_attr_gtpu_session;
1252                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1253                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
1254                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
1255                         ice_and_bitmap(params->ptypes, params->ptypes,
1256                                        src, ICE_FLOW_PTYPE_MAX);
1257                 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1258                         src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1259                         ice_and_bitmap(params->ptypes, params->ptypes,
1260                                        src, ICE_FLOW_PTYPE_MAX);
1261                 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1262                         src = (const ice_bitmap_t *)ice_ptypes_esp;
1263                         ice_and_bitmap(params->ptypes, params->ptypes,
1264                                        src, ICE_FLOW_PTYPE_MAX);
1265                 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1266                         src = (const ice_bitmap_t *)ice_ptypes_ah;
1267                         ice_and_bitmap(params->ptypes, params->ptypes,
1268                                        src, ICE_FLOW_PTYPE_MAX);
1269                 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1270                         src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1271                         ice_and_bitmap(params->ptypes, params->ptypes,
1272                                        src, ICE_FLOW_PTYPE_MAX);
1273                 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1274                         src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1275                         ice_and_bitmap(params->ptypes, params->ptypes,
1276                                        src, ICE_FLOW_PTYPE_MAX);
1277                 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1278                         src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1279                         ice_and_bitmap(params->ptypes, params->ptypes,
1280                                        src, ICE_FLOW_PTYPE_MAX);
1281                 }
1282
1283                 if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
1284                         src = (const ice_bitmap_t *)ice_ptypes_ppp;
1285                         ice_and_bitmap(params->ptypes, params->ptypes,
1286                                        src, ICE_FLOW_PTYPE_MAX);
1287                 }
1288
1289                 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1290                         if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1291                                 src =
1292                                 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1293                         else
1294                                 src =
1295                                 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1296
1297                         ice_and_bitmap(params->ptypes, params->ptypes,
1298                                        src, ICE_FLOW_PTYPE_MAX);
1299                 } else {
1300                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1301                         ice_andnot_bitmap(params->ptypes, params->ptypes,
1302                                           src, ICE_FLOW_PTYPE_MAX);
1303
1304                         src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1305                         ice_andnot_bitmap(params->ptypes, params->ptypes,
1306                                           src, ICE_FLOW_PTYPE_MAX);
1307                 }
1308         }
1309
1310         return ICE_SUCCESS;
1311 }
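
/* Summary (illustrative): the routine above starts from an all-ones PTYPE
 * bitmap and repeatedly ANDs it with per-header PTYPE tables (and AND-NOTs
 * tables such as the PPPoE and PFCP ones when those headers are absent),
 * so the resulting params->ptypes holds only packet types compatible with
 * every header requested in every segment.
 */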
1312
1313 /**
1314  * ice_flow_xtract_pkt_flags - Create an extraction sequence entry for packet flags
1315  * @hw: pointer to the HW struct
1316  * @params: information about the flow to be processed
1317  * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1318  *
1319  * This function allocates an extraction sequence entry for a DWORD-sized
1320  * chunk of the packet flags.
1321  */
1322 static enum ice_status
1323 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1324                           struct ice_flow_prof_params *params,
1325                           enum ice_flex_mdid_pkt_flags flags)
1326 {
1327         u8 fv_words = hw->blk[params->blk].es.fvw;
1328         u8 idx;
1329
1330         /* Make sure the number of extraction sequence entries required does not
1331          * exceed the block's capacity.
1332          */
1333         if (params->es_cnt >= fv_words)
1334                 return ICE_ERR_MAX_LIMIT;
1335
1336         /* some blocks require a reversed field vector layout */
1337         if (hw->blk[params->blk].es.reverse)
1338                 idx = fv_words - params->es_cnt - 1;
1339         else
1340                 idx = params->es_cnt;
1341
1342         params->es[idx].prot_id = ICE_PROT_META_ID;
1343         params->es[idx].off = flags;
1344         params->es_cnt++;
1345
1346         return ICE_SUCCESS;
1347 }
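
/* Worked example (illustrative, using a hypothetical field-vector width of
 * fv_words = 48): with es_cnt = 0, a block with a reversed layout places
 * this entry at idx = 48 - 0 - 1 = 47, while a non-reversed block uses
 * idx = 0; in both cases es_cnt then advances to 1.
 */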
1348
1349 /**
1350  * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1351  * @hw: pointer to the HW struct
1352  * @params: information about the flow to be processed
1353  * @seg: packet segment index of the field to be extracted
1354  * @fld: ID of field to be extracted
1355  * @match: bit field of all fields being matched in this segment
1356  *
1357  * This function determines the protocol ID, offset, and size of the given
1358  * field. It then allocates one or more extraction sequence entries for the
1359  * given field, and fills the entries with protocol ID and offset information.
1360  */
1361 static enum ice_status
1362 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1363                     u8 seg, enum ice_flow_field fld, u64 match)
1364 {
1365         enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1366         enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1367         u8 fv_words = hw->blk[params->blk].es.fvw;
1368         struct ice_flow_fld_info *flds;
1369         u16 cnt, ese_bits, i;
1370         u16 sib_mask = 0;
1371         u16 mask;
1372         u16 off;
1373         bool exist;
1374
1375         flds = params->prof->segs[seg].fields;
1376
1377         switch (fld) {
1378         case ICE_FLOW_FIELD_IDX_ETH_DA:
1379         case ICE_FLOW_FIELD_IDX_ETH_SA:
1380         case ICE_FLOW_FIELD_IDX_S_VLAN:
1381         case ICE_FLOW_FIELD_IDX_C_VLAN:
1382                 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1383                 break;
1384         case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1385                 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1386                 break;
1387         case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1388                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1389                 break;
1390         case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1391                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1392                 break;
1393         case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1394         case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1395                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1396
1397                 /* TTL and PROT share the same extraction sequence entry,
1398                  * so each is considered a sibling of the other when that
1399                  * entry is allocated.
1400                  */
1401                 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1402                         sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1403                 else
1404                         sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1405
1406                 /* If the sibling field is also included, that field's
1407                  * mask needs to be included.
1408                  */
1409                 if (match & BIT(sib))
1410                         sib_mask = ice_flds_info[sib].mask;
1411                 break;
1412         case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1413         case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1414                 prot_id = ICE_PROT_IPV6_NEXT_PROTO;
1415                 exist = ice_check_ddp_support_proto_id(hw, prot_id);
1416                 if (!exist)
1417                         prot_id = seg == 0 ?
1418                                   ICE_PROT_IPV6_OF_OR_S :
1419                                   ICE_PROT_IPV6_IL;
1420                 else
1421                         prot_id = seg == 0 ?
1422                                   ICE_PROT_IPV6_NEXT_PROTO :
1423                                   ICE_PROT_IPV6_IL;
1424
1425                 /* TTL and PROT share the same extraction sequence entry,
1426                  * so each is considered a sibling of the other when that
1427                  * entry is allocated.
1428                  */
1429                 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1430                         sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1431                 else
1432                         sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1433
1434                 /* If the sibling field is also included, that field's
1435                  * mask needs to be included.
1436                  */
1437                 if (match & BIT(sib))
1438                         sib_mask = ice_flds_info[sib].mask;
1439                 break;
1440         case ICE_FLOW_FIELD_IDX_IPV4_SA:
1441         case ICE_FLOW_FIELD_IDX_IPV4_DA:
1442         case ICE_FLOW_FIELD_IDX_IPV4_CHKSUM:
1443                 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1444                 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1445                     params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1446                     seg == 1)
1447                         prot_id = ICE_PROT_IPV4_IL_IL;
1448                 break;
1449         case ICE_FLOW_FIELD_IDX_IPV4_ID:
1450                 prot_id = ICE_PROT_IPV4_OF_OR_S;
1451                 break;
1452         case ICE_FLOW_FIELD_IDX_IPV6_SA:
1453         case ICE_FLOW_FIELD_IDX_IPV6_DA:
1454         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1455         case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1456         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1457         case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1458         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1459         case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1460                 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1461                 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1462                     params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1463                     seg == 1)
1464                         prot_id = ICE_PROT_IPV6_IL_IL;
1465                 break;
1466         case ICE_FLOW_FIELD_IDX_IPV6_ID:
1467                 prot_id = ICE_PROT_IPV6_FRAG;
1468                 break;
1469         case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1470         case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1471         case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1472         case ICE_FLOW_FIELD_IDX_TCP_CHKSUM:
1473                 prot_id = ICE_PROT_TCP_IL;
1474                 break;
1475         case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1476         case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1477         case ICE_FLOW_FIELD_IDX_UDP_CHKSUM:
1478                 prot_id = ICE_PROT_UDP_IL_OR_S;
1479                 break;
1480         case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1481         case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1482         case ICE_FLOW_FIELD_IDX_SCTP_CHKSUM:
1483                 prot_id = ICE_PROT_SCTP_IL;
1484                 break;
1485         case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1486         case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1487         case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1488         case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1489         case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1490         case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1491         case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1492         case ICE_FLOW_FIELD_IDX_GTPU_UP_QFI:
1493         case ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI:
1494                 /* GTP is accessed through UDP OF protocol */
1495                 prot_id = ICE_PROT_UDP_OF;
1496                 break;
1497         case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1498                 prot_id = ICE_PROT_PPPOE;
1499                 break;
1500         case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1501                 prot_id = ICE_PROT_UDP_IL_OR_S;
1502                 break;
1503         case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1504                 prot_id = ICE_PROT_L2TPV3;
1505                 break;
1506         case ICE_FLOW_FIELD_IDX_ESP_SPI:
1507                 prot_id = ICE_PROT_ESP_F;
1508                 break;
1509         case ICE_FLOW_FIELD_IDX_AH_SPI:
1510                 prot_id = ICE_PROT_ESP_2;
1511                 break;
1512         case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1513                 prot_id = ICE_PROT_UDP_IL_OR_S;
1514                 break;
1515         case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1516                 prot_id = ICE_PROT_ECPRI;
1517                 break;
1518         case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1519                 prot_id = ICE_PROT_UDP_IL_OR_S;
1520                 break;
1521         case ICE_FLOW_FIELD_IDX_ARP_SIP:
1522         case ICE_FLOW_FIELD_IDX_ARP_DIP:
1523         case ICE_FLOW_FIELD_IDX_ARP_SHA:
1524         case ICE_FLOW_FIELD_IDX_ARP_DHA:
1525         case ICE_FLOW_FIELD_IDX_ARP_OP:
1526                 prot_id = ICE_PROT_ARP_OF;
1527                 break;
1528         case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1529         case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1530                 /* ICMP type and code share the same extraction seq. entry */
1531                 prot_id = (params->prof->segs[seg].hdrs &
1532                            ICE_FLOW_SEG_HDR_IPV4) ?
1533                         ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1534                 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1535                         ICE_FLOW_FIELD_IDX_ICMP_CODE :
1536                         ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1537                 break;
1538         case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1539                 prot_id = ICE_PROT_GRE_OF;
1540                 break;
1541         default:
1542                 return ICE_ERR_NOT_IMPL;
1543         }
1544
1545         /* Each extraction sequence entry is a word in size, and extracts a
1546          * word from a word-aligned offset within a protocol header.
1547          */
1548         ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1549
1550         flds[fld].xtrct.prot_id = prot_id;
1551         flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1552                 ICE_FLOW_FV_EXTRACT_SZ;
1553         flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1554         flds[fld].xtrct.idx = params->es_cnt;
1555         flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1556         if (prot_id == ICE_PROT_IPV6_NEXT_PROTO) {
1557                 flds[fld].xtrct.off = 0;
1558                 flds[fld].xtrct.disp = 0;
1559         }
1560
1561         /* Adjust the next field-entry index after accommodating the number of
1562          * entries this field consumes
1563          */
1564         cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1565                                   ice_flds_info[fld].size, ese_bits);
1566
1567         /* Fill in the extraction sequence entries needed for this field */
1568         off = flds[fld].xtrct.off;
1569         mask = flds[fld].xtrct.mask;
1570         for (i = 0; i < cnt; i++) {
1571                 /* Only consume an extraction sequence entry if there is no
1572                  * sibling field associated with this field, or the sibling
1573                  * does not already extract the word shared with this field.
1574                  */
1575                 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1576                     flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1577                     flds[sib].xtrct.off != off) {
1578                         u8 idx;
1579
1580                         /* Make sure the number of extraction sequence entries
1581                          * required does not exceed the block's capacity.
1582                          */
1583                         if (params->es_cnt >= fv_words)
1584                                 return ICE_ERR_MAX_LIMIT;
1585
1586                         /* some blocks require a reversed field vector layout */
1587                         if (hw->blk[params->blk].es.reverse)
1588                                 idx = fv_words - params->es_cnt - 1;
1589                         else
1590                                 idx = params->es_cnt;
1591
1592                         params->es[idx].prot_id = prot_id;
1593                         params->es[idx].off = off;
1594                         params->mask[idx] = mask | sib_mask;
1595                         params->es_cnt++;
1596                 }
1597
1598                 off += ICE_FLOW_FV_EXTRACT_SZ;
1599         }
1600
1601         return ICE_SUCCESS;
1602 }
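
/* Worked example (illustrative, assuming ICE_FLOW_FV_EXTRACT_SZ is 2 bytes,
 * i.e. ese_bits = 16): for a hypothetical 2-byte field at bit offset 104 of
 * its header,
 *   xtrct.off  = (104 / 16) * 2 = 12 bytes
 *   xtrct.disp = 104 % 16       = 8 bits
 *   cnt        = DIVIDE_AND_ROUND_UP(8 + 16, 16) = 2
 * so the field spans two word-sized extraction entries, at offsets 12 and
 * 14, unless a sibling field already extracts those words.
 */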
1603
1604 /**
1605  * ice_flow_xtract_raws - Create extraction sequence entries for raw bytes
1606  * @hw: pointer to the HW struct
1607  * @params: information about the flow to be processed
1608  * @seg: index of packet segment whose raw fields are to be extracted
1609  */
1610 static enum ice_status
1611 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1612                      u8 seg)
1613 {
1614         u16 fv_words;
1615         u16 hdrs_sz;
1616         u8 i;
1617
1618         if (!params->prof->segs[seg].raws_cnt)
1619                 return ICE_SUCCESS;
1620
1621         if (params->prof->segs[seg].raws_cnt >
1622             ARRAY_SIZE(params->prof->segs[seg].raws))
1623                 return ICE_ERR_MAX_LIMIT;
1624
1625         /* Offsets within the segment headers are not supported */
1626         hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1627         if (!hdrs_sz)
1628                 return ICE_ERR_PARAM;
1629
1630         fv_words = hw->blk[params->blk].es.fvw;
1631
1632         for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1633                 struct ice_flow_seg_fld_raw *raw;
1634                 u16 off, cnt, j;
1635
1636                 raw = &params->prof->segs[seg].raws[i];
1637
1638                 /* Storing extraction information */
1639                 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1640                 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1641                         ICE_FLOW_FV_EXTRACT_SZ;
1642                 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1643                         BITS_PER_BYTE;
1644                 raw->info.xtrct.idx = params->es_cnt;
1645
1646                 /* Determine the number of field vector entries this raw field
1647                  * consumes.
1648                  */
1649                 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1650                                           (raw->info.src.last * BITS_PER_BYTE),
1651                                           (ICE_FLOW_FV_EXTRACT_SZ *
1652                                            BITS_PER_BYTE));
1653                 off = raw->info.xtrct.off;
1654                 for (j = 0; j < cnt; j++) {
1655                         u16 idx;
1656
1657                         /* Make sure the number of extraction sequence entries
1658                          * required does not exceed the block's capacity.
1659                          */
1660                         if (params->es_cnt >= hw->blk[params->blk].es.count ||
1661                             params->es_cnt >= ICE_MAX_FV_WORDS)
1662                                 return ICE_ERR_MAX_LIMIT;
1663
1664                         /* some blocks require a reversed field vector layout */
1665                         if (hw->blk[params->blk].es.reverse)
1666                                 idx = fv_words - params->es_cnt - 1;
1667                         else
1668                                 idx = params->es_cnt;
1669
1670                         params->es[idx].prot_id = raw->info.xtrct.prot_id;
1671                         params->es[idx].off = off;
1672                         params->es_cnt++;
1673                         off += ICE_FLOW_FV_EXTRACT_SZ;
1674                 }
1675         }
1676
1677         return ICE_SUCCESS;
1678 }
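
/* Worked example (illustrative, assuming ICE_FLOW_FV_EXTRACT_SZ is 2 bytes):
 * a 4-byte raw match starting at byte offset 3 gives
 *   xtrct.off  = (3 / 2) * 2 = 2
 *   xtrct.disp = (3 % 2) * 8 = 8 bits
 *   cnt        = DIVIDE_AND_ROUND_UP(8 + 32, 16) = 3
 * i.e. three extraction entries covering bytes 2-7 of the segment.
 */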
1679
1680 /**
1681  * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1682  * @hw: pointer to the HW struct
1683  * @params: information about the flow to be processed
1684  *
1685  * This function iterates through all matched fields in the given segments, and
1686  * creates an extraction sequence for the fields.
1687  */
1688 static enum ice_status
1689 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1690                           struct ice_flow_prof_params *params)
1691 {
1692         enum ice_status status = ICE_SUCCESS;
1693         u8 i;
1694
1695         /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1696          * packet flags
1697          */
1698         if (params->blk == ICE_BLK_ACL) {
1699                 status = ice_flow_xtract_pkt_flags(hw, params,
1700                                                    ICE_RX_MDID_PKT_FLAGS_15_0);
1701                 if (status)
1702                         return status;
1703         }
1704
1705         for (i = 0; i < params->prof->segs_cnt; i++) {
1706                 u64 match = params->prof->segs[i].match;
1707                 enum ice_flow_field j;
1708
1709                 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1710                                      ICE_FLOW_FIELD_IDX_MAX) {
1711                         status = ice_flow_xtract_fld(hw, params, i, j, match);
1712                         if (status)
1713                                 return status;
1714                         ice_clear_bit(j, (ice_bitmap_t *)&match);
1715                 }
1716
1717                 /* Process raw matching bytes */
1718                 status = ice_flow_xtract_raws(hw, params, i);
1719                 if (status)
1720                         return status;
1721         }
1722
1723         return status;
1724 }
1725
1726 /**
1727  * ice_flow_sel_acl_scen - select the best-fit ACL scenario for a profile
1728  * @hw: pointer to the hardware structure
1729  * @params: information about the flow to be processed
1730  *
1731  * This function selects the narrowest scenario whose effective width can
1732  * accommodate the entry length computed for the profile.
1733  */
1734 static enum ice_status
1735 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1736 {
1737         /* Find the best-fit scenario for the provided match width */
1738         struct ice_acl_scen *cand_scen = NULL, *scen;
1739
1740         if (!hw->acl_tbl)
1741                 return ICE_ERR_DOES_NOT_EXIST;
1742
1743         /* Loop through each scenario and match against the scenario width
1744          * to select the specific scenario
1745          */
1746         LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1747                 if (scen->eff_width >= params->entry_length &&
1748                     (!cand_scen || cand_scen->eff_width > scen->eff_width))
1749                         cand_scen = scen;
1750         if (!cand_scen)
1751                 return ICE_ERR_DOES_NOT_EXIST;
1752
1753         params->prof->cfg.scen = cand_scen;
1754
1755         return ICE_SUCCESS;
1756 }
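
/* Illustrative example (hypothetical widths): with scenarios of effective
 * widths 32, 64 and 128 and params->entry_length = 50, the loop above
 * skips 32 (too narrow) and prefers 64 over 128, i.e. it selects the
 * narrowest scenario that can still hold the entry.
 */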
1757
1758 /**
1759  * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1760  * @params: information about the flow to be processed
1761  */
1762 static enum ice_status
1763 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1764 {
1765         u16 index, i, range_idx = 0;
1766
1767         index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1768
1769         for (i = 0; i < params->prof->segs_cnt; i++) {
1770                 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1771                 u8 j;
1772
1773                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1774                                      ICE_FLOW_FIELD_IDX_MAX) {
1775                         struct ice_flow_fld_info *fld = &seg->fields[j];
1776
1777                         fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1778
1779                         if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1780                                 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1781
1782                                 /* Range checking only supported for single
1783                                  * words
1784                                  */
1785                                 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1786                                                         fld->xtrct.disp,
1787                                                         BITS_PER_BYTE * 2) > 1)
1788                                         return ICE_ERR_PARAM;
1789
1790                                 /* Ranges must define low and high values */
1791                                 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1792                                     fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1793                                         return ICE_ERR_PARAM;
1794
1795                                 fld->entry.val = range_idx++;
1796                         } else {
1797                                 /* Store adjusted byte-length of field for later
1798                                  * use, taking into account potential
1799                                  * non-byte-aligned displacement
1800                                  */
1801                                 fld->entry.last = DIVIDE_AND_ROUND_UP
1802                                         (ice_flds_info[j].size +
1803                                          (fld->xtrct.disp % BITS_PER_BYTE),
1804                                          BITS_PER_BYTE);
1805                                 fld->entry.val = index;
1806                                 index += fld->entry.last;
1807                         }
1808                 }
1809
1810                 for (j = 0; j < seg->raws_cnt; j++) {
1811                         struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1812
1813                         raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1814                         raw->info.entry.val = index;
1815                         raw->info.entry.last = raw->info.src.last;
1816                         index += raw->info.entry.last;
1817                 }
1818         }
1819
1820         /* Currently only the byte selection base is supported, which allows
1821          * for an effective entry size of at most 30 bytes. Reject anything
1822          * larger.
1823          */
1824         if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1825                 return ICE_ERR_PARAM;
1826
1827         /* Only 8 range checkers are available per profile; reject anything
1828          * trying to use more.
1829          */
1830         if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1831                 return ICE_ERR_PARAM;
1832
1833         /* Store # bytes required for entry for later use */
1834         params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1835
1836         return ICE_SUCCESS;
1837 }
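
/* Worked example (illustrative): two byte-aligned, non-range fields of 2
 * and 4 bytes are laid out back to back starting at
 * ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX, so index advances by 6 and
 * params->entry_length ends up as 6. A range-checked field instead
 * consumes one of the ICE_AQC_ACL_PROF_RANGES_NUM_CFG range checkers and
 * does not advance index.
 */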
1838
1839 /**
1840  * ice_flow_proc_segs - process all packet segments associated with a profile
1841  * @hw: pointer to the HW struct
1842  * @params: information about the flow to be processed
1843  */
1844 static enum ice_status
1845 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1846 {
1847         enum ice_status status;
1848
1849         status = ice_flow_proc_seg_hdrs(params);
1850         if (status)
1851                 return status;
1852
1853         status = ice_flow_create_xtrct_seq(hw, params);
1854         if (status)
1855                 return status;
1856
1857         switch (params->blk) {
1858         case ICE_BLK_FD:
1859         case ICE_BLK_RSS:
1860                 status = ICE_SUCCESS;
1861                 break;
1862         case ICE_BLK_ACL:
1863                 status = ice_flow_acl_def_entry_frmt(params);
1864                 if (status)
1865                         return status;
1866                 status = ice_flow_sel_acl_scen(hw, params);
1867                 if (status)
1868                         return status;
1869                 break;
1870         default:
1871                 return ICE_ERR_NOT_IMPL;
1872         }
1873
1874         return status;
1875 }
1876
1877 #define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1878 #define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1879 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1880
1881 /**
1882  * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1883  * @hw: pointer to the HW struct
1884  * @blk: classification stage
1885  * @dir: flow direction
1886  * @segs: array of one or more packet segments that describe the flow
1887  * @segs_cnt: number of packet segments provided
1888  * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1889  * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1890  */
1891 static struct ice_flow_prof *
1892 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1893                          enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1894                          u8 segs_cnt, u16 vsi_handle, u32 conds)
1895 {
1896         struct ice_flow_prof *p, *prof = NULL;
1897
1898         ice_acquire_lock(&hw->fl_profs_locks[blk]);
1899         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1900                 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1901                     segs_cnt && segs_cnt == p->segs_cnt) {
1902                         u8 i;
1903
1904                         /* Check for profile-VSI association if specified */
1905                         if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1906                             ice_is_vsi_valid(hw, vsi_handle) &&
1907                             !ice_is_bit_set(p->vsis, vsi_handle))
1908                                 continue;
1909
1910                         /* Protocol headers must be checked. Matched fields are
1911                          * checked if specified.
1912                          */
1913                         for (i = 0; i < segs_cnt; i++)
1914                                 if (segs[i].hdrs != p->segs[i].hdrs ||
1915                                     ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1916                                      segs[i].match != p->segs[i].match))
1917                                         break;
1918
1919                         /* A match is found if all segments are matched */
1920                         if (i == segs_cnt) {
1921                                 prof = p;
1922                                 break;
1923                         }
1924                 }
1925         ice_release_lock(&hw->fl_profs_locks[blk]);
1926
1927         return prof;
1928 }
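
/* Usage note: ice_flow_find_prof() below passes ICE_FLOW_FIND_PROF_CHK_FLDS
 * so matched fields are compared in addition to protocol headers; adding
 * ICE_FLOW_FIND_PROF_CHK_VSI further restricts the lookup to profiles
 * already associated with the given VSI handle, and
 * ICE_FLOW_FIND_PROF_NOT_CHK_DIR skips the flow-direction comparison.
 */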
1929
1930 /**
1931  * ice_flow_find_prof - Look up a profile matching headers and matched fields
1932  * @hw: pointer to the HW struct
1933  * @blk: classification stage
1934  * @dir: flow direction
1935  * @segs: array of one or more packet segments that describe the flow
1936  * @segs_cnt: number of packet segments provided
1937  */
1938 u64
1939 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1940                    struct ice_flow_seg_info *segs, u8 segs_cnt)
1941 {
1942         struct ice_flow_prof *p;
1943
1944         p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1945                                      ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1946
1947         return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1948 }
1949
1950 /**
1951  * ice_flow_find_prof_id - Look up a profile with given profile ID
1952  * @hw: pointer to the HW struct
1953  * @blk: classification stage
1954  * @prof_id: unique ID to identify this flow profile
1955  */
1956 static struct ice_flow_prof *
1957 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1958 {
1959         struct ice_flow_prof *p;
1960
1961         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1962                 if (p->id == prof_id)
1963                         return p;
1964
1965         return NULL;
1966 }
1967
1968 /**
1969  * ice_dealloc_flow_entry - Deallocate flow entry memory
1970  * @hw: pointer to the HW struct
1971  * @entry: flow entry to be removed
1972  */
1973 static void
1974 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1975 {
1976         if (!entry)
1977                 return;
1978
1979         if (entry->entry)
1980                 ice_free(hw, entry->entry);
1981
1982         if (entry->range_buf) {
1983                 ice_free(hw, entry->range_buf);
1984                 entry->range_buf = NULL;
1985         }
1986
1987         if (entry->acts) {
1988                 ice_free(hw, entry->acts);
1989                 entry->acts = NULL;
1990                 entry->acts_cnt = 0;
1991         }
1992
1993         ice_free(hw, entry);
1994 }
1995
1996 /**
1997  * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1998  * @hw: pointer to the HW struct
1999  * @blk: classification stage
2000  * @prof_id: the profile ID handle
2001  * @hw_prof_id: pointer to variable to receive the HW profile ID
2002  */
2003 enum ice_status
2004 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2005                      u8 *hw_prof_id)
2006 {
2007         enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2008         struct ice_prof_map *map;
2009
2010         ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2011         map = ice_search_prof_id(hw, blk, prof_id);
2012         if (map) {
2013                 *hw_prof_id = map->prof_id;
2014                 status = ICE_SUCCESS;
2015         }
2016         ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2017         return status;
2018 }
2019
2020 #define ICE_ACL_INVALID_SCEN    0x3f
2021
2022 /**
2023  * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
2024  * @hw: pointer to the hardware structure
2025  * @prof: pointer to flow profile
2026  * @buf: destination buffer function writes partial extraction sequence to
2027  *
2028  * returns ICE_SUCCESS if no PF is associated with the given profile,
2029  * ICE_ERR_IN_USE if at least one PF is associated with the given profile,
2030  * or another error code on a real failure
2031  */
2032 static enum ice_status
2033 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
2034                             struct ice_aqc_acl_prof_generic_frmt *buf)
2035 {
2036         enum ice_status status;
2037         u8 prof_id = 0;
2038
2039         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2040         if (status)
2041                 return status;
2042
2043         status = ice_query_acl_prof(hw, prof_id, buf, NULL);
2044         if (status)
2045                 return status;
2046
2047         /* If the scenarios of all PFs are either all 0 or all
2048          * ICE_ACL_INVALID_SCEN (63) for the given profile, then it has not
2049          * been configured yet.
2050          */
2051         if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
2052             buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
2053             buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
2054             buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
2055                 return ICE_SUCCESS;
2056
2057         if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
2058             buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
2059             buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
2060             buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
2061             buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
2062             buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
2063             buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
2064             buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
2065                 return ICE_SUCCESS;
2066
2067         return ICE_ERR_IN_USE;
2068 }
2069
2070 /**
2071  * ice_flow_acl_free_act_cntr - Deallocate ACL counters used by the rule's actions
2072  * @hw: pointer to the hardware structure
2073  * @acts: array of actions to be performed on a match
2074  * @acts_cnt: number of actions
2075  */
2076 static enum ice_status
2077 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
2078                            u8 acts_cnt)
2079 {
2080         int i;
2081
2082         for (i = 0; i < acts_cnt; i++) {
2083                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2084                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2085                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2086                         struct ice_acl_cntrs cntrs = { 0 };
2087                         enum ice_status status;
2088
2089                         /* amount is unused in the dealloc path but the common
2090                          * parameter check routine wants a value set, as zero
2091                          * is invalid for the check. Just set it.
2092                          */
2093                         cntrs.amount = 1;
2094                         cntrs.bank = 0; /* Only bank0 for the moment */
2095                         cntrs.first_cntr =
2096                                         LE16_TO_CPU(acts[i].data.acl_act.value);
2097                         cntrs.last_cntr =
2098                                         LE16_TO_CPU(acts[i].data.acl_act.value);
2099
2100                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2101                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2102                         else
2103                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2104
2105                         status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
2106                         if (status)
2107                                 return status;
2108                 }
2109         }
2110         return ICE_SUCCESS;
2111 }
2112
2113 /**
2114  * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
2115  * @hw: pointer to the hardware structure
2116  * @prof: pointer to flow profile
2117  *
2118  * Disassociate the scenario from the profile for the PF of the VSI.
2119  */
2120 static enum ice_status
2121 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
2122 {
2123         struct ice_aqc_acl_prof_generic_frmt buf;
2124         enum ice_status status = ICE_SUCCESS;
2125         u8 prof_id = 0;
2126
2127         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2128
2129         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2130         if (status)
2131                 return status;
2132
2133         status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
2134         if (status)
2135                 return status;
2136
2137         /* Clear scenario for this PF */
2138         buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
2139         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2140
2141         return status;
2142 }
2143
2144 /**
2145  * ice_flow_rem_entry_sync - Remove a flow entry
2146  * @hw: pointer to the HW struct
2147  * @blk: classification stage
2148  * @entry: flow entry to be removed
2149  */
2150 static enum ice_status
2151 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
2152                         struct ice_flow_entry *entry)
2153 {
2154         if (!entry)
2155                 return ICE_ERR_BAD_PTR;
2156
2157         if (blk == ICE_BLK_ACL) {
2158                 enum ice_status status;
2159
2160                 if (!entry->prof)
2161                         return ICE_ERR_BAD_PTR;
2162
2163                 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
2164                                            entry->scen_entry_idx);
2165                 if (status)
2166                         return status;
2167
2168                 /* Checks if we need to release an ACL counter. */
2169                 if (entry->acts_cnt && entry->acts)
2170                         ice_flow_acl_free_act_cntr(hw, entry->acts,
2171                                                    entry->acts_cnt);
2172         }
2173
2174         LIST_DEL(&entry->l_entry);
2175
2176         ice_dealloc_flow_entry(hw, entry);
2177
2178         return ICE_SUCCESS;
2179 }
2180
2181 /**
2182  * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
2183  * @hw: pointer to the HW struct
2184  * @blk: classification stage
2185  * @dir: flow direction
2186  * @prof_id: unique ID to identify this flow profile
2187  * @segs: array of one or more packet segments that describe the flow
2188  * @segs_cnt: number of packet segments provided
2189  * @acts: array of default actions
2190  * @acts_cnt: number of default actions
2191  * @prof: on success, holds a pointer to the newly added flow profile
2192  *
2193  * Assumption: the caller has acquired the lock to the profile list
2194  */
2195 static enum ice_status
2196 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
2197                        enum ice_flow_dir dir, u64 prof_id,
2198                        struct ice_flow_seg_info *segs, u8 segs_cnt,
2199                        struct ice_flow_action *acts, u8 acts_cnt,
2200                        struct ice_flow_prof **prof)
2201 {
2202         struct ice_flow_prof_params *params;
2203         enum ice_status status;
2204         u8 i;
2205
2206         if (!prof || (acts_cnt && !acts))
2207                 return ICE_ERR_BAD_PTR;
2208
2209         params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2210         if (!params)
2211                 return ICE_ERR_NO_MEMORY;
2212
2213         params->prof = (struct ice_flow_prof *)
2214                 ice_malloc(hw, sizeof(*params->prof));
2215         if (!params->prof) {
2216                 status = ICE_ERR_NO_MEMORY;
2217                 goto free_params;
2218         }
2219
2220         /* initialize extraction sequence to all invalid (0xff) */
2221         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2222                 params->es[i].prot_id = ICE_PROT_INVALID;
2223                 params->es[i].off = ICE_FV_OFFSET_INVAL;
2224         }
2225
2226         params->blk = blk;
2227         params->prof->id = prof_id;
2228         params->prof->dir = dir;
2229         params->prof->segs_cnt = segs_cnt;
2230
2231         /* Make a copy of the segments that need to be persistent in the flow
2232          * profile instance
2233          */
2234         for (i = 0; i < segs_cnt; i++)
2235                 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
2236                            ICE_NONDMA_TO_NONDMA);
2237
2238         /* Make a copy of the actions that need to be persistent in the flow
2239          * profile instance.
2240          */
2241         if (acts_cnt) {
2242                 params->prof->acts = (struct ice_flow_action *)
2243                         ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2244                                    ICE_NONDMA_TO_NONDMA);
2245
2246                 if (!params->prof->acts) {
2247                         status = ICE_ERR_NO_MEMORY;
2248                         goto out;
2249                 }
2250         }
2251
2252         status = ice_flow_proc_segs(hw, params);
2253         if (status) {
2254                 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2255                 goto out;
2256         }
2257
2258         /* Add a HW profile for this flow profile */
2259         status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2260                               params->attr, params->attr_cnt, params->es,
2261                               params->mask, true);
2262         if (status) {
2263                 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
2264                 goto out;
2265         }
2266
2267         INIT_LIST_HEAD(&params->prof->entries);
2268         ice_init_lock(&params->prof->entries_lock);
2269         *prof = params->prof;
2270
2271 out:
2272         if (status) {
2273                 if (params->prof->acts)
2274                         ice_free(hw, params->prof->acts);
2275                 ice_free(hw, params->prof);
2276         }
2277 free_params:
2278         ice_free(hw, params);
2279
2280         return status;
2281 }
2282
2283 /**
2284  * ice_flow_rem_prof_sync - remove a flow profile
2285  * @hw: pointer to the hardware structure
2286  * @blk: classification stage
2287  * @prof: pointer to flow profile to remove
2288  *
2289  * Assumption: the caller has acquired the lock to the profile list
2290  */
2291 static enum ice_status
2292 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2293                        struct ice_flow_prof *prof)
2294 {
2295         enum ice_status status;
2296
2297         /* Remove all remaining flow entries before removing the flow profile */
2298         if (!LIST_EMPTY(&prof->entries)) {
2299                 struct ice_flow_entry *e, *t;
2300
2301                 ice_acquire_lock(&prof->entries_lock);
2302
2303                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2304                                          l_entry) {
2305                         status = ice_flow_rem_entry_sync(hw, blk, e);
2306                         if (status)
2307                                 break;
2308                 }
2309
2310                 ice_release_lock(&prof->entries_lock);
2311         }
2312
2313         if (blk == ICE_BLK_ACL) {
2314                 struct ice_aqc_acl_profile_ranges query_rng_buf;
2315                 struct ice_aqc_acl_prof_generic_frmt buf;
2316                 u8 prof_id = 0;
2317
2318                 /* Disassociate the scenario from the profile for the PF */
2319                 status = ice_flow_acl_disassoc_scen(hw, prof);
2320                 if (status)
2321                         return status;
2322
2323                 /* Clear the range-checker if the profile ID is no longer
2324                  * used by any PF
2325                  */
2326                 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2327                 if (status && status != ICE_ERR_IN_USE) {
2328                         return status;
2329                 } else if (!status) {
2330                         /* Clear the range-checker value for profile ID */
2331                         ice_memset(&query_rng_buf, 0,
2332                                    sizeof(struct ice_aqc_acl_profile_ranges),
2333                                    ICE_NONDMA_MEM);
2334
2335                         status = ice_flow_get_hw_prof(hw, blk, prof->id,
2336                                                       &prof_id);
2337                         if (status)
2338                                 return status;
2339
2340                         status = ice_prog_acl_prof_ranges(hw, prof_id,
2341                                                           &query_rng_buf, NULL);
2342                         if (status)
2343                                 return status;
2344                 }
2345         }
2346
2347         /* Remove all hardware profiles associated with this flow profile */
2348         status = ice_rem_prof(hw, blk, prof->id);
2349         if (!status) {
2350                 LIST_DEL(&prof->l_entry);
2351                 ice_destroy_lock(&prof->entries_lock);
2352                 if (prof->acts)
2353                         ice_free(hw, prof->acts);
2354                 ice_free(hw, prof);
2355         }
2356
2357         return status;
2358 }
2359
2360 /**
2361  * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2362  * @buf: Destination buffer the function writes the partial xtrct sequence to
2363  * @info: Info about field
2364  */
2365 static void
2366 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2367                                struct ice_flow_fld_info *info)
2368 {
2369         u16 dst, i;
2370         u8 src;
2371
2372         src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2373                 info->xtrct.disp / BITS_PER_BYTE;
2374         dst = info->entry.val;
2375         for (i = 0; i < info->entry.last; i++)
2376                 /* HW stores field vector words in LE, convert words back to BE
2377                  * so constructed entries will end up in network order
2378                  */
2379                 buf->byte_selection[dst++] = src++ ^ 1;
2380 }
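
/* Worked example (illustrative, not part of the driver): the `src++ ^ 1`
 * above swaps each pair of field-vector bytes. With xtrct.idx = 0 and
 * xtrct.disp = 0, a four-byte field reads source bytes 0, 1, 2, 3; XORing
 * each index with 1 selects bytes 1, 0, 3, 2 instead, turning each 16-bit
 * little-endian field-vector word back into big-endian (network) order:
 *
 *      src index:      0  1  2  3
 *      src index ^ 1:  1  0  3  2
 */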
2381
2382 /**
2383  * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2384  * @hw: pointer to the hardware structure
2385  * @prof: pointer to flow profile
2386  */
2387 static enum ice_status
2388 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2389 {
2390         struct ice_aqc_acl_prof_generic_frmt buf;
2391         struct ice_flow_fld_info *info;
2392         enum ice_status status;
2393         u8 prof_id = 0;
2394         u16 i;
2395
2396         ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2397
2398         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2399         if (status)
2400                 return status;
2401
2402         status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2403         if (status && status != ICE_ERR_IN_USE)
2404                 return status;
2405
2406         if (!status) {
2407                 /* Program the profile dependent configuration. This is done
2408                  * only once regardless of the number of PFs using that profile
2409                  */
2410                 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2411
2412                 for (i = 0; i < prof->segs_cnt; i++) {
2413                         struct ice_flow_seg_info *seg = &prof->segs[i];
2414                         u16 j;
2415
2416                         ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2417                                              ICE_FLOW_FIELD_IDX_MAX) {
2418                                 info = &seg->fields[j];
2419
2420                                 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2421                                         buf.word_selection[info->entry.val] =
2422                                                 info->xtrct.idx;
2423                                 else
2424                                         ice_flow_acl_set_xtrct_seq_fld(&buf,
2425                                                                        info);
2426                         }
2427
2428                         for (j = 0; j < seg->raws_cnt; j++) {
2429                                 info = &seg->raws[j].info;
2430                                 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2431                         }
2432                 }
2433
2434                 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2435                            ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2436                            ICE_NONDMA_MEM);
2437         }
2438
2439         /* Update the current PF */
2440         buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2441         status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2442
2443         return status;
2444 }
2445
2446 /**
2447  * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2448  * @hw: pointer to the hardware structure
2449  * @blk: classification stage
2450  * @vsi_handle: software VSI handle
2451  * @vsig: target VSI group
2452  *
2453  * Assumption: the caller has already verified that the VSI to
2454  * be added has the same characteristics as the VSIG and will
2455  * thereby have access to all resources added to that VSIG.
2456  */
2457 enum ice_status
2458 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2459                         u16 vsig)
2460 {
2461         enum ice_status status;
2462
2463         if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2464                 return ICE_ERR_PARAM;
2465
2466         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2467         status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2468                                   vsig);
2469         ice_release_lock(&hw->fl_profs_locks[blk]);
2470
2471         return status;
2472 }
2473
2474 /**
2475  * ice_flow_assoc_prof - associate a VSI with a flow profile
2476  * @hw: pointer to the hardware structure
2477  * @blk: classification stage
2478  * @prof: pointer to flow profile
2479  * @vsi_handle: software VSI handle
2480  *
2481  * Assumption: the caller has acquired the lock to the profile list
2482  * and the software VSI handle has been validated
2483  */
2484 enum ice_status
2485 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2486                     struct ice_flow_prof *prof, u16 vsi_handle)
2487 {
2488         enum ice_status status = ICE_SUCCESS;
2489
2490         if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2491                 if (blk == ICE_BLK_ACL) {
2492                         status = ice_flow_acl_set_xtrct_seq(hw, prof);
2493                         if (status)
2494                                 return status;
2495                 }
2496                 status = ice_add_prof_id_flow(hw, blk,
2497                                               ice_get_hw_vsi_num(hw,
2498                                                                  vsi_handle),
2499                                               prof->id);
2500                 if (!status)
2501                         ice_set_bit(vsi_handle, prof->vsis);
2502                 else
2503                         ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2504                                   status);
2505         }
2506
2507         return status;
2508 }
2509
2510 /**
2511  * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2512  * @hw: pointer to the hardware structure
2513  * @blk: classification stage
2514  * @prof: pointer to flow profile
2515  * @vsi_handle: software VSI handle
2516  *
2517  * Assumption: the caller has acquired the lock to the profile list
2518  * and the software VSI handle has been validated
2519  */
2520 static enum ice_status
2521 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2522                        struct ice_flow_prof *prof, u16 vsi_handle)
2523 {
2524         enum ice_status status = ICE_SUCCESS;
2525
2526         if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2527                 status = ice_rem_prof_id_flow(hw, blk,
2528                                               ice_get_hw_vsi_num(hw,
2529                                                                  vsi_handle),
2530                                               prof->id);
2531                 if (!status)
2532                         ice_clear_bit(vsi_handle, prof->vsis);
2533                 else
2534                         ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2535                                   status);
2536         }
2537
2538         return status;
2539 }
2540
2541 #define FLAG_GTP_EH_PDU_LINK    BIT_ULL(13)
2542 #define FLAG_GTP_EH_PDU         BIT_ULL(14)
2543
2544 #define FLAG_GTPU_MSK   \
2545         (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
2546 #define FLAG_GTPU_UP    \
2547         (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
2548 #define FLAG_GTPU_DW    \
2549         (FLAG_GTP_EH_PDU)
2550 /**
2551  * ice_flow_set_hw_prof - Set HW flow profile based on the parsed profile info
2552  * @hw: pointer to the HW struct
2553  * @dest_vsi_handle: dest VSI handle
2554  * @fdir_vsi_handle: fdir programming VSI handle
2555  * @prof: stores parsed profile info from raw flow
2556  * @blk: classification stage
2557  */
2558 enum ice_status
2559 ice_flow_set_hw_prof(struct ice_hw *hw, u16 dest_vsi_handle,
2560                      u16 fdir_vsi_handle, struct ice_parser_profile *prof,
2561                      enum ice_block blk)
2562 {
2563         int id = ice_find_first_bit(prof->ptypes, UINT16_MAX);
2564         struct ice_flow_prof_params *params;
2565         u8 fv_words = hw->blk[blk].es.fvw;
2566         enum ice_status status;
2567         u16 vsi_num;
2568         int i, idx;
2569
2570         params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2571         if (!params)
2572                 return ICE_ERR_NO_MEMORY;
2573
2574         for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2575                 params->es[i].prot_id = ICE_PROT_INVALID;
2576                 params->es[i].off = ICE_FV_OFFSET_INVAL;
2577         }
2578
2579         for (i = 0; i < prof->fv_num; i++) {
2580                 if (hw->blk[blk].es.reverse)
2581                         idx = fv_words - i - 1;
2582                 else
2583                         idx = i;
2584                 params->es[idx].prot_id = prof->fv[i].proto_id;
2585                 params->es[idx].off = prof->fv[i].offset;
2586                 params->mask[idx] = CPU_TO_BE16(prof->fv[i].msk);
2587         }
2588
2589         switch (prof->flags) {
2590         case FLAG_GTPU_DW:
2591                 params->attr = ice_attr_gtpu_down;
2592                 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
2593                 break;
2594         case FLAG_GTPU_UP:
2595                 params->attr = ice_attr_gtpu_up;
2596                 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
2597                 break;
2598         default:
2599                 if (prof->flags_msk & FLAG_GTPU_MSK) {
2600                         params->attr = ice_attr_gtpu_session;
2601                         params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
2602                 }
2603                 break;
2604         }
2605
2606         status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes,
2607                               params->attr, params->attr_cnt,
2608                               params->es, params->mask, false);
2609         if (status)
2610                 goto free_params;
2611
2612         status = ice_flow_assoc_hw_prof(hw, blk, dest_vsi_handle,
2613                                         fdir_vsi_handle, id);
2614         if (status)
2615                 goto free_params;
2616
2617         /* fall through: release the temporary params on success as well */
2618
2619 free_params:
2620         ice_free(hw, params);
2621
2622         return status;
2623 }
2624
2625 /**
2626  * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2627  * @hw: pointer to the HW struct
2628  * @blk: classification stage
2629  * @dir: flow direction
2630  * @prof_id: unique ID to identify this flow profile
2631  * @segs: array of one or more packet segments that describe the flow
2632  * @segs_cnt: number of packet segments provided
2633  * @acts: array of default actions
2634  * @acts_cnt: number of default actions
2635  * @prof: stores the returned flow profile added
2636  */
2637 enum ice_status
2638 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2639                   u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2640                   struct ice_flow_action *acts, u8 acts_cnt,
2641                   struct ice_flow_prof **prof)
2642 {
2643         enum ice_status status;
2644
2645         if (segs_cnt > ICE_FLOW_SEG_MAX)
2646                 return ICE_ERR_MAX_LIMIT;
2647
2648         if (!segs_cnt)
2649                 return ICE_ERR_PARAM;
2650
2651         if (!segs)
2652                 return ICE_ERR_BAD_PTR;
2653
2654         status = ice_flow_val_hdrs(segs, segs_cnt);
2655         if (status)
2656                 return status;
2657
2658         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2659
2660         status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2661                                         acts, acts_cnt, prof);
2662         if (!status)
2663                 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2664
2665         ice_release_lock(&hw->fl_profs_locks[blk]);
2666
2667         return status;
2668 }
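
/* Usage sketch (illustrative only, not part of this file): a minimal caller
 * building one IPv4 segment and registering it as a flow profile. The profile
 * ID, the field indices from ice_flow.h and the byte offsets into the entry
 * input buffer are assumptions made for this example.
 *
 *      struct ice_flow_seg_info seg = { 0 };
 *      struct ice_flow_prof *prof = NULL;
 *      enum ice_status status;
 *
 *      // Match the IPv4 source/destination addresses; 0 and 4 are the byte
 *      // offsets of the values in the buffer later given to
 *      // ice_flow_add_entry().
 *      ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA, 0,
 *                       ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 *      ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_DA, 4,
 *                       ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 *
 *      status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, 0x1234ULL,
 *                                 &seg, 1, NULL, 0, &prof);
 *      if (status)
 *              return status;
 */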
2669
2670 /**
2671  * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2672  * @hw: pointer to the HW struct
2673  * @blk: the block for which the flow profile is to be removed
2674  * @prof_id: unique ID of the flow profile to be removed
2675  */
2676 enum ice_status
2677 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2678 {
2679         struct ice_flow_prof *prof;
2680         enum ice_status status;
2681
2682         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2683
2684         prof = ice_flow_find_prof_id(hw, blk, prof_id);
2685         if (!prof) {
2686                 status = ICE_ERR_DOES_NOT_EXIST;
2687                 goto out;
2688         }
2689
2690         /* prof becomes invalid after the call */
2691         status = ice_flow_rem_prof_sync(hw, blk, prof);
2692
2693 out:
2694         ice_release_lock(&hw->fl_profs_locks[blk]);
2695
2696         return status;
2697 }
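
/* Usage sketch (illustrative, not part of this file): tearing down the
 * profile registered in the sketch above, using the same assumed profile ID.
 * Any entries still attached to the profile are removed as part of the call.
 *
 *      status = ice_flow_rem_prof(hw, ICE_BLK_FD, 0x1234ULL);
 */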
2698
2699 /**
2700  * ice_flow_find_entry - look for a flow entry using its unique ID
2701  * @hw: pointer to the HW struct
2702  * @blk: classification stage
2703  * @entry_id: unique ID to identify this flow entry
2704  *
2705  * This function looks for the flow entry with the specified unique ID in all
2706  * flow profiles of the specified classification stage. If the entry is found,
2707  * it returns the handle to the flow entry. Otherwise, it returns
2708  * ICE_FLOW_ENTRY_HANDLE_INVAL.
2709  */
2710 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2711 {
2712         struct ice_flow_entry *found = NULL;
2713         struct ice_flow_prof *p;
2714
2715         ice_acquire_lock(&hw->fl_profs_locks[blk]);
2716
2717         LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2718                 struct ice_flow_entry *e;
2719
2720                 ice_acquire_lock(&p->entries_lock);
2721                 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2722                         if (e->id == entry_id) {
2723                                 found = e;
2724                                 break;
2725                         }
2726                 ice_release_lock(&p->entries_lock);
2727
2728                 if (found)
2729                         break;
2730         }
2731
2732         ice_release_lock(&hw->fl_profs_locks[blk]);
2733
2734         return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2735 }
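
/* Usage sketch (illustrative, not part of this file): looking up an entry by
 * its caller-chosen ID and validating the returned handle. The block and the
 * entry ID are assumptions for this example.
 *
 *      u64 entry_h = ice_flow_find_entry(hw, ICE_BLK_FD, 0xabcdULL);
 *
 *      if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
 *              return ICE_ERR_DOES_NOT_EXIST;
 */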
2736
2737 /**
2738  * ice_flow_acl_check_actions - Checks the ACL rule's actions
2739  * @hw: pointer to the hardware structure
2740  * @acts: array of actions to be performed on a match
2741  * @acts_cnt: number of actions
2742  * @cnt_alloc: indicates if an ACL counter has been allocated.
2743  */
2744 static enum ice_status
2745 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2746                            u8 acts_cnt, bool *cnt_alloc)
2747 {
2748         ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2749         int i;
2750
2751         ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2752         *cnt_alloc = false;
2753
2754         if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2755                 return ICE_ERR_OUT_OF_RANGE;
2756
2757         for (i = 0; i < acts_cnt; i++) {
2758                 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2759                     acts[i].type != ICE_FLOW_ACT_DROP &&
2760                     acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2761                     acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2762                         return ICE_ERR_CFG;
2763
2764                 /* If the caller wants to add two actions of the same type,
2765                  * it is considered an invalid configuration.
2766                  */
2767                 if (ice_test_and_set_bit(acts[i].type, dup_check))
2768                         return ICE_ERR_PARAM;
2769         }
2770
2771         /* Checks if ACL counters are needed. */
2772         for (i = 0; i < acts_cnt; i++) {
2773                 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2774                     acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2775                     acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2776                         struct ice_acl_cntrs cntrs = { 0 };
2777                         enum ice_status status;
2778
2779                         cntrs.amount = 1;
2780                         cntrs.bank = 0; /* Only bank0 for the moment */
2781
2782                         if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2783                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2784                         else
2785                                 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2786
2787                         status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2788                         if (status)
2789                                 return status;
2790                         /* Counter index within the bank */
2791                         acts[i].data.acl_act.value =
2792                                                 CPU_TO_LE16(cntrs.first_cntr);
2793                         *cnt_alloc = true;
2794                 }
2795         }
2796
2797         return ICE_SUCCESS;
2798 }
2799
2800 /**
2801  * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2802  * @fld: number of the given field
2803  * @info: info about field
2804  * @range_buf: range checker configuration buffer
2805  * @data: pointer to a data buffer containing flow entry's match values/masks
2806  * @range: Input/output param indicating which range checkers are being used
2807  */
2808 static void
2809 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2810                               struct ice_aqc_acl_profile_ranges *range_buf,
2811                               u8 *data, u8 *range)
2812 {
2813         u16 new_mask;
2814
2815         /* If not specified, default mask is all bits in field */
2816         new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2817                     BIT(ice_flds_info[fld].size) - 1 :
2818                     (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2819
2820         /* If the mask is 0, then we don't need to worry about this input
2821          * range checker value.
2822          */
2823         if (new_mask) {
2824                 u16 new_high =
2825                         (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2826                 u16 new_low =
2827                         (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2828                 u8 range_idx = info->entry.val;
2829
2830                 range_buf->checker_cfg[range_idx].low_boundary =
2831                         CPU_TO_BE16(new_low);
2832                 range_buf->checker_cfg[range_idx].high_boundary =
2833                         CPU_TO_BE16(new_high);
2834                 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2835
2836                 /* Indicate which range checker is being used */
2837                 *range |= BIT(range_idx);
2838         }
2839 }
2840
2841 /**
2842  * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2843  * @fld: number of the given field
2844  * @info: info about the field
2845  * @buf: buffer containing the entry
2846  * @dontcare: buffer containing don't care mask for entry
2847  * @data: pointer to a data buffer containing flow entry's match values/masks
2848  */
2849 static void
2850 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2851                             u8 *dontcare, u8 *data)
2852 {
2853         u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2854         bool use_mask = false;
2855         u8 disp;
2856
2857         src = info->src.val;
2858         mask = info->src.mask;
2859         dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2860         disp = info->xtrct.disp % BITS_PER_BYTE;
2861
2862         if (mask != ICE_FLOW_FLD_OFF_INVAL)
2863                 use_mask = true;
2864
2865         for (k = 0; k < info->entry.last; k++, dst++) {
2866                 /* Add overflow bits from previous byte */
2867                 buf[dst] = (tmp_s & 0xff00) >> 8;
2868
2869                 /* If the mask is not valid, tmp_m is always zero, so this just
2870                  * sets dontcare to 0 (no masked bits). If the mask is valid, this
2871                  * pulls in the overflow bits of the mask from the previous byte
2872                  */
2873                 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2874
2875                 /* If there is displacement, last byte will only contain
2876                  * displaced data, but there is no more data to read from user
2877                  * buffer, so skip so as not to potentially read beyond end of
2878                  * user buffer
2879                  */
2880                 if (!disp || k < info->entry.last - 1) {
2881                         /* Store shifted data to use in next byte */
2882                         tmp_s = data[src++] << disp;
2883
2884                         /* Add current (shifted) byte */
2885                         buf[dst] |= tmp_s & 0xff;
2886
2887                         /* Handle mask if valid */
2888                         if (use_mask) {
2889                                 tmp_m = (~data[mask++] & 0xff) << disp;
2890                                 dontcare[dst] |= tmp_m & 0xff;
2891                         }
2892                 }
2893         }
2894
2895         /* Fill in don't care bits at beginning of field */
2896         if (disp) {
2897                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2898                 for (k = 0; k < disp; k++)
2899                         dontcare[dst] |= BIT(k);
2900         }
2901
2902         end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2903
2904         /* Fill in don't care bits at end of field */
2905         if (end_disp) {
2906                 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2907                       info->entry.last - 1;
2908                 for (k = end_disp; k < BITS_PER_BYTE; k++)
2909                         dontcare[dst] |= BIT(k);
2910         }
2911 }
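
/* Worked example (illustrative): for a 16-bit field whose extracted data is
 * displaced by 4 bits (disp = 4), the value spans three destination bytes.
 * Bits 0-3 of the first byte carry no field data and are marked don't care by
 * the first loop above; end_disp = (4 + 16) % 8 = 4, so bits 4-7 of the last
 * byte are marked don't care by the second loop.
 */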
2912
2913 /**
2914  * ice_flow_acl_frmt_entry - Format ACL entry
2915  * @hw: pointer to the hardware structure
2916  * @prof: pointer to flow profile
2917  * @e: pointer to the flow entry
2918  * @data: pointer to a data buffer containing flow entry's match values/masks
2919  * @acts: array of actions to be performed on a match
2920  * @acts_cnt: number of actions
2921  *
2922  * Formats the key (and key_inverse) to be matched from the data passed in,
2923  * along with data from the flow profile. This key/key_inverse pair makes up
2924  * the 'entry' for an ACL flow entry.
2925  */
2926 static enum ice_status
2927 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2928                         struct ice_flow_entry *e, u8 *data,
2929                         struct ice_flow_action *acts, u8 acts_cnt)
2930 {
2931         u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2932         struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2933         enum ice_status status;
2934         bool cnt_alloc;
2935         u8 prof_id = 0;
2936         u16 i, buf_sz;
2937
2938         status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2939         if (status)
2940                 return status;
2941
2942         /* Format the result action */
2943
2944         status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2945         if (status)
2946                 return status;
2947
2948         status = ICE_ERR_NO_MEMORY;
2949
2950         e->acts = (struct ice_flow_action *)
2951                 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2952                            ICE_NONDMA_TO_NONDMA);
2953         if (!e->acts)
2954                 goto out;
2955
2956         e->acts_cnt = acts_cnt;
2957
2958         /* Format the matching data */
2959         buf_sz = prof->cfg.scen->width;
2960         buf = (u8 *)ice_malloc(hw, buf_sz);
2961         if (!buf)
2962                 goto out;
2963
2964         dontcare = (u8 *)ice_malloc(hw, buf_sz);
2965         if (!dontcare)
2966                 goto out;
2967
2968         /* 'key' buffer will store both key and key_inverse, so must be twice
2969          * size of buf
2970          */
2971         key = (u8 *)ice_malloc(hw, buf_sz * 2);
2972         if (!key)
2973                 goto out;
2974
2975         range_buf = (struct ice_aqc_acl_profile_ranges *)
2976                 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2977         if (!range_buf)
2978                 goto out;
2979
2980         /* Set don't care mask to all 1's to start, will zero out used bytes */
2981         ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2982
2983         for (i = 0; i < prof->segs_cnt; i++) {
2984                 struct ice_flow_seg_info *seg = &prof->segs[i];
2985                 u8 j;
2986
2987                 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2988                                      ICE_FLOW_FIELD_IDX_MAX) {
2989                         struct ice_flow_fld_info *info = &seg->fields[j];
2990
2991                         if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2992                                 ice_flow_acl_frmt_entry_range(j, info,
2993                                                               range_buf, data,
2994                                                               &range);
2995                         else
2996                                 ice_flow_acl_frmt_entry_fld(j, info, buf,
2997                                                             dontcare, data);
2998                 }
2999
3000                 for (j = 0; j < seg->raws_cnt; j++) {
3001                         struct ice_flow_fld_info *info = &seg->raws[j].info;
3002                         u16 dst, src, mask, k;
3003                         bool use_mask = false;
3004
3005                         src = info->src.val;
3006                         dst = info->entry.val -
3007                                         ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
3008                         mask = info->src.mask;
3009
3010                         if (mask != ICE_FLOW_FLD_OFF_INVAL)
3011                                 use_mask = true;
3012
3013                         for (k = 0; k < info->entry.last; k++, dst++) {
3014                                 buf[dst] = data[src++];
3015                                 if (use_mask)
3016                                         dontcare[dst] = ~data[mask++];
3017                                 else
3018                                         dontcare[dst] = 0;
3019                         }
3020                 }
3021         }
3022
3023         buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
3024         dontcare[prof->cfg.scen->pid_idx] = 0;
3025
3026         /* Format the buffer for direction flags */
3027         dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
3028
3029         if (prof->dir == ICE_FLOW_RX)
3030                 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
3031
3032         if (range) {
3033                 buf[prof->cfg.scen->rng_chk_idx] = range;
3034                 /* Mark any unused range checkers as don't care */
3035                 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
3036                 e->range_buf = range_buf;
3037         } else {
3038                 ice_free(hw, range_buf);
3039         }
3040
3041         status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
3042                              buf_sz);
3043         if (status)
3044                 goto out;
3045
3046         e->entry = key;
3047         e->entry_sz = buf_sz * 2;
3048
3049 out:
3050         if (buf)
3051                 ice_free(hw, buf);
3052
3053         if (dontcare)
3054                 ice_free(hw, dontcare);
3055
3056         if (status && key)
3057                 ice_free(hw, key);
3058
3059         if (status && range_buf) {
3060                 ice_free(hw, range_buf);
3061                 e->range_buf = NULL;
3062         }
3063
3064         if (status && e->acts) {
3065                 ice_free(hw, e->acts);
3066                 e->acts = NULL;
3067                 e->acts_cnt = 0;
3068         }
3069
3070         if (status && cnt_alloc)
3071                 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
3072
3073         return status;
3074 }
3075
3076 /**
3077  * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
3078  *                                     the compared data.
3079  * @prof: pointer to flow profile
3080  * @e: pointer to the comparing flow entry
3081  * @do_chg_action: decide if we want to change the ACL action
3082  * @do_add_entry: decide if we want to add the new ACL entry
3083  * @do_rem_entry: decide if we want to remove the current ACL entry
3084  *
3085  * Find an ACL scenario entry that matches the compared data. At the same time,
3086  * this function also figures out:
3087  * a/ If we want to change the ACL action
3088  * b/ If we want to add the new ACL entry
3089  * c/ If we want to remove the current ACL entry
3090  */
3091 static struct ice_flow_entry *
3092 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
3093                                   struct ice_flow_entry *e, bool *do_chg_action,
3094                                   bool *do_add_entry, bool *do_rem_entry)
3095 {
3096         struct ice_flow_entry *p, *return_entry = NULL;
3097         u8 i, j;
3098
3099         /* Check if:
3100          * a/ There exists an entry with same matching data, but different
3101          *    priority, then we remove this existing ACL entry. Then, we
3102          *    will add the new entry to the ACL scenario.
3103          * b/ There exists an entry with same matching data, priority, and
3104          *    result action, then we do nothing.
3105          * c/ There exists an entry with same matching data and priority, but
3106          *    a different action, then we only change the entry's action.
3107          * d/ Else, we add this new entry to the ACL scenario.
3108          */
3109         *do_chg_action = false;
3110         *do_add_entry = true;
3111         *do_rem_entry = false;
3112         LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
3113                 if (memcmp(p->entry, e->entry, p->entry_sz))
3114                         continue;
3115
3116                 /* From this point, we have the same matching_data. */
3117                 *do_add_entry = false;
3118                 return_entry = p;
3119
3120                 if (p->priority != e->priority) {
3121                         /* matching data && !priority */
3122                         *do_add_entry = true;
3123                         *do_rem_entry = true;
3124                         break;
3125                 }
3126
3127                 /* From this point, we will have matching_data && priority */
3128                 if (p->acts_cnt != e->acts_cnt)
3129                         *do_chg_action = true;
3130                 for (i = 0; i < p->acts_cnt; i++) {
3131                         bool found_not_match = false;
3132
3133                         for (j = 0; j < e->acts_cnt; j++)
3134                                 if (memcmp(&p->acts[i], &e->acts[j],
3135                                            sizeof(struct ice_flow_action))) {
3136                                         found_not_match = true;
3137                                         break;
3138                                 }
3139
3140                         if (found_not_match) {
3141                                 *do_chg_action = true;
3142                                 break;
3143                         }
3144                 }
3145
3146                 /* (do_chg_action = true) means :
3147                  *    matching_data && priority && !result_action
3148                  * (do_chg_action = false) means :
3149                  *    matching_data && priority && result_action
3150                  */
3151                 break;
3152         }
3153
3154         return return_entry;
3155 }
3156
3157 /**
3158  * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
3159  * @p: flow priority
3160  */
3161 static enum ice_acl_entry_prio
3162 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
3163 {
3164         enum ice_acl_entry_prio acl_prio;
3165
3166         switch (p) {
3167         case ICE_FLOW_PRIO_LOW:
3168                 acl_prio = ICE_ACL_PRIO_LOW;
3169                 break;
3170         case ICE_FLOW_PRIO_NORMAL:
3171                 acl_prio = ICE_ACL_PRIO_NORMAL;
3172                 break;
3173         case ICE_FLOW_PRIO_HIGH:
3174                 acl_prio = ICE_ACL_PRIO_HIGH;
3175                 break;
3176         default:
3177                 acl_prio = ICE_ACL_PRIO_NORMAL;
3178                 break;
3179         }
3180
3181         return acl_prio;
3182 }
3183
3184 /**
3185  * ice_flow_acl_union_rng_chk - Perform union operation between two
3186  *                              range checker buffers
3187  * @dst_buf: pointer to destination range checker buffer
3188  * @src_buf: pointer to source range checker buffer
3189  *
3190  * This function computes the union of the dst_buf and src_buf range checker
3191  * buffers and saves the result back to dst_buf.
3192  */
3193 static enum ice_status
3194 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
3195                            struct ice_aqc_acl_profile_ranges *src_buf)
3196 {
3197         u8 i, j;
3198
3199         if (!dst_buf || !src_buf)
3200                 return ICE_ERR_BAD_PTR;
3201
3202         for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
3203                 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
3204                 bool will_populate = false;
3205
3206                 in_data = &src_buf->checker_cfg[i];
3207
3208                 if (!in_data->mask)
3209                         break;
3210
3211                 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
3212                         cfg_data = &dst_buf->checker_cfg[j];
3213
3214                         if (!cfg_data->mask ||
3215                             !memcmp(cfg_data, in_data,
3216                                     sizeof(struct ice_acl_rng_data))) {
3217                                 will_populate = true;
3218                                 break;
3219                         }
3220                 }
3221
3222                 if (will_populate) {
3223                         ice_memcpy(cfg_data, in_data,
3224                                    sizeof(struct ice_acl_rng_data),
3225                                    ICE_NONDMA_TO_NONDMA);
3226                 } else {
3227                         /* No available slot left to program range checker */
3228                         return ICE_ERR_MAX_LIMIT;
3229                 }
3230         }
3231
3232         return ICE_SUCCESS;
3233 }
3234
3235 /**
3236  * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
3237  * @hw: pointer to the hardware structure
3238  * @prof: pointer to flow profile
3239  * @entry: double pointer to the flow entry
3240  *
3241  * This function looks at the entries currently added to the corresponding
3242  * ACL scenario, then performs matching logic to decide whether to add,
3243  * modify, or do nothing with this new entry.
3244  */
3245 static enum ice_status
3246 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
3247                                  struct ice_flow_entry **entry)
3248 {
3249         bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
3250         struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
3251         struct ice_acl_act_entry *acts = NULL;
3252         struct ice_flow_entry *exist;
3253         enum ice_status status = ICE_SUCCESS;
3254         struct ice_flow_entry *e;
3255         u8 i;
3256
3257         if (!entry || !(*entry) || !prof)
3258                 return ICE_ERR_BAD_PTR;
3259
3260         e = *entry;
3261
3262         do_chg_rng_chk = false;
3263         if (e->range_buf) {
3264                 u8 prof_id = 0;
3265
3266                 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
3267                                               &prof_id);
3268                 if (status)
3269                         return status;
3270
3271                 /* Query the current range-checker value in FW */
3272                 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
3273                                                    NULL);
3274                 if (status)
3275                         return status;
3276                 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
3277                            sizeof(struct ice_aqc_acl_profile_ranges),
3278                            ICE_NONDMA_TO_NONDMA);
3279
3280                 /* Generate the new range-checker value */
3281                 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
3282                 if (status)
3283                         return status;
3284
3285                 /* Reconfigure the range check if the buffer is changed. */
3286                 do_chg_rng_chk = false;
3287                 if (memcmp(&query_rng_buf, &cfg_rng_buf,
3288                            sizeof(struct ice_aqc_acl_profile_ranges))) {
3289                         status = ice_prog_acl_prof_ranges(hw, prof_id,
3290                                                           &cfg_rng_buf, NULL);
3291                         if (status)
3292                                 return status;
3293
3294                         do_chg_rng_chk = true;
3295                 }
3296         }
3297
3298         /* Figure out if we want to (change the ACL action) and/or
3299          * (Add the new ACL entry) and/or (Remove the current ACL entry)
3300          */
3301         exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
3302                                                   &do_add_entry, &do_rem_entry);
3303         if (do_rem_entry) {
3304                 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
3305                 if (status)
3306                         return status;
3307         }
3308
3309         /* Prepare the result action buffer */
3310         acts = (struct ice_acl_act_entry *)
3311                 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
3312         if (!acts)
3313                 return ICE_ERR_NO_MEMORY;
3314
3315         for (i = 0; i < e->acts_cnt; i++)
3316                 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
3317                            sizeof(struct ice_acl_act_entry),
3318                            ICE_NONDMA_TO_NONDMA);
3319
3320         if (do_add_entry) {
3321                 enum ice_acl_entry_prio prio;
3322                 u8 *keys, *inverts;
3323                 u16 entry_idx;
3324
3325                 keys = (u8 *)e->entry;
3326                 inverts = keys + (e->entry_sz / 2);
3327                 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
3328
3329                 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
3330                                            inverts, acts, e->acts_cnt,
3331                                            &entry_idx);
3332                 if (status)
3333                         goto out;
3334
3335                 e->scen_entry_idx = entry_idx;
3336                 LIST_ADD(&e->l_entry, &prof->entries);
3337         } else {
3338                 if (do_chg_action) {
3339                         /* For the action memory info, update the SW's copy of
3340                          * the existing entry with e's action memory info
3341                          */
3342                         ice_free(hw, exist->acts);
3343                         exist->acts_cnt = e->acts_cnt;
3344                         exist->acts = (struct ice_flow_action *)
3345                                 ice_calloc(hw, exist->acts_cnt,
3346                                            sizeof(struct ice_flow_action));
3347                         if (!exist->acts) {
3348                                 status = ICE_ERR_NO_MEMORY;
3349                                 goto out;
3350                         }
3351
3352                         ice_memcpy(exist->acts, e->acts,
3353                                    sizeof(struct ice_flow_action) * e->acts_cnt,
3354                                    ICE_NONDMA_TO_NONDMA);
3355
3356                         status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3357                                                   e->acts_cnt,
3358                                                   exist->scen_entry_idx);
3359                         if (status)
3360                                 goto out;
3361                 }
3362
3363                 if (do_chg_rng_chk) {
3364                         /* In this case, we want to update the range checker
3365                          * information of the existing entry
3366                          */
3367                         status = ice_flow_acl_union_rng_chk(exist->range_buf,
3368                                                             e->range_buf);
3369                         if (status)
3370                                 goto out;
3371                 }
3372
3373                 /* As we don't add the new entry to our SW DB, deallocate its
3374                  * memory and return the existing entry to the caller
3375                  */
3376                 ice_dealloc_flow_entry(hw, e);
3377                 *(entry) = exist;
3378         }
3379 out:
3380         ice_free(hw, acts);
3381
3382         return status;
3383 }
3384
3385 /**
3386  * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3387  * @hw: pointer to the hardware structure
3388  * @prof: pointer to flow profile
3389  * @e: double pointer to the flow entry
3390  */
3391 static enum ice_status
3392 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3393                             struct ice_flow_entry **e)
3394 {
3395         enum ice_status status;
3396
3397         ice_acquire_lock(&prof->entries_lock);
3398         status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3399         ice_release_lock(&prof->entries_lock);
3400
3401         return status;
3402 }
3403
3404 /**
3405  * ice_flow_add_entry - Add a flow entry
3406  * @hw: pointer to the HW struct
3407  * @blk: classification stage
3408  * @prof_id: ID of the profile to add a new flow entry to
3409  * @entry_id: unique ID to identify this flow entry
3410  * @vsi_handle: software VSI handle for the flow entry
3411  * @prio: priority of the flow entry
3412  * @data: pointer to a data buffer containing flow entry's match values/masks
3413  * @acts: arrays of actions to be performed on a match
3414  * @acts_cnt: number of actions
3415  * @entry_h: pointer to buffer that receives the new flow entry's handle
3416  */
3417 enum ice_status
3418 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3419                    u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3420                    void *data, struct ice_flow_action *acts, u8 acts_cnt,
3421                    u64 *entry_h)
3422 {
3423         struct ice_flow_entry *e = NULL;
3424         struct ice_flow_prof *prof;
3425         enum ice_status status = ICE_SUCCESS;
3426
3427         /* ACL entries must indicate an action */
3428         if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3429                 return ICE_ERR_PARAM;
3430
3431         /* No flow entry data is expected for RSS */
3432         if (!entry_h || (!data && blk != ICE_BLK_RSS))
3433                 return ICE_ERR_BAD_PTR;
3434
3435         if (!ice_is_vsi_valid(hw, vsi_handle))
3436                 return ICE_ERR_PARAM;
3437
3438         ice_acquire_lock(&hw->fl_profs_locks[blk]);
3439
3440         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3441         if (!prof) {
3442                 status = ICE_ERR_DOES_NOT_EXIST;
3443         } else {
3444                 /* Allocate memory for the entry being added and associate
3445                  * the VSI to the found flow profile
3446                  */
3447                 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3448                 if (!e)
3449                         status = ICE_ERR_NO_MEMORY;
3450                 else
3451                         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3452         }
3453
3454         ice_release_lock(&hw->fl_profs_locks[blk]);
3455         if (status)
3456                 goto out;
3457
3458         e->id = entry_id;
3459         e->vsi_handle = vsi_handle;
3460         e->prof = prof;
3461         e->priority = prio;
3462
3463         switch (blk) {
3464         case ICE_BLK_FD:
3465         case ICE_BLK_RSS:
3466                 break;
3467         case ICE_BLK_ACL:
3468                 /* ACL will handle the entry management */
3469                 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3470                                                  acts_cnt);
3471                 if (status)
3472                         goto out;
3473
3474                 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3475                 if (status)
3476                         goto out;
3477
3478                 break;
3479         default:
3480                 status = ICE_ERR_NOT_IMPL;
3481                 goto out;
3482         }
3483
3484         if (blk != ICE_BLK_ACL) {
3485                 /* ACL will handle the entry management */
3486                 ice_acquire_lock(&prof->entries_lock);
3487                 LIST_ADD(&e->l_entry, &prof->entries);
3488                 ice_release_lock(&prof->entries_lock);
3489         }
3490
3491         *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3492
3493 out:
3494         if (status && e) {
3495                 if (e->entry)
3496                         ice_free(hw, e->entry);
3497                 ice_free(hw, e);
3498         }
3499
3500         return status;
3501 }
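
/* Usage sketch (illustrative, not part of this file): adding an entry to the
 * profile from the earlier ice_flow_add_prof() sketch. The input buffer holds
 * the match values at the byte offsets previously passed to
 * ice_flow_set_fld(); the IDs, the VSI handle and the address variables
 * (src_ip_be, dst_ip_be) are assumptions for this example.
 *
 *      u8 data[8] = { 0 };
 *      u64 entry_h;
 *      enum ice_status status;
 *
 *      ice_memcpy(&data[0], &src_ip_be, 4, ICE_NONDMA_TO_NONDMA);
 *      ice_memcpy(&data[4], &dst_ip_be, 4, ICE_NONDMA_TO_NONDMA);
 *
 *      status = ice_flow_add_entry(hw, ICE_BLK_FD, 0x1234ULL, 0xabcdULL,
 *                                  vsi_handle, ICE_FLOW_PRIO_NORMAL, data,
 *                                  NULL, 0, &entry_h);
 */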
3502
3503 /**
3504  * ice_flow_rem_entry - Remove a flow entry
3505  * @hw: pointer to the HW struct
3506  * @blk: classification stage
3507  * @entry_h: handle to the flow entry to be removed
3508  */
3509 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3510                                    u64 entry_h)
3511 {
3512         struct ice_flow_entry *entry;
3513         struct ice_flow_prof *prof;
3514         enum ice_status status = ICE_SUCCESS;
3515
3516         if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3517                 return ICE_ERR_PARAM;
3518
3519         entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
3520
3521         /* Retain the pointer to the flow profile as the entry will be freed */
3522         prof = entry->prof;
3523
3524         if (prof) {
3525                 ice_acquire_lock(&prof->entries_lock);
3526                 status = ice_flow_rem_entry_sync(hw, blk, entry);
3527                 ice_release_lock(&prof->entries_lock);
3528         }
3529
3530         return status;
3531 }
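
/* Usage sketch (illustrative, not part of this file): removing the entry
 * added in the previous sketch, using the handle that ice_flow_add_entry()
 * returned.
 *
 *      status = ice_flow_rem_entry(hw, ICE_BLK_FD, entry_h);
 */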
3532
3533 /**
3534  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3535  * @seg: packet segment the field being set belongs to
3536  * @fld: field to be set
3537  * @field_type: type of the field
3538  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3539  *           entry's input buffer
3540  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3541  *            input buffer
3542  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3543  *            entry's input buffer
3544  *
3545  * This helper function stores information of a field being matched, including
3546  * the type of the field and the locations of the value to match, the mask, and
3547  * the upper-bound value in the start of the input buffer for a flow entry.
3548  * This function should only be used for fixed-size data structures.
3549  *
3550  * This function also opportunistically determines the protocol headers to be
3551  * present based on the fields being set. Some fields cannot be used alone to
3552  * determine the protocol headers present. Sometimes, fields for particular
3553  * protocol headers are not matched. In those cases, the protocol headers
3554  * must be explicitly set.
3555  */
3556 static void
3557 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3558                      enum ice_flow_fld_match_type field_type, u16 val_loc,
3559                      u16 mask_loc, u16 last_loc)
3560 {
3561         u64 bit = BIT_ULL(fld);
3562
3563         seg->match |= bit;
3564         if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3565                 seg->range |= bit;
3566
3567         seg->fields[fld].type = field_type;
3568         seg->fields[fld].src.val = val_loc;
3569         seg->fields[fld].src.mask = mask_loc;
3570         seg->fields[fld].src.last = last_loc;
3571
3572         ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3573 }
3574
3575 /**
3576  * ice_flow_set_fld - specifies locations of field from entry's input buffer
3577  * @seg: packet segment the field being set belongs to
3578  * @fld: field to be set
3579  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3580  *           entry's input buffer
3581  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3582  *            input buffer
3583  * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3584  *            entry's input buffer
3585  * @range: indicate if field being matched is to be in a range
3586  *
3587  * This function specifies the locations, in the form of byte offsets from the
3588  * start of the input buffer for a flow entry, from where the value to match,
3589  * the mask value, and upper value can be extracted. These locations are then
3590  * stored in the flow profile. When adding a flow entry associated with the
3591  * flow profile, these locations will be used to quickly extract the values and
3592  * create the content of a match entry. This function should only be used for
3593  * fixed-size data structures.
3594  */
3595 void
3596 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3597                  u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3598 {
3599         enum ice_flow_fld_match_type t = range ?
3600                 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3601
3602         ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3603 }
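
/* Usage sketch (illustrative, not part of this file): describing a ranged
 * match. With range set, val_loc holds the lower bound, last_loc the upper
 * bound and mask_loc an optional mask, all as byte offsets into the entry's
 * input buffer; the offsets and the TCP destination port field index from
 * ice_flow.h are assumptions for this example. Ranges are consumed by the
 * ACL path (see ice_flow_acl_frmt_entry_range() above).
 *
 *      struct ice_flow_seg_info seg = { 0 };
 *
 *      // Lower bound at offset 0, mask at offset 2, upper bound at offset 4
 *      ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 2, 4, true);
 */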
3604
3605 /**
3606  * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3607  * @seg: packet segment the field being set belongs to
3608  * @fld: field to be set
3609  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3610  *           entry's input buffer
3611  * @pref_loc: location of prefix value from entry's input buffer
3612  * @pref_sz: size of the location holding the prefix value
3613  *
3614  * This function specifies the locations, in the form of byte offsets from the
3615  * start of the input buffer for a flow entry, from where the value to match
3616  * and the IPv4 prefix value can be extracted. These locations are then stored
3617  * in the flow profile. When adding flow entries to the associated flow profile,
3618  * these locations can be used to quickly extract the values to create the
3619  * content of a match entry. This function should only be used for fixed-size
3620  * data structures.
3621  */
3622 void
3623 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3624                         u16 val_loc, u16 pref_loc, u8 pref_sz)
3625 {
3626         /* For this type of field, the "mask" location is used for the prefix
3627          * value's location and the "last" location is used for the size of the
3628          * location holding the prefix value.
3629          */
3630         ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3631                              pref_loc, (u16)pref_sz);
3632 }
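
/* Illustrative sketch (documentation only): matching an IPv4 source address
 * by prefix. The layout below is a hypothetical example; the driver only
 * consumes the byte offsets and the size of the prefix location.
 */
struct example_pfx_input {
	u32 ipv4_src;	/* address to match */
	u8 pfx_len;	/* prefix length, e.g. 24 for a /24 match */
};

static void example_set_prefix_fld(struct ice_flow_seg_info *seg)
{
	ice_flow_set_fld_prefix(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
				offsetof(struct example_pfx_input, ipv4_src),
				offsetof(struct example_pfx_input, pfx_len),
				sizeof(u8));
}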
3633
3634 /**
3635  * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3636  * @seg: packet segment the field being set belongs to
3637  * @off: offset of the raw field from the beginning of the segment in bytes
3638  * @len: length of the raw pattern to be matched
3639  * @val_loc: location of the value to match from entry's input buffer
3640  * @mask_loc: location of mask value from entry's input buffer
3641  *
3642  * This function specifies the offset of the raw field to be matched from the
3643  * beginning of the specified packet segment, and the locations, in the form of
3644  * byte offsets from the start of the input buffer for a flow entry, from where
3645  * the value to match and the mask value can be extracted. These locations are
3646  * then stored in the flow profile. When adding flow entries to the associated
3647  * flow profile, these locations can be used to quickly extract the values to
3648  * create the content of a match entry. This function should only be used for
3649  * fixed-size data structures.
3650  */
3651 void
3652 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3653                      u16 val_loc, u16 mask_loc)
3654 {
3655         if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3656                 seg->raws[seg->raws_cnt].off = off;
3657                 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3658                 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3659                 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3660                 /* The "last" field is used to store the length of the field */
3661                 seg->raws[seg->raws_cnt].info.src.last = len;
3662         }
3663
3664         /* Overflows of "raws" will be handled as an error condition later in
3665          * the flow when this information is processed.
3666          */
3667         seg->raws_cnt++;
3668 }
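
/* Illustrative sketch (documentation only): matching four raw bytes located
 * two bytes into the packet segment. The input buffer layout is again a
 * hypothetical example chosen for this sketch.
 */
struct example_raw_input {
	u8 raw_val[4];	/* raw pattern to match */
	u8 raw_mask[4];	/* mask applied to the raw pattern */
};

static void example_add_raw_fld(struct ice_flow_seg_info *seg)
{
	/* 4-byte raw match at byte offset 2 within the segment */
	ice_flow_add_fld_raw(seg, 2, 4,
			     offsetof(struct example_raw_input, raw_val),
			     offsetof(struct example_raw_input, raw_mask));
}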
3669
3670 /**
3671  * ice_flow_rem_vsi_prof - remove VSI from flow profile
3672  * @hw: pointer to the hardware structure
3673  * @blk: classification stage
3674  * @vsi_handle: software VSI handle
3675  * @prof_id: unique ID to identify this flow profile
3676  *
3677  * This function removes the flow entries associated with the input
3678  * VSI handle and disassociates the VSI from the flow profile.
3679  */
3680 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3681                                       u64 prof_id)
3682 {
3683         struct ice_flow_prof *prof = NULL;
3684         enum ice_status status = ICE_SUCCESS;
3685
3686         if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3687                 return ICE_ERR_PARAM;
3688
3689         /* find flow profile pointer with input package block and profile ID */
3690         prof = ice_flow_find_prof_id(hw, blk, prof_id);
3691         if (!prof) {
3692                 ice_debug(hw, ICE_DBG_PKG,
3693                           "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3694                 return ICE_ERR_DOES_NOT_EXIST;
3695         }
3696
3697         /* Remove all remaining flow entries before removing the flow profile */
3698         if (!LIST_EMPTY(&prof->entries)) {
3699                 struct ice_flow_entry *e, *t;
3700
3701                 ice_acquire_lock(&prof->entries_lock);
3702                 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
3703                                          l_entry) {
3704                         if (e->vsi_handle != vsi_handle)
3705                                 continue;
3706
3707                         status = ice_flow_rem_entry_sync(hw, blk, e);
3708                         if (status)
3709                                 break;
3710                 }
3711                 ice_release_lock(&prof->entries_lock);
3712         }
3713         if (status)
3714                 return status;
3715
3716         /* disassociate the flow profile from the SW VSI handle */
3717         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3718         if (status)
3719                 ice_debug(hw, ICE_DBG_PKG,
3720                           "ice_flow_disassoc_prof() failed with status=%d\n",
3721                           status);
3722         return status;
3723 }
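
/* Illustrative sketch (documentation only): removing one VSI's flow director
 * entries for a given profile. 'vsi_handle' and 'prof_id' are placeholders
 * supplied by the caller in this sketch.
 */
static enum ice_status
example_rem_vsi_fd_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id)
{
	return ice_flow_rem_vsi_prof(hw, ICE_BLK_FD, vsi_handle, prof_id);
}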
3724
3725 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3726 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_ETH_NON_IP | ICE_FLOW_SEG_HDR_VLAN)
3727
3728 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3729         (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3730
3731 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3732         (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3733
3734 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3735         (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3736          ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3737          ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3738
3739 /**
3740  * ice_flow_set_rss_seg_info - setup packet segments for RSS
3741  * @segs: pointer to the flow field segment(s)
3742  * @seg_cnt: segment count
3743  * @cfg: configure parameters
3744  *
3745  * Helper function to extract fields from the hash bitmap and use the flow
3746  * header value to set the flow field segment for further use in flow
3747  * profile entry addition or removal.
3748  */
3749 static enum ice_status
3750 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3751                           const struct ice_rss_hash_cfg *cfg)
3752 {
3753         struct ice_flow_seg_info *seg;
3754         u64 val;
3755         u8 i;
3756
3757         /* set innermost segment */
3758         seg = &segs[seg_cnt - 1];
3759
3760         ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3761                              ICE_FLOW_FIELD_IDX_MAX)
3762                 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3763                                  ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3764                                  ICE_FLOW_FLD_OFF_INVAL, false);
3765
3766         ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3767
3768         /* set outermost header */
3769         if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3770                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3771                                                    ICE_FLOW_SEG_HDR_IPV_FRAG |
3772                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3773         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3774                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3775                                                    ICE_FLOW_SEG_HDR_IPV_FRAG |
3776                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3777         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE)
3778                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3779                                                    ICE_FLOW_SEG_HDR_GRE |
3780                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3781         else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE)
3782                 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3783                                                    ICE_FLOW_SEG_HDR_GRE |
3784                                                    ICE_FLOW_SEG_HDR_IPV_OTHER;
3785
3786         if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3787             ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
3788             ~ICE_FLOW_SEG_HDR_IPV_FRAG)
3789                 return ICE_ERR_PARAM;
3790
3791         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3792         if (val && !ice_is_pow2(val))
3793                 return ICE_ERR_CFG;
3794
3795         val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3796         if (val && !ice_is_pow2(val))
3797                 return ICE_ERR_CFG;
3798
3799         return ICE_SUCCESS;
3800 }
3801
3802 /**
3803  * ice_rem_vsi_rss_list - remove VSI from RSS list
3804  * @hw: pointer to the hardware structure
3805  * @vsi_handle: software VSI handle
3806  *
3807  * Remove the VSI from all RSS configurations in the list.
3808  */
3809 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3810 {
3811         struct ice_rss_cfg *r, *tmp;
3812
3813         if (LIST_EMPTY(&hw->rss_list_head))
3814                 return;
3815
3816         ice_acquire_lock(&hw->rss_locks);
3817         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3818                                  ice_rss_cfg, l_entry)
3819                 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3820                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3821                                 LIST_DEL(&r->l_entry);
3822                                 ice_free(hw, r);
3823                         }
3824         ice_release_lock(&hw->rss_locks);
3825 }
3826
3827 /**
3828  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3829  * @hw: pointer to the hardware structure
3830  * @vsi_handle: software VSI handle
3831  *
3832  * This function will iterate through all flow profiles and disassociate
3833  * the VSI from each profile it is associated with. If a flow profile is
3834  * left with no VSIs, it will be removed.
3835  */
3836 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3837 {
3838         const enum ice_block blk = ICE_BLK_RSS;
3839         struct ice_flow_prof *p, *t;
3840         enum ice_status status = ICE_SUCCESS;
3841
3842         if (!ice_is_vsi_valid(hw, vsi_handle))
3843                 return ICE_ERR_PARAM;
3844
3845         if (LIST_EMPTY(&hw->fl_profs[blk]))
3846                 return ICE_SUCCESS;
3847
3848         ice_acquire_lock(&hw->rss_locks);
3849         LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3850                                  l_entry)
3851                 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3852                         status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3853                         if (status)
3854                                 break;
3855
3856                         if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3857                                 status = ice_flow_rem_prof(hw, blk, p->id);
3858                                 if (status)
3859                                         break;
3860                         }
3861                 }
3862         ice_release_lock(&hw->rss_locks);
3863
3864         return status;
3865 }
3866
3867 /**
3868  * ice_get_rss_hdr_type - get an RSS profile's header type
3869  * @prof: RSS flow profile
3870  */
3871 static enum ice_rss_cfg_hdr_type
3872 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3873 {
3874         enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3875
3876         if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3877                 hdr_type = ICE_RSS_OUTER_HEADERS;
3878         } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3879                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3880                         hdr_type = ICE_RSS_INNER_HEADERS;
3881                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3882                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3883                 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3884                         hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3885         }
3886
3887         return hdr_type;
3888 }
3889
3890 /**
3891  * ice_rem_rss_list - remove RSS configuration from list
3892  * @hw: pointer to the hardware structure
3893  * @vsi_handle: software VSI handle
3894  * @prof: pointer to flow profile
3895  *
3896  * Assumption: lock has already been acquired for RSS list
3897  */
3898 static void
3899 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3900 {
3901         enum ice_rss_cfg_hdr_type hdr_type;
3902         struct ice_rss_cfg *r, *tmp;
3903
3904         /* Search for RSS hash fields associated with the VSI that match the
3905          * hash configuration associated with the flow profile. If found,
3906          * remove it from the VSI context's RSS entry list and delete the entry.
3907          */
3908         hdr_type = ice_get_rss_hdr_type(prof);
3909         LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3910                                  ice_rss_cfg, l_entry)
3911                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3912                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3913                     r->hash.hdr_type == hdr_type) {
3914                         ice_clear_bit(vsi_handle, r->vsis);
3915                         if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3916                                 LIST_DEL(&r->l_entry);
3917                                 ice_free(hw, r);
3918                         }
3919                         return;
3920                 }
3921 }
3922
3923 /**
3924  * ice_add_rss_list - add RSS configuration to list
3925  * @hw: pointer to the hardware structure
3926  * @vsi_handle: software VSI handle
3927  * @prof: pointer to flow profile
3928  *
3929  * Assumption: lock has already been acquired for RSS list
3930  */
3931 static enum ice_status
3932 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3933 {
3934         enum ice_rss_cfg_hdr_type hdr_type;
3935         struct ice_rss_cfg *r, *rss_cfg;
3936
3937         hdr_type = ice_get_rss_hdr_type(prof);
3938         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3939                             ice_rss_cfg, l_entry)
3940                 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3941                     r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3942                     r->hash.hdr_type == hdr_type) {
3943                         ice_set_bit(vsi_handle, r->vsis);
3944                         return ICE_SUCCESS;
3945                 }
3946
3947         rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3948         if (!rss_cfg)
3949                 return ICE_ERR_NO_MEMORY;
3950
3951         rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3952         rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3953         rss_cfg->hash.hdr_type = hdr_type;
3954         rss_cfg->hash.symm = prof->cfg.symm;
3955         ice_set_bit(vsi_handle, rss_cfg->vsis);
3956
3957         LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3958
3959         return ICE_SUCCESS;
3960 }
3961
3962 #define ICE_FLOW_PROF_HASH_S    0
3963 #define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3964 #define ICE_FLOW_PROF_HDR_S     32
3965 #define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3966 #define ICE_FLOW_PROF_ENCAP_S   62
3967 #define ICE_FLOW_PROF_ENCAP_M   (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3968
3969 /* Flow profile ID format:
3970  * [0:31] - Packet match fields
3971  * [32:61] - Protocol header
3972  * [62:63] - Encapsulation flag:
3973  *           0 if non-tunneled
3974  *           1 if tunneled
3975  *           2 if tunneled with outer IPv4
3976  *           3 if tunneled with outer IPv6
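 *
 * As a purely illustrative example of the encoding,
 * ICE_FLOW_GEN_PROFID(0x1234, 0x56, 2) yields 0x8000005600001234:
 * 0x1234 occupies bits [0:31], 0x56 occupies bits [32:61], and encap
 * value 2 (tunneled with outer IPv4) occupies bits [62:63].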
3977  */
3978 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3979         ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3980                (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3981                (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
3982
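/* Worked example of the GLQF_HSYMM indexing below (derived only from the
 * arithmetic in this helper): each register holds four byte lanes, so
 * src = 5 selects register index i = 5 / 4 = 1 and byte shift
 * s = (5 % 4) * 8 = 8. The selected lane is programmed with dst | 0x80;
 * the 0x80 appears to act as an enable flag for the symmetric substitution.
 */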
3983 static void
3984 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3985 {
3986         u32 s = ((src % 4) << 3); /* byte shift */
3987         u32 v = dst | 0x80; /* value to program */
3988         u8 i = src / 4; /* register index */
3989         u32 reg;
3990
3991         reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3992         reg = (reg & ~(0xff << s)) | (v << s);
3993         wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3994 }
3995
3996 static void
3997 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3998 {
3999         int fv_last_word =
4000                 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
4001         int i;
4002
4003         for (i = 0; i < len; i++) {
4004                 ice_rss_config_xor_word(hw, prof_id,
4005                                         /* Yes, the field vector order in
4006                                          * GLQF_HSYMM and GLQF_HINSET is reversed!
4007                                          */
4008                                         fv_last_word - (src + i),
4009                                         fv_last_word - (dst + i));
4010                 ice_rss_config_xor_word(hw, prof_id,
4011                                         fv_last_word - (dst + i),
4012                                         fv_last_word - (src + i));
4013         }
4014 }
4015
4016 static void
4017 ice_rss_update_symm(struct ice_hw *hw,
4018                     struct ice_flow_prof *prof)
4019 {
4020         struct ice_prof_map *map;
4021         u8 prof_id, m;
4022
4023         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
4024         map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
4025         if (map)
4026                 prof_id = map->prof_id;
4027         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
4028         if (!map)
4029                 return;
4030         /* clear to default */
4031         for (m = 0; m < 6; m++)
4032                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
4033         if (prof->cfg.symm) {
4034                 struct ice_flow_seg_info *seg =
4035                         &prof->segs[prof->segs_cnt - 1];
4036
4037                 struct ice_flow_seg_xtrct *ipv4_src =
4038                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
4039                 struct ice_flow_seg_xtrct *ipv4_dst =
4040                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
4041                 struct ice_flow_seg_xtrct *ipv6_src =
4042                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
4043                 struct ice_flow_seg_xtrct *ipv6_dst =
4044                         &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
4045
4046                 struct ice_flow_seg_xtrct *tcp_src =
4047                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
4048                 struct ice_flow_seg_xtrct *tcp_dst =
4049                         &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
4050
4051                 struct ice_flow_seg_xtrct *udp_src =
4052                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
4053                 struct ice_flow_seg_xtrct *udp_dst =
4054                         &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
4055
4056                 struct ice_flow_seg_xtrct *sctp_src =
4057                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
4058                 struct ice_flow_seg_xtrct *sctp_dst =
4059                         &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
4060
4061                 /* xor IPv4 */
4062                 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
4063                         ice_rss_config_xor(hw, prof_id,
4064                                            ipv4_src->idx, ipv4_dst->idx, 2);
4065
4066                 /* xor IPv6 */
4067                 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
4068                         ice_rss_config_xor(hw, prof_id,
4069                                            ipv6_src->idx, ipv6_dst->idx, 8);
4070
4071                 /* xor TCP */
4072                 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
4073                         ice_rss_config_xor(hw, prof_id,
4074                                            tcp_src->idx, tcp_dst->idx, 1);
4075
4076                 /* xor UDP */
4077                 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
4078                         ice_rss_config_xor(hw, prof_id,
4079                                            udp_src->idx, udp_dst->idx, 1);
4080
4081                 /* xor SCTP */
4082                 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
4083                         ice_rss_config_xor(hw, prof_id,
4084                                            sctp_src->idx, sctp_dst->idx, 1);
4085         }
4086 }
4087
4088 /**
4089  * ice_rss_cfg_raw_symm - configure symmetric hash parameters
4090  * for raw pattern
4091  * @hw: pointer to the hardware structure
4092  * @prof: pointer to parser profile
4093  * @prof_id: profile ID
4094  *
4095  * Calculate symmetric hash parameters based on input protocol type.
4096  */
4097 static void
4098 ice_rss_cfg_raw_symm(struct ice_hw *hw,
4099                      struct ice_parser_profile *prof, u64 prof_id)
4100 {
4101         u8 src_idx, dst_idx, proto_id;
4102         int len, i = 0;
4103
4104         while (i < prof->fv_num) {
4105                 proto_id = prof->fv[i].proto_id;
4106
4107                 switch (proto_id) {
4108                 case ICE_PROT_IPV4_OF_OR_S:
4109                         len = ICE_FLOW_FLD_SZ_IPV4_ADDR /
4110                               ICE_FLOW_FV_EXTRACT_SZ;
4111                         if (prof->fv[i].offset ==
4112                             ICE_FLOW_FIELD_IPV4_SRC_OFFSET &&
4113                             prof->fv[i + len].proto_id == proto_id &&
4114                             prof->fv[i + len].offset ==
4115                             ICE_FLOW_FIELD_IPV4_DST_OFFSET) {
4116                                 src_idx = i;
4117                                 dst_idx = i + len;
4118                                 i += 2 * len;
4119                                 break;
4120                         }
4121                         i++;
4122                         continue;
4123                 case ICE_PROT_IPV6_OF_OR_S:
4124                         len = ICE_FLOW_FLD_SZ_IPV6_ADDR /
4125                               ICE_FLOW_FV_EXTRACT_SZ;
4126                         if (prof->fv[i].offset ==
4127                             ICE_FLOW_FIELD_IPV6_SRC_OFFSET &&
4128                             prof->fv[i + len].proto_id == proto_id &&
4129                             prof->fv[i + len].offset ==
4130                             ICE_FLOW_FIELD_IPV6_DST_OFFSET) {
4131                                 src_idx = i;
4132                                 dst_idx = i + len;
4133                                 i += 2 * len;
4134                                 break;
4135                         }
4136                         i++;
4137                         continue;
4138                 case ICE_PROT_TCP_IL:
4139                 case ICE_PROT_UDP_IL_OR_S:
4140                 case ICE_PROT_SCTP_IL:
4141                         len = ICE_FLOW_FLD_SZ_PORT /
4142                               ICE_FLOW_FV_EXTRACT_SZ;
4143                         if (prof->fv[i].offset ==
4144                             ICE_FLOW_FIELD_SRC_PORT_OFFSET &&
4145                             prof->fv[i + len].proto_id == proto_id &&
4146                             prof->fv[i + len].offset ==
4147                             ICE_FLOW_FIELD_DST_PORT_OFFSET) {
4148                                 src_idx = i;
4149                                 dst_idx = i + len;
4150                                 i += 2 * len;
4151                                 break;
4152                         }
4153                         i++;
4154                         continue;
4155                 default:
4156                         i++;
4157                         continue;
4158                 }
4159                 ice_rss_config_xor(hw, prof_id, src_idx, dst_idx, len);
4160         }
4161 }
4162
4163 /* Number of GLQF_HSYMM registers per packet profile */
4164 #define ICE_SYMM_REG_INDEX_MAX 6
4165
4166 /**
4167  * ice_rss_update_raw_symm - update symmetric hash configuration
4168  * for raw pattern
4169  * @hw: pointer to the hardware structure
4170  * @cfg: configure parameters for raw pattern
4171  * @id: profile tracking ID
4172  *
4173  * Update symmetric hash configuration for raw pattern if required.
4174  * Otherwise only clear to default.
4175  */
4176 void
4177 ice_rss_update_raw_symm(struct ice_hw *hw,
4178                         struct ice_rss_raw_cfg *cfg, u64 id)
4179 {
4180         struct ice_prof_map *map;
4181         u8 prof_id, m;
4182
4183         ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
4184         map = ice_search_prof_id(hw, ICE_BLK_RSS, id);
4185         if (map)
4186                 prof_id = map->prof_id;
4187         ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
4188         if (!map)
4189                 return;
4190         /* clear to default */
4191         for (m = 0; m < ICE_SYMM_REG_INDEX_MAX; m++)
4192                 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
4193         if (cfg->symm)
4194                 ice_rss_cfg_raw_symm(hw, &cfg->prof, prof_id);
4195 }
4196
4197 /**
4198  * ice_add_rss_cfg_sync - add an RSS configuration
4199  * @hw: pointer to the hardware structure
4200  * @vsi_handle: software VSI handle
4201  * @cfg: configure parameters
4202  *
4203  * Assumption: lock has already been acquired for RSS list
4204  */
4205 static enum ice_status
4206 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
4207                      const struct ice_rss_hash_cfg *cfg)
4208 {
4209         const enum ice_block blk = ICE_BLK_RSS;
4210         struct ice_flow_prof *prof = NULL;
4211         struct ice_flow_seg_info *segs;
4212         enum ice_status status;
4213         u8 segs_cnt;
4214
4215         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
4216                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
4217
4218         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
4219                                                       sizeof(*segs));
4220         if (!segs)
4221                 return ICE_ERR_NO_MEMORY;
4222
4223         /* Construct the packet segment info from the hashed fields */
4224         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
4225         if (status)
4226                 goto exit;
4227
4228         /* Search for a flow profile that has matching headers, hash fields,
4229          * and has the input VSI associated with it. If found, no further
4230          * operations are required, so exit.
4231          */
4232         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4233                                         vsi_handle,
4234                                         ICE_FLOW_FIND_PROF_CHK_FLDS |
4235                                         ICE_FLOW_FIND_PROF_CHK_VSI);
4236         if (prof) {
4237                 if (prof->cfg.symm == cfg->symm)
4238                         goto exit;
4239                 prof->cfg.symm = cfg->symm;
4240                 goto update_symm;
4241         }
4242
4243         /* Check if a flow profile exists with the same protocol headers and
4244          * associated with the input VSI. If so disassociate the VSI from
4245          * this profile. The VSI will be added to a new profile created with
4246          * the protocol header and new hash field configuration.
4247          */
4248         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4249                                         vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
4250         if (prof) {
4251                 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
4252                 if (!status)
4253                         ice_rem_rss_list(hw, vsi_handle, prof);
4254                 else
4255                         goto exit;
4256
4257                 /* Remove profile if it has no VSIs associated */
4258                 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
4259                         status = ice_flow_rem_prof(hw, blk, prof->id);
4260                         if (status)
4261                                 goto exit;
4262                 }
4263         }
4264
4265         /* Search for a profile that has the same match fields only. If one
4266          * exists, associate the VSI with this profile.
4267          */
4268         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4269                                         vsi_handle,
4270                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
4271         if (prof) {
4272                 if (prof->cfg.symm == cfg->symm) {
4273                         status = ice_flow_assoc_prof(hw, blk, prof,
4274                                                      vsi_handle);
4275                         if (!status)
4276                                 status = ice_add_rss_list(hw, vsi_handle,
4277                                                           prof);
4278                 } else {
4279                         /* if a profile exists but with a different symmetric
4280                          * requirement, just return an error.
4281                          */
4282                         status = ICE_ERR_NOT_SUPPORTED;
4283                 }
4284                 goto exit;
4285         }
4286
4287         /* Create a new flow profile with the generated profile ID and packet
4288          * segment information.
4289          */
4290         status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
4291                                    ICE_FLOW_GEN_PROFID(cfg->hash_flds,
4292                                                        segs[segs_cnt - 1].hdrs,
4293                                                        cfg->hdr_type),
4294                                    segs, segs_cnt, NULL, 0, &prof);
4295         if (status)
4296                 goto exit;
4297
4298         status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
4299         /* If association with the new flow profile failed, this profile can
4300          * be removed.
4301          */
4302         if (status) {
4303                 ice_flow_rem_prof(hw, blk, prof->id);
4304                 goto exit;
4305         }
4306
4307         status = ice_add_rss_list(hw, vsi_handle, prof);
4308
4309         prof->cfg.symm = cfg->symm;
4310 update_symm:
4311         ice_rss_update_symm(hw, prof);
4312
4313 exit:
4314         ice_free(hw, segs);
4315         return status;
4316 }
4317
4318 /**
4319  * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
4320  * @hw: pointer to the hardware structure
4321  * @vsi_handle: software VSI handle
4322  * @cfg: configure parameters
4323  *
4324  * This function will generate a flow profile based on the input fields to
4325  * hash on and the flow header type, and will use the VSI number to add
4326  * a flow entry to the profile.
4327  */
4328 enum ice_status
4329 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4330                 const struct ice_rss_hash_cfg *cfg)
4331 {
4332         struct ice_rss_hash_cfg local_cfg;
4333         enum ice_status status;
4334
4335         if (!ice_is_vsi_valid(hw, vsi_handle) ||
4336             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4337             cfg->hash_flds == ICE_HASH_INVALID)
4338                 return ICE_ERR_PARAM;
4339
4340         local_cfg = *cfg;
4341         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4342                 ice_acquire_lock(&hw->rss_locks);
4343                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4344                 ice_release_lock(&hw->rss_locks);
4345         } else {
4346                 ice_acquire_lock(&hw->rss_locks);
4347                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4348                 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4349                 if (!status) {
4350                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4351                         status = ice_add_rss_cfg_sync(hw, vsi_handle,
4352                                                       &local_cfg);
4353                 }
4354                 ice_release_lock(&hw->rss_locks);
4355         }
4356
4357         return status;
4358 }
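
/* Illustrative sketch (documentation only): enabling symmetric RSS hashing on
 * the outer IPv4 source/destination addresses of a VSI. The field indices and
 * header flags are from this file; 'vsi_handle' is a caller-supplied
 * placeholder.
 */
static enum ice_status
example_add_ipv4_rss(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_rss_hash_cfg cfg = { 0 };

	cfg.hash_flds = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
			BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA);
	cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4;
	cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
	cfg.symm = true;

	return ice_add_rss_cfg(hw, vsi_handle, &cfg);
}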
4359
4360 /**
4361  * ice_rem_rss_cfg_sync - remove an existing RSS configuration
4362  * @hw: pointer to the hardware structure
4363  * @vsi_handle: software VSI handle
4364  * @cfg: configure parameters
4365  *
4366  * Assumption: lock has already been acquired for RSS list
4367  */
4368 static enum ice_status
4369 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
4370                      const struct ice_rss_hash_cfg *cfg)
4371 {
4372         const enum ice_block blk = ICE_BLK_RSS;
4373         struct ice_flow_seg_info *segs;
4374         struct ice_flow_prof *prof;
4375         enum ice_status status;
4376         u8 segs_cnt;
4377
4378         segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
4379                         ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
4380         segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
4381                                                       sizeof(*segs));
4382         if (!segs)
4383                 return ICE_ERR_NO_MEMORY;
4384
4385         /* Construct the packet segment info from the hashed fields */
4386         status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
4387         if (status)
4388                 goto out;
4389
4390         prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4391                                         vsi_handle,
4392                                         ICE_FLOW_FIND_PROF_CHK_FLDS);
4393         if (!prof) {
4394                 status = ICE_ERR_DOES_NOT_EXIST;
4395                 goto out;
4396         }
4397
4398         status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
4399         if (status)
4400                 goto out;
4401
4402         /* Remove RSS configuration from VSI context before deleting
4403          * the flow profile.
4404          */
4405         ice_rem_rss_list(hw, vsi_handle, prof);
4406
4407         if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
4408                 status = ice_flow_rem_prof(hw, blk, prof->id);
4409
4410 out:
4411         ice_free(hw, segs);
4412         return status;
4413 }
4414
4415 /**
4416  * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
4417  * @hw: pointer to the hardware structure
4418  * @vsi_handle: software VSI handle
4419  * @cfg: configure parameters
4420  *
4421  * This function will look up the flow profile based on the input
4422  * hash field bitmap, iterate through the profile entry list of
4423  * that profile and find the entry associated with the input VSI to
4424  * be removed. Calls are made to underlying flow APIs which will in
4425  * turn build or update buffers for the RSS XLT1 section.
4426  */
4427 enum ice_status
4428 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4429                 const struct ice_rss_hash_cfg *cfg)
4430 {
4431         struct ice_rss_hash_cfg local_cfg;
4432         enum ice_status status;
4433
4434         if (!ice_is_vsi_valid(hw, vsi_handle) ||
4435             !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4436             cfg->hash_flds == ICE_HASH_INVALID)
4437                 return ICE_ERR_PARAM;
4438
4439         ice_acquire_lock(&hw->rss_locks);
4440         local_cfg = *cfg;
4441         if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4442                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4443         } else {
4444                 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4445                 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4446
4447                 if (!status) {
4448                         local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4449                         status = ice_rem_rss_cfg_sync(hw, vsi_handle,
4450                                                       &local_cfg);
4451                 }
4452         }
4453         ice_release_lock(&hw->rss_locks);
4454
4455         return status;
4456 }
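
/* Illustrative sketch (documentation only): removing the RSS configuration
 * added in the earlier example. The same hashed fields, additional headers
 * and header type must be supplied so the matching flow profile is found.
 */
static enum ice_status
example_rem_ipv4_rss(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_rss_hash_cfg cfg = { 0 };

	cfg.hash_flds = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
			BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA);
	cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4;
	cfg.hdr_type = ICE_RSS_OUTER_HEADERS;

	return ice_rem_rss_cfg(hw, vsi_handle, &cfg);
}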
4457
4458 /**
4459  * ice_replay_rss_cfg - replay RSS configurations associated with VSI
4460  * @hw: pointer to the hardware structure
4461  * @vsi_handle: software VSI handle
4462  */
4463 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
4464 {
4465         enum ice_status status = ICE_SUCCESS;
4466         struct ice_rss_cfg *r;
4467
4468         if (!ice_is_vsi_valid(hw, vsi_handle))
4469                 return ICE_ERR_PARAM;
4470
4471         ice_acquire_lock(&hw->rss_locks);
4472         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4473                             ice_rss_cfg, l_entry) {
4474                 if (ice_is_bit_set(r->vsis, vsi_handle)) {
4475                         status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
4476                         if (status)
4477                                 break;
4478                 }
4479         }
4480         ice_release_lock(&hw->rss_locks);
4481
4482         return status;
4483 }
4484
4485 /**
4486  * ice_get_rss_cfg - returns hashed fields for the given header types
4487  * @hw: pointer to the hardware structure
4488  * @vsi_handle: software VSI handle
4489  * @hdrs: protocol header type
4490  *
4491  * This function will return the match fields of the first instance of a flow
4492  * profile having the given header types and containing the input VSI.
4493  */
4494 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4495 {
4496         u64 rss_hash = ICE_HASH_INVALID;
4497         struct ice_rss_cfg *r;
4498
4499         /* verify that the protocol header is non-zero and the VSI is valid */
4500         if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4501                 return ICE_HASH_INVALID;
4502
4503         ice_acquire_lock(&hw->rss_locks);
4504         LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4505                             ice_rss_cfg, l_entry)
4506                 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4507                     r->hash.addl_hdrs == hdrs) {
4508                         rss_hash = r->hash.hash_flds;
4509                         break;
4510                 }
4511         ice_release_lock(&hw->rss_locks);
4512
4513         return rss_hash;
4514 }
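
/* Illustrative sketch (documentation only): querying which fields are hashed
 * for TCP over IPv4 on a VSI. ICE_HASH_INVALID is returned when no RSS
 * configuration with exactly these additional headers exists for the VSI.
 */
static u64 example_get_ipv4_tcp_hash(struct ice_hw *hw, u16 vsi_handle)
{
	return ice_get_rss_cfg(hw, vsi_handle,
			       ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
}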