1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
3 */
5 #include "ice_common.h"
8 /* Size of known protocol header fields */
/* NOTE(review): the values below are field widths in BYTES as they appear
 * on the wire; the ICE_FLOW_FLD_INFO()/ICE_FLOW_FLD_INFO_MSK() initializer
 * macros multiply them by BITS_PER_BYTE when populating ice_flds_info[],
 * whose .size member is expressed in bits.
 */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE 2
10 #define ICE_FLOW_FLD_SZ_VLAN 2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
/* IPv6 address prefixes: first 32/48/64 bits of a full IPv6 address */
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IPV4_ID 2
17 #define ICE_FLOW_FLD_SZ_IPV6_ID 4
18 #define ICE_FLOW_FLD_SZ_IP_CHKSUM 2
19 #define ICE_FLOW_FLD_SZ_TCP_CHKSUM 2
20 #define ICE_FLOW_FLD_SZ_UDP_CHKSUM 2
21 #define ICE_FLOW_FLD_SZ_SCTP_CHKSUM 4
22 #define ICE_FLOW_FLD_SZ_IP_DSCP 1
23 #define ICE_FLOW_FLD_SZ_IP_TTL 1
24 #define ICE_FLOW_FLD_SZ_IP_PROT 1
25 #define ICE_FLOW_FLD_SZ_PORT 2
26 #define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
27 #define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
28 #define ICE_FLOW_FLD_SZ_ICMP_CODE 1
29 #define ICE_FLOW_FLD_SZ_ARP_OPER 2
/* Tunnel / session identifiers */
30 #define ICE_FLOW_FLD_SZ_GRE_KEYID 4
31 #define ICE_FLOW_FLD_SZ_GTP_TEID 4
32 #define ICE_FLOW_FLD_SZ_GTP_QFI 2
33 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2
34 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
35 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID 4
36 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
37 #define ICE_FLOW_FLD_SZ_AH_SPI 4
38 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4
39 #define ICE_FLOW_FLD_SZ_VXLAN_VNI 4
40 #define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
42 /* Describe properties of a protocol header field */
43 struct ice_flow_field_info {
44 enum ice_flow_seg_hdr hdr;
45 s16 off; /* Offset from start of a protocol header, in bits */
46 u16 size; /* Size of fields in bits */
47 u16 mask; /* 16-bit mask for field */
50 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
52 .off = (_offset_bytes) * BITS_PER_BYTE, \
53 .size = (_size_bytes) * BITS_PER_BYTE, \
57 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
59 .off = (_offset_bytes) * BITS_PER_BYTE, \
60 .size = (_size_bytes) * BITS_PER_BYTE, \
64 /* Table containing properties of supported protocol header fields */
66 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
68 /* ICE_FLOW_FIELD_IDX_ETH_DA */
69 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
70 /* ICE_FLOW_FIELD_IDX_ETH_SA */
71 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
72 /* ICE_FLOW_FIELD_IDX_S_VLAN */
73 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
74 /* ICE_FLOW_FIELD_IDX_C_VLAN */
75 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
76 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
77 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
79 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
80 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
82 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
83 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
85 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
86 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
87 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
88 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
89 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
90 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
91 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
92 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
93 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
94 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
95 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
96 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
97 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
99 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
100 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
101 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
102 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
103 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
104 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
105 /* ICE_FLOW_FIELD_IDX_IPV4_CHKSUM */
106 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 10, ICE_FLOW_FLD_SZ_IP_CHKSUM),
107 /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
108 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
109 ICE_FLOW_FLD_SZ_IPV4_ID),
110 /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
111 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
112 ICE_FLOW_FLD_SZ_IPV6_ID),
113 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
114 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
115 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
116 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
118 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
119 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
120 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
121 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
122 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
123 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
124 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
125 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
126 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
127 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
128 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
129 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
130 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
132 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
133 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
134 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
135 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
136 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
137 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
138 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
139 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
140 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
141 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
142 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
143 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
144 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
145 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
146 /* ICE_FLOW_FIELD_IDX_TCP_CHKSUM */
147 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 16, ICE_FLOW_FLD_SZ_TCP_CHKSUM),
148 /* ICE_FLOW_FIELD_IDX_UDP_CHKSUM */
149 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 6, ICE_FLOW_FLD_SZ_UDP_CHKSUM),
150 /* ICE_FLOW_FIELD_IDX_SCTP_CHKSUM */
151 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 8,
152 ICE_FLOW_FLD_SZ_SCTP_CHKSUM),
154 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
155 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
156 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
157 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
158 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
159 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
160 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
161 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
162 /* ICE_FLOW_FIELD_IDX_ARP_OP */
163 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
165 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
166 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
167 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
168 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
170 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
171 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
173 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
174 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
175 ICE_FLOW_FLD_SZ_GTP_TEID),
176 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
177 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
178 ICE_FLOW_FLD_SZ_GTP_TEID),
179 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
180 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
181 ICE_FLOW_FLD_SZ_GTP_TEID),
182 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
183 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
184 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
185 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
186 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
187 ICE_FLOW_FLD_SZ_GTP_TEID),
188 /* ICE_FLOW_FIELD_IDX_GTPU_UP_QFI */
189 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_UP, 22,
190 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
191 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
192 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
193 ICE_FLOW_FLD_SZ_GTP_TEID),
194 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI */
195 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_DWN, 22,
196 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
198 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
199 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
200 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
202 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
203 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
204 ICE_FLOW_FLD_SZ_PFCP_SEID),
206 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
207 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
208 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
210 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
211 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
212 ICE_FLOW_FLD_SZ_ESP_SPI),
214 /* ICE_FLOW_FIELD_IDX_AH_SPI */
215 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
216 ICE_FLOW_FLD_SZ_AH_SPI),
218 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
219 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
220 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
221 /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
222 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
223 ICE_FLOW_FLD_SZ_VXLAN_VNI),
225 /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
226 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
227 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
229 /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
230 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
231 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
234 /* Bitmaps indicating relevant packet types for a particular protocol header
236 * Packet types for packets with an Outer/First/Single MAC header
237 */
238 static const u32 ice_ptypes_mac_ofos[] = {
239 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
240 0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
241 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
242 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
243 0x00000000, 0x00000000, 0x00000000, 0x00000000,
244 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 0x00000000, 0x00000000, 0x00000000, 0x00000000,
246 0x00000000, 0x00000000, 0x00000000, 0x00000000,
249 /* Packet types for packets with an Innermost/Last MAC VLAN header */
250 static const u32 ice_ptypes_macvlan_il[] = {
251 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
252 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
254 0x00000000, 0x00000000, 0x00000000, 0x00000000,
255 0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 0x00000000, 0x00000000, 0x00000000, 0x00000000,
258 0x00000000, 0x00000000, 0x00000000, 0x00000000,
261 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
262 * does NOT include IPV4 other PTYPEs
263 */
264 static const u32 ice_ptypes_ipv4_ofos[] = {
265 0x1D800000, 0x24000800, 0x00000000, 0x00000000,
266 0x00000000, 0x00000155, 0x00000000, 0x00000000,
267 0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
268 0x00001500, 0x00000000, 0x00000000, 0x00000000,
269 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 0x00000000, 0x00000000, 0x00000000, 0x00000000,
271 0x00000000, 0x00000000, 0x00000000, 0x00000000,
272 0x00000000, 0x00000000, 0x00000000, 0x00000000,
275 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
276 * includes IPV4 other PTYPEs
277 */
278 static const u32 ice_ptypes_ipv4_ofos_all[] = {
279 0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
280 0x00000000, 0x00000155, 0x00000000, 0x00000000,
281 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
282 0x03FFD000, 0x00000000, 0x02FBEFBC, 0x00000000,
283 0x00000000, 0x00000000, 0x00000000, 0x00000000,
284 0x00000000, 0x00000000, 0x00000000, 0x00000000,
285 0x00000000, 0x00000000, 0x00000000, 0x00000000,
286 0x00000000, 0x00000000, 0x00000000, 0x00000000,
289 /* Packet types for packets with an Innermost/Last IPv4 header */
290 static const u32 ice_ptypes_ipv4_il[] = {
291 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
292 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
293 0x00000000, 0x00000000, 0x001FF800, 0x00100000,
294 0xFC0FC000, 0x00000000, 0xBC0BC0BC, 0x00000BC0,
295 0x00000000, 0x00000000, 0x00000000, 0x00000000,
296 0x00000000, 0x00000000, 0x00000000, 0x00000000,
297 0x00000000, 0x00000000, 0x00000000, 0x00000000,
298 0x00000000, 0x00000000, 0x00000000, 0x00000000,
301 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
302 * does NOT include IPV6 other PTYPEs
303 */
304 static const u32 ice_ptypes_ipv6_ofos[] = {
305 0x00000000, 0x00000000, 0x76000000, 0x10002000,
306 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
307 0x00000000, 0x03F00000, 0x00000540, 0x00000000,
308 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
309 0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 0x00000000, 0x00000000, 0x00000000, 0x00000000,
311 0x00000000, 0x00000000, 0x00000000, 0x00000000,
312 0x00000000, 0x00000000, 0x00000000, 0x00000000,
315 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
316 * includes IPV6 other PTYPEs
317 */
318 static const u32 ice_ptypes_ipv6_ofos_all[] = {
319 0x00000000, 0x00000000, 0x76000000, 0x1EFDE000,
320 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
321 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
322 0xFC002000, 0x0000003F, 0xBC000000, 0x0002FBEF,
323 0x00000000, 0x00000000, 0x00000000, 0x00000000,
324 0x00000000, 0x00000000, 0x00000000, 0x00000000,
325 0x00000000, 0x00000000, 0x00000000, 0x00000000,
326 0x00000000, 0x00000000, 0x00000000, 0x00000000,
329 /* Packet types for packets with an Innermost/Last IPv6 header */
330 static const u32 ice_ptypes_ipv6_il[] = {
331 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
332 0x00000770, 0x00000000, 0x00000000, 0x00000000,
333 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
334 0x03F00000, 0x0000003F, 0x02F02F00, 0x0002F02F,
335 0x00000000, 0x00000000, 0x00000000, 0x00000000,
336 0x00000000, 0x00000000, 0x00000000, 0x00000000,
337 0x00000000, 0x00000000, 0x00000000, 0x00000000,
338 0x00000000, 0x00000000, 0x00000000, 0x00000000,
341 /* Packet types for packets with an Outer/First/Single
342 * non-frag IPv4 header - no L4
343 */
344 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
345 0x10800000, 0x04000800, 0x00000000, 0x00000000,
346 0x00000000, 0x00000000, 0x00000000, 0x00000000,
347 0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
348 0x00001500, 0x00000000, 0x00000000, 0x00000000,
349 0x00000000, 0x00000000, 0x00000000, 0x00000000,
350 0x00000000, 0x00000000, 0x00000000, 0x00000000,
351 0x00000000, 0x00000000, 0x00000000, 0x00000000,
352 0x00000000, 0x00000000, 0x00000000, 0x00000000,
355 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
356 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
357 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
358 0x00000008, 0x00000000, 0x00000000, 0x00000000,
359 0x00000000, 0x00000000, 0x00139800, 0x00000000,
360 0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
361 0x00000000, 0x00000000, 0x00000000, 0x00000000,
362 0x00000000, 0x00000000, 0x00000000, 0x00000000,
363 0x00000000, 0x00000000, 0x00000000, 0x00000000,
364 0x00000000, 0x00000000, 0x00000000, 0x00000000,
367 /* Packet types for packets with an Outer/First/Single
368 * non-frag IPv6 header - no L4
369 */
370 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
371 0x00000000, 0x00000000, 0x42000000, 0x10002000,
372 0x00000000, 0x00000000, 0x00000000, 0x00000000,
373 0x00000000, 0x02300000, 0x00000540, 0x00000000,
374 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
375 0x00000000, 0x00000000, 0x00000000, 0x00000000,
376 0x00000000, 0x00000000, 0x00000000, 0x00000000,
377 0x00000000, 0x00000000, 0x00000000, 0x00000000,
378 0x00000000, 0x00000000, 0x00000000, 0x00000000,
381 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
382 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
383 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
384 0x00000430, 0x00000000, 0x00000000, 0x00000000,
385 0x00000000, 0x00000000, 0x4e600000, 0x00000000,
386 0x02300000, 0x00000023, 0x00000000, 0x00000000,
387 0x00000000, 0x00000000, 0x00000000, 0x00000000,
388 0x00000000, 0x00000000, 0x00000000, 0x00000000,
389 0x00000000, 0x00000000, 0x00000000, 0x00000000,
390 0x00000000, 0x00000000, 0x00000000, 0x00000000,
393 /* Packet types for packets with an Outermost/First ARP header */
394 static const u32 ice_ptypes_arp_of[] = {
395 0x00000800, 0x00000000, 0x00000000, 0x00000000,
396 0x00000000, 0x00000000, 0x00000000, 0x00000000,
397 0x00000000, 0x00000000, 0x00000000, 0x00000000,
398 0x00000000, 0x00000000, 0x00000000, 0x00000000,
399 0x00000000, 0x00000000, 0x00000000, 0x00000000,
400 0x00000000, 0x00000000, 0x00000000, 0x00000000,
401 0x00000000, 0x00000000, 0x00000000, 0x00000000,
402 0x00000000, 0x00000000, 0x00000000, 0x00000000,
405 /* UDP Packet types for non-tunneled packets or tunneled
406 * packets with inner UDP.
407 */
408 static const u32 ice_ptypes_udp_il[] = {
409 0x81000000, 0x20204040, 0x04000010, 0x80810102,
410 0x00000040, 0x00000000, 0x00000000, 0x00000000,
411 0x00000000, 0x00410000, 0x908427E0, 0x00100007,
412 0x10410000, 0x00000004, 0x10410410, 0x00004104,
413 0x00000000, 0x00000000, 0x00000000, 0x00000000,
414 0x00000000, 0x00000000, 0x00000000, 0x00000000,
415 0x00000000, 0x00000000, 0x00000000, 0x00000000,
416 0x00000000, 0x00000000, 0x00000000, 0x00000000,
419 /* Packet types for packets with an Innermost/Last TCP header */
420 static const u32 ice_ptypes_tcp_il[] = {
421 0x04000000, 0x80810102, 0x10000040, 0x02040408,
422 0x00000102, 0x00000000, 0x00000000, 0x00000000,
423 0x00000000, 0x00820000, 0x21084000, 0x00000000,
424 0x20820000, 0x00000008, 0x20820820, 0x00008208,
425 0x00000000, 0x00000000, 0x00000000, 0x00000000,
426 0x00000000, 0x00000000, 0x00000000, 0x00000000,
427 0x00000000, 0x00000000, 0x00000000, 0x00000000,
428 0x00000000, 0x00000000, 0x00000000, 0x00000000,
431 /* Packet types for packets with an Innermost/Last SCTP header */
432 static const u32 ice_ptypes_sctp_il[] = {
433 0x08000000, 0x01020204, 0x20000081, 0x04080810,
434 0x00000204, 0x00000000, 0x00000000, 0x00000000,
435 0x00000000, 0x01040000, 0x00000000, 0x00000000,
436 0x41040000, 0x00000010, 0x00000000, 0x00000000,
437 0x00000000, 0x00000000, 0x00000000, 0x00000000,
438 0x00000000, 0x00000000, 0x00000000, 0x00000000,
439 0x00000000, 0x00000000, 0x00000000, 0x00000000,
440 0x00000000, 0x00000000, 0x00000000, 0x00000000,
443 /* Packet types for packets with an Outermost/First ICMP header */
444 static const u32 ice_ptypes_icmp_of[] = {
445 0x10000000, 0x00000000, 0x00000000, 0x00000000,
446 0x00000000, 0x00000000, 0x00000000, 0x00000000,
447 0x00000000, 0x00000000, 0x00000000, 0x00000000,
448 0x00000000, 0x00000000, 0x00000000, 0x00000000,
449 0x00000000, 0x00000000, 0x00000000, 0x00000000,
450 0x00000000, 0x00000000, 0x00000000, 0x00000000,
451 0x00000000, 0x00000000, 0x00000000, 0x00000000,
452 0x00000000, 0x00000000, 0x00000000, 0x00000000,
455 /* Packet types for packets with an Innermost/Last ICMP header */
456 static const u32 ice_ptypes_icmp_il[] = {
457 0x00000000, 0x02040408, 0x40000102, 0x08101020,
458 0x00000408, 0x00000000, 0x00000000, 0x00000000,
459 0x00000000, 0x00000000, 0x42108000, 0x00000000,
460 0x82080000, 0x00000020, 0x00000000, 0x00000000,
461 0x00000000, 0x00000000, 0x00000000, 0x00000000,
462 0x00000000, 0x00000000, 0x00000000, 0x00000000,
463 0x00000000, 0x00000000, 0x00000000, 0x00000000,
464 0x00000000, 0x00000000, 0x00000000, 0x00000000,
467 /* Packet types for packets with an Outermost/First GRE header */
468 static const u32 ice_ptypes_gre_of[] = {
469 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
470 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
471 0x00000000, 0x00000000, 0x00000000, 0x00000000,
472 0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF,
473 0x00000000, 0x00000000, 0x00000000, 0x00000000,
474 0x00000000, 0x00000000, 0x00000000, 0x00000000,
475 0x00000000, 0x00000000, 0x00000000, 0x00000000,
476 0x00000000, 0x00000000, 0x00000000, 0x00000000,
479 /* Packet types for packets with an Innermost/Last MAC header */
480 static const u32 ice_ptypes_mac_il[] = {
481 0x00000000, 0x20000000, 0x00000000, 0x00000000,
482 0x00000000, 0x00000000, 0x00000000, 0x00000000,
483 0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 0x00000000, 0x00000000, 0x00000000, 0x00000000,
485 0x00000000, 0x00000000, 0x00000000, 0x00000000,
486 0x00000000, 0x00000000, 0x00000000, 0x00000000,
487 0x00000000, 0x00000000, 0x00000000, 0x00000000,
488 0x00000000, 0x00000000, 0x00000000, 0x00000000,
491 /* Packet types for GTPC */
492 static const u32 ice_ptypes_gtpc[] = {
493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
494 0x00000000, 0x00000000, 0x00000000, 0x00000000,
495 0x00000000, 0x00000000, 0x000001E0, 0x00000000,
496 0x00000000, 0x00000000, 0x00000000, 0x00000000,
497 0x00000000, 0x00000000, 0x00000000, 0x00000000,
498 0x00000000, 0x00000000, 0x00000000, 0x00000000,
499 0x00000000, 0x00000000, 0x00000000, 0x00000000,
500 0x00000000, 0x00000000, 0x00000000, 0x00000000,
503 /* Packet types for VXLAN with VNI */
504 static const u32 ice_ptypes_vxlan_vni[] = {
505 0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
506 0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
507 0x00000000, 0x00000000, 0x00000000, 0x00000000,
508 0x00000000, 0x00000000, 0x00000000, 0x00000000,
509 0x00000000, 0x00000000, 0x00000000, 0x00000000,
510 0x00000000, 0x00000000, 0x00000000, 0x00000000,
511 0x00000000, 0x00000000, 0x00000000, 0x00000000,
512 0x00000000, 0x00000000, 0x00000000, 0x00000000,
515 /* Packet types for GTPC with TEID */
516 static const u32 ice_ptypes_gtpc_tid[] = {
517 0x00000000, 0x00000000, 0x00000000, 0x00000000,
518 0x00000000, 0x00000000, 0x00000000, 0x00000000,
519 0x00000000, 0x00000000, 0x00000060, 0x00000000,
520 0x00000000, 0x00000000, 0x00000000, 0x00000000,
521 0x00000000, 0x00000000, 0x00000000, 0x00000000,
522 0x00000000, 0x00000000, 0x00000000, 0x00000000,
523 0x00000000, 0x00000000, 0x00000000, 0x00000000,
524 0x00000000, 0x00000000, 0x00000000, 0x00000000,
527 /* Packet types for GTPU */
528 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
529 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
530 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
531 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
532 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
533 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
534 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
535 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
536 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
537 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
538 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
539 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
540 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
541 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
542 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
543 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
544 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
545 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
546 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
547 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
548 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
549 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
550 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
551 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
552 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
553 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
554 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
555 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
556 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
557 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
558 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
559 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
560 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
561 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
562 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
563 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
564 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
565 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
566 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
567 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
568 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
569 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
570 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
571 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
572 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
573 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
574 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
575 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
576 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
577 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
578 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
579 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
580 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
581 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
582 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
583 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
584 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
585 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
586 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
587 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
588 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
591 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
592 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
593 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
594 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
595 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
596 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
597 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
598 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
599 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
600 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
601 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
602 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
603 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
604 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
605 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
606 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
607 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
608 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
609 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
610 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
611 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
612 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
613 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
614 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
615 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
616 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
617 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
618 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
619 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
620 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
621 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
622 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
623 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
624 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
625 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
626 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
627 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
628 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
629 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
630 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
631 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
632 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
633 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
634 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
635 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
636 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
637 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
638 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
639 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
640 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
641 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
642 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
643 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
644 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
645 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
646 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
647 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
648 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
649 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
650 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
651 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
654 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
655 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
656 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
657 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
658 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
659 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
660 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
661 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
662 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
663 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
664 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
665 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
666 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
667 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
668 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
669 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
670 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
671 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
672 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
673 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
674 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
675 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
676 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
677 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
678 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
679 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
680 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
681 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
682 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
683 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
684 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
685 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
686 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
687 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
688 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
689 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
690 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
691 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
692 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
693 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
694 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
695 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
696 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
697 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
698 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
699 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
700 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
701 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
702 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
703 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
704 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
705 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
706 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
707 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
708 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
709 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
710 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
711 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
712 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
713 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
714 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
717 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
718 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
719 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
720 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
721 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
722 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
723 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
724 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
725 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
726 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
727 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
728 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
729 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
730 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
731 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
732 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
733 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
734 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
735 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
736 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
737 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
738 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
739 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
740 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
741 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
742 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
743 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
744 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
745 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
746 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
747 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
748 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
749 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
750 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
751 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
752 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
753 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
754 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
755 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
756 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
757 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
758 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
759 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
760 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
761 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
762 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
763 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
764 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
765 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
766 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
767 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
768 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
769 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
770 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
771 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
772 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
773 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
774 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
775 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
776 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
777 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
780 static const u32 ice_ptypes_gtpu[] = {
781 0x00000000, 0x00000000, 0x00000000, 0x00000000,
782 0x00000000, 0x00000000, 0x00000000, 0x00000000,
783 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
784 0x00000000, 0x0000003F, 0xBEFBEFBC, 0x0002FBEF,
785 0x00000000, 0x00000000, 0x00000000, 0x00000000,
786 0x00000000, 0x00000000, 0x00000000, 0x00000000,
787 0x00000000, 0x00000000, 0x00000000, 0x00000000,
788 0x00000000, 0x00000000, 0x00000000, 0x00000000,
791 /* Packet types for pppoe */
792 static const u32 ice_ptypes_pppoe[] = {
793 0x00000000, 0x00000000, 0x00000000, 0x00000000,
794 0x00000000, 0x00000000, 0x00000000, 0x00000000,
795 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
796 0x00000000, 0x00000000, 0x00000000, 0x00000000,
797 0x00000000, 0x00000000, 0x00000000, 0x00000000,
798 0x00000000, 0x00000000, 0x00000000, 0x00000000,
799 0x00000000, 0x00000000, 0x00000000, 0x00000000,
800 0x00000000, 0x00000000, 0x00000000, 0x00000000,
803 /* Packet types for packets with PFCP NODE header */
804 static const u32 ice_ptypes_pfcp_node[] = {
805 0x00000000, 0x00000000, 0x00000000, 0x00000000,
806 0x00000000, 0x00000000, 0x00000000, 0x00000000,
807 0x00000000, 0x00000000, 0x80000000, 0x00000002,
808 0x00000000, 0x00000000, 0x00000000, 0x00000000,
809 0x00000000, 0x00000000, 0x00000000, 0x00000000,
810 0x00000000, 0x00000000, 0x00000000, 0x00000000,
811 0x00000000, 0x00000000, 0x00000000, 0x00000000,
812 0x00000000, 0x00000000, 0x00000000, 0x00000000,
815 /* Packet types for packets with PFCP SESSION header */
816 static const u32 ice_ptypes_pfcp_session[] = {
817 0x00000000, 0x00000000, 0x00000000, 0x00000000,
818 0x00000000, 0x00000000, 0x00000000, 0x00000000,
819 0x00000000, 0x00000000, 0x00000000, 0x00000005,
820 0x00000000, 0x00000000, 0x00000000, 0x00000000,
821 0x00000000, 0x00000000, 0x00000000, 0x00000000,
822 0x00000000, 0x00000000, 0x00000000, 0x00000000,
823 0x00000000, 0x00000000, 0x00000000, 0x00000000,
824 0x00000000, 0x00000000, 0x00000000, 0x00000000,
827 /* Packet types for l2tpv3 */
828 static const u32 ice_ptypes_l2tpv3[] = {
829 0x00000000, 0x00000000, 0x00000000, 0x00000000,
830 0x00000000, 0x00000000, 0x00000000, 0x00000000,
831 0x00000000, 0x00000000, 0x00000000, 0x00000300,
832 0x00000000, 0x00000000, 0x00000000, 0x00000000,
833 0x00000000, 0x00000000, 0x00000000, 0x00000000,
834 0x00000000, 0x00000000, 0x00000000, 0x00000000,
835 0x00000000, 0x00000000, 0x00000000, 0x00000000,
836 0x00000000, 0x00000000, 0x00000000, 0x00000000,
839 /* Packet types for esp */
840 static const u32 ice_ptypes_esp[] = {
841 0x00000000, 0x00000000, 0x00000000, 0x00000000,
842 0x00000000, 0x00000003, 0x00000000, 0x00000000,
843 0x00000000, 0x00000000, 0x00000000, 0x00000000,
844 0x00000000, 0x00000000, 0x00000000, 0x00000000,
845 0x00000000, 0x00000000, 0x00000000, 0x00000000,
846 0x00000000, 0x00000000, 0x00000000, 0x00000000,
847 0x00000000, 0x00000000, 0x00000000, 0x00000000,
848 0x00000000, 0x00000000, 0x00000000, 0x00000000,
851 /* Packet types for ah */
852 static const u32 ice_ptypes_ah[] = {
853 0x00000000, 0x00000000, 0x00000000, 0x00000000,
854 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
855 0x00000000, 0x00000000, 0x00000000, 0x00000000,
856 0x00000000, 0x00000000, 0x00000000, 0x00000000,
857 0x00000000, 0x00000000, 0x00000000, 0x00000000,
858 0x00000000, 0x00000000, 0x00000000, 0x00000000,
859 0x00000000, 0x00000000, 0x00000000, 0x00000000,
860 0x00000000, 0x00000000, 0x00000000, 0x00000000,
863 /* Packet types for packets with NAT_T ESP header */
864 static const u32 ice_ptypes_nat_t_esp[] = {
865 0x00000000, 0x00000000, 0x00000000, 0x00000000,
866 0x00000000, 0x00000030, 0x00000000, 0x00000000,
867 0x00000000, 0x00000000, 0x00000000, 0x00000000,
868 0x00000000, 0x00000000, 0x00000000, 0x00000000,
869 0x00000000, 0x00000000, 0x00000000, 0x00000000,
870 0x00000000, 0x00000000, 0x00000000, 0x00000000,
871 0x00000000, 0x00000000, 0x00000000, 0x00000000,
872 0x00000000, 0x00000000, 0x00000000, 0x00000000,
875 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
876 0x00000846, 0x00000000, 0x00000000, 0x00000000,
877 0x00000000, 0x00000000, 0x00000000, 0x00000000,
878 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
879 0x00000000, 0x00000000, 0x00000000, 0x00000000,
880 0x00000000, 0x00000000, 0x00000000, 0x00000000,
881 0x00000000, 0x00000000, 0x00000000, 0x00000000,
882 0x00000000, 0x00000000, 0x00000000, 0x00000000,
883 0x00000000, 0x00000000, 0x00000000, 0x00000000,
886 static const u32 ice_ptypes_gtpu_no_ip[] = {
887 0x00000000, 0x00000000, 0x00000000, 0x00000000,
888 0x00000000, 0x00000000, 0x00000000, 0x00000000,
889 0x00000000, 0x00000000, 0x00000600, 0x00000000,
890 0x00000000, 0x00000000, 0x00000000, 0x00000000,
891 0x00000000, 0x00000000, 0x00000000, 0x00000000,
892 0x00000000, 0x00000000, 0x00000000, 0x00000000,
893 0x00000000, 0x00000000, 0x00000000, 0x00000000,
894 0x00000000, 0x00000000, 0x00000000, 0x00000000,
897 static const u32 ice_ptypes_ecpri_tp0[] = {
898 0x00000000, 0x00000000, 0x00000000, 0x00000000,
899 0x00000000, 0x00000000, 0x00000000, 0x00000000,
900 0x00000000, 0x00000000, 0x00000000, 0x00000400,
901 0x00000000, 0x00000000, 0x00000000, 0x00000000,
902 0x00000000, 0x00000000, 0x00000000, 0x00000000,
903 0x00000000, 0x00000000, 0x00000000, 0x00000000,
904 0x00000000, 0x00000000, 0x00000000, 0x00000000,
905 0x00000000, 0x00000000, 0x00000000, 0x00000000,
908 static const u32 ice_ptypes_udp_ecpri_tp0[] = {
909 0x00000000, 0x00000000, 0x00000000, 0x00000000,
910 0x00000000, 0x00000000, 0x00000000, 0x00000000,
911 0x00000000, 0x00000000, 0x00000000, 0x00100000,
912 0x00000000, 0x00000000, 0x00000000, 0x00000000,
913 0x00000000, 0x00000000, 0x00000000, 0x00000000,
914 0x00000000, 0x00000000, 0x00000000, 0x00000000,
915 0x00000000, 0x00000000, 0x00000000, 0x00000000,
916 0x00000000, 0x00000000, 0x00000000, 0x00000000,
919 static const u32 ice_ptypes_l2tpv2[] = {
920 0x00000000, 0x00000000, 0x00000000, 0x00000000,
921 0x00000000, 0x00000000, 0x00000000, 0x00000000,
922 0x00000000, 0x00000000, 0x00000000, 0x00000000,
923 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
924 0x00000000, 0x00000000, 0x00000000, 0x00000000,
925 0x00000000, 0x00000000, 0x00000000, 0x00000000,
926 0x00000000, 0x00000000, 0x00000000, 0x00000000,
927 0x00000000, 0x00000000, 0x00000000, 0x00000000,
930 static const u32 ice_ptypes_ppp[] = {
931 0x00000000, 0x00000000, 0x00000000, 0x00000000,
932 0x00000000, 0x00000000, 0x00000000, 0x00000000,
933 0x00000000, 0x00000000, 0x00000000, 0x00000000,
934 0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
935 0x00000000, 0x00000000, 0x00000000, 0x00000000,
936 0x00000000, 0x00000000, 0x00000000, 0x00000000,
937 0x00000000, 0x00000000, 0x00000000, 0x00000000,
938 0x00000000, 0x00000000, 0x00000000, 0x00000000,
941 static const u32 ice_ptypes_ipv4_frag[] = {
942 0x00400000, 0x00000000, 0x00000000, 0x00000000,
943 0x00000000, 0x00000000, 0x00000000, 0x00000000,
944 0x00000000, 0x00000000, 0x00000000, 0x00000000,
945 0x00000000, 0x00000000, 0x00000000, 0x00000000,
946 0x00000000, 0x00000000, 0x00000000, 0x00000000,
947 0x00000000, 0x00000000, 0x00000000, 0x00000000,
948 0x00000000, 0x00000000, 0x00000000, 0x00000000,
949 0x00000000, 0x00000000, 0x00000000, 0x00000000,
952 static const u32 ice_ptypes_ipv6_frag[] = {
953 0x00000000, 0x00000000, 0x01000000, 0x00000000,
954 0x00000000, 0x00000000, 0x00000000, 0x00000000,
955 0x00000000, 0x00000000, 0x00000000, 0x00000000,
956 0x00000000, 0x00000000, 0x00000000, 0x00000000,
957 0x00000000, 0x00000000, 0x00000000, 0x00000000,
958 0x00000000, 0x00000000, 0x00000000, 0x00000000,
959 0x00000000, 0x00000000, 0x00000000, 0x00000000,
960 0x00000000, 0x00000000, 0x00000000, 0x00000000,
963 /* Manage parameters and info. used during the creation of a flow profile */
964 struct ice_flow_prof_params {
966 u16 entry_length; /* # of bytes formatted entry will require */
968 struct ice_flow_prof *prof;
970 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
971 * This will give us the direction flags.
973 struct ice_fv_word es[ICE_MAX_FV_WORDS];
974 /* attributes can be used to add attributes to a particular PTYPE */
975 const struct ice_ptype_attributes *attr;
978 u16 mask[ICE_MAX_FV_WORDS];
979 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Tunnel/extension headers that, when present, classify a seg as inner */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	 ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
	 ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
	 ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP | ICE_FLOW_SEG_HDR_GRE)

#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER	\
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
1004 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
1005 * @segs: array of one or more packet segments that describe the flow
1006 * @segs_cnt: number of packet segments provided
1008 static enum ice_status
1009 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
1013 for (i = 0; i < segs_cnt; i++) {
1014 /* Multiple L3 headers */
1015 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
1016 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
1017 return ICE_ERR_PARAM;
1019 /* Multiple L4 headers */
1020 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
1021 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
1022 return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
1040 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
1041 * @params: information about the flow to be processed
1042 * @seg: index of packet segment whose header size is to be determined
1044 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
1049 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
1050 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
1053 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
1054 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
1055 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
1056 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
1057 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
1058 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
1059 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
1060 /* A L3 header is required if L4 is specified */
1064 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
1065 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
1066 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
1067 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
1068 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
1069 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
1070 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
1071 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
1077 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
1078 * @params: information about the flow to be processed
1080 * This function identifies the packet types associated with the protocol
1081 * headers being present in packet segments of the specified flow profile.
1083 static enum ice_status
1084 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
1086 struct ice_flow_prof *prof;
1089 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
1092 prof = params->prof;
1094 for (i = 0; i < params->prof->segs_cnt; i++) {
1095 const ice_bitmap_t *src;
1098 hdrs = prof->segs[i].hdrs;
1100 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
1101 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
1102 (const ice_bitmap_t *)ice_ptypes_mac_il;
1103 ice_and_bitmap(params->ptypes, params->ptypes, src,
1104 ICE_FLOW_PTYPE_MAX);
1107 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
1108 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
1109 ice_and_bitmap(params->ptypes, params->ptypes, src,
1110 ICE_FLOW_PTYPE_MAX);
1113 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
1114 ice_and_bitmap(params->ptypes, params->ptypes,
1115 (const ice_bitmap_t *)ice_ptypes_arp_of,
1116 ICE_FLOW_PTYPE_MAX);
1119 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
1120 src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
1121 ice_and_bitmap(params->ptypes, params->ptypes, src,
1122 ICE_FLOW_PTYPE_MAX);
1124 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1125 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1127 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
1128 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
1129 ice_and_bitmap(params->ptypes, params->ptypes, src,
1130 ICE_FLOW_PTYPE_MAX);
1131 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1132 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1134 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
1135 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
1136 ice_and_bitmap(params->ptypes, params->ptypes, src,
1137 ICE_FLOW_PTYPE_MAX);
1138 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1139 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1140 src = (const ice_bitmap_t *)ice_ptypes_ipv4_frag;
1141 ice_and_bitmap(params->ptypes, params->ptypes, src,
1142 ICE_FLOW_PTYPE_MAX);
1143 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1144 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1145 src = (const ice_bitmap_t *)ice_ptypes_ipv6_frag;
1146 ice_and_bitmap(params->ptypes, params->ptypes, src,
1147 ICE_FLOW_PTYPE_MAX);
1148 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1149 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1150 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
1151 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
1152 ice_and_bitmap(params->ptypes, params->ptypes, src,
1153 ICE_FLOW_PTYPE_MAX);
1154 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
1155 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
1156 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
1157 ice_and_bitmap(params->ptypes, params->ptypes, src,
1158 ICE_FLOW_PTYPE_MAX);
1159 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1160 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1161 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
1162 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
1163 ice_and_bitmap(params->ptypes, params->ptypes, src,
1164 ICE_FLOW_PTYPE_MAX);
1165 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
1166 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
1167 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
1168 ice_and_bitmap(params->ptypes, params->ptypes, src,
1169 ICE_FLOW_PTYPE_MAX);
1172 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
1173 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
1174 ice_and_bitmap(params->ptypes, params->ptypes,
1175 src, ICE_FLOW_PTYPE_MAX);
1176 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
1177 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1178 ice_and_bitmap(params->ptypes, params->ptypes, src,
1179 ICE_FLOW_PTYPE_MAX);
1181 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1182 ice_andnot_bitmap(params->ptypes, params->ptypes, src,
1183 ICE_FLOW_PTYPE_MAX);
1186 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
1187 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
1188 ice_and_bitmap(params->ptypes, params->ptypes, src,
1189 ICE_FLOW_PTYPE_MAX);
1190 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
1191 ice_and_bitmap(params->ptypes, params->ptypes,
1192 (const ice_bitmap_t *)ice_ptypes_tcp_il,
1193 ICE_FLOW_PTYPE_MAX);
1194 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
1195 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
1196 ice_and_bitmap(params->ptypes, params->ptypes, src,
1197 ICE_FLOW_PTYPE_MAX);
1200 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
1201 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
1202 (const ice_bitmap_t *)ice_ptypes_icmp_il;
1203 ice_and_bitmap(params->ptypes, params->ptypes, src,
1204 ICE_FLOW_PTYPE_MAX);
1205 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
1206 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
1207 ice_and_bitmap(params->ptypes, params->ptypes, src,
1208 ICE_FLOW_PTYPE_MAX);
1209 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
1210 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
1211 ice_and_bitmap(params->ptypes, params->ptypes,
1212 src, ICE_FLOW_PTYPE_MAX);
1213 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
1214 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
1215 ice_and_bitmap(params->ptypes, params->ptypes,
1216 src, ICE_FLOW_PTYPE_MAX);
1217 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
1218 src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
1219 ice_and_bitmap(params->ptypes, params->ptypes,
1220 src, ICE_FLOW_PTYPE_MAX);
1221 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
1222 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1223 ice_and_bitmap(params->ptypes, params->ptypes,
1224 src, ICE_FLOW_PTYPE_MAX);
1226 /* Attributes for GTP packet with downlink */
1227 params->attr = ice_attr_gtpu_down;
1228 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1229 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
1230 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1231 ice_and_bitmap(params->ptypes, params->ptypes,
1232 src, ICE_FLOW_PTYPE_MAX);
1234 /* Attributes for GTP packet with uplink */
1235 params->attr = ice_attr_gtpu_up;
1236 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1237 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
1238 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1239 ice_and_bitmap(params->ptypes, params->ptypes,
1240 src, ICE_FLOW_PTYPE_MAX);
1242 /* Attributes for GTP packet with Extension Header */
1243 params->attr = ice_attr_gtpu_eh;
1244 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1245 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1246 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1247 ice_and_bitmap(params->ptypes, params->ptypes,
1248 src, ICE_FLOW_PTYPE_MAX);
1250 /* Attributes for GTP packet without Extension Header */
1251 params->attr = ice_attr_gtpu_session;
1252 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1253 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
1254 src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
1255 ice_and_bitmap(params->ptypes, params->ptypes,
1256 src, ICE_FLOW_PTYPE_MAX);
1257 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1258 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1259 ice_and_bitmap(params->ptypes, params->ptypes,
1260 src, ICE_FLOW_PTYPE_MAX);
1261 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1262 src = (const ice_bitmap_t *)ice_ptypes_esp;
1263 ice_and_bitmap(params->ptypes, params->ptypes,
1264 src, ICE_FLOW_PTYPE_MAX);
1265 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1266 src = (const ice_bitmap_t *)ice_ptypes_ah;
1267 ice_and_bitmap(params->ptypes, params->ptypes,
1268 src, ICE_FLOW_PTYPE_MAX);
1269 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1270 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1271 ice_and_bitmap(params->ptypes, params->ptypes,
1272 src, ICE_FLOW_PTYPE_MAX);
1273 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1274 src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1275 ice_and_bitmap(params->ptypes, params->ptypes,
1276 src, ICE_FLOW_PTYPE_MAX);
1277 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1278 src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1279 ice_and_bitmap(params->ptypes, params->ptypes,
1280 src, ICE_FLOW_PTYPE_MAX);
1283 if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
1284 src = (const ice_bitmap_t *)ice_ptypes_ppp;
1285 ice_and_bitmap(params->ptypes, params->ptypes,
1286 src, ICE_FLOW_PTYPE_MAX);
1289 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1290 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1292 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1295 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1297 ice_and_bitmap(params->ptypes, params->ptypes,
1298 src, ICE_FLOW_PTYPE_MAX);
1300 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1301 ice_andnot_bitmap(params->ptypes, params->ptypes,
1302 src, ICE_FLOW_PTYPE_MAX);
1304 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1305 ice_andnot_bitmap(params->ptypes, params->ptypes,
1306 src, ICE_FLOW_PTYPE_MAX);
1314 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
1315 * @hw: pointer to the HW struct
1316 * @params: information about the flow to be processed
1317 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1319 * This function will allocate an extraction sequence entries for a DWORD size
1320 * chunk of the packet flags.
1322 static enum ice_status
1323 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1324 struct ice_flow_prof_params *params,
1325 enum ice_flex_mdid_pkt_flags flags)
1327 u8 fv_words = hw->blk[params->blk].es.fvw;
1330 /* Make sure the number of extraction sequence entries required does not
1331 * exceed the block's capacity.
1333 if (params->es_cnt >= fv_words)
1334 return ICE_ERR_MAX_LIMIT;
1336 /* some blocks require a reversed field vector layout */
1337 if (hw->blk[params->blk].es.reverse)
1338 idx = fv_words - params->es_cnt - 1;
1340 idx = params->es_cnt;
1342 params->es[idx].prot_id = ICE_PROT_META_ID;
1343 params->es[idx].off = flags;
1350 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1351 * @hw: pointer to the HW struct
1352 * @params: information about the flow to be processed
1353 * @seg: packet segment index of the field to be extracted
1354 * @fld: ID of field to be extracted
1355 * @match: bitfield of all fields
1357 * This function determines the protocol ID, offset, and size of the given
1358 * field. It then allocates one or more extraction sequence entries for the
1359 * given field, and fill the entries with protocol ID and offset information.
1361 static enum ice_status
1362 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1363 u8 seg, enum ice_flow_field fld, u64 match)
1365 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1366 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1367 u8 fv_words = hw->blk[params->blk].es.fvw;
1368 struct ice_flow_fld_info *flds;
1369 u16 cnt, ese_bits, i;
1375 flds = params->prof->segs[seg].fields;
1378 case ICE_FLOW_FIELD_IDX_ETH_DA:
1379 case ICE_FLOW_FIELD_IDX_ETH_SA:
1380 case ICE_FLOW_FIELD_IDX_S_VLAN:
1381 case ICE_FLOW_FIELD_IDX_C_VLAN:
1382 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1384 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1385 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1387 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1388 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1390 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1391 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1393 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1394 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1395 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1397 /* TTL and PROT share the same extraction seq. entry.
1398 * Each is considered a sibling to the other in terms of sharing
1399 * the same extraction sequence entry.
1401 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1402 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1404 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1406 /* If the sibling field is also included, that field's
1407 * mask needs to be included.
1409 if (match & BIT(sib))
1410 sib_mask = ice_flds_info[sib].mask;
1412 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1413 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1414 prot_id = ICE_PROT_IPV6_NEXT_PROTO;
1415 exist = ice_check_ddp_support_proto_id(hw, prot_id);
1417 prot_id = seg == 0 ?
1418 ICE_PROT_IPV6_OF_OR_S :
1421 prot_id = seg == 0 ?
1422 ICE_PROT_IPV6_NEXT_PROTO :
1425 /* TTL and PROT share the same extraction seq. entry.
1426 * Each is considered a sibling to the other in terms of sharing
1427 * the same extraction sequence entry.
1429 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1430 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1432 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1434 /* If the sibling field is also included, that field's
1435 * mask needs to be included.
1437 if (match & BIT(sib))
1438 sib_mask = ice_flds_info[sib].mask;
1440 case ICE_FLOW_FIELD_IDX_IPV4_SA:
1441 case ICE_FLOW_FIELD_IDX_IPV4_DA:
1442 case ICE_FLOW_FIELD_IDX_IPV4_CHKSUM:
1443 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1444 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1445 params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1447 prot_id = ICE_PROT_IPV4_IL_IL;
1449 case ICE_FLOW_FIELD_IDX_IPV4_ID:
1450 prot_id = ICE_PROT_IPV4_OF_OR_S;
1452 case ICE_FLOW_FIELD_IDX_IPV6_SA:
1453 case ICE_FLOW_FIELD_IDX_IPV6_DA:
1454 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1455 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1456 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1457 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1458 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1459 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1460 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1461 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1462 params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1464 prot_id = ICE_PROT_IPV6_IL_IL;
1466 case ICE_FLOW_FIELD_IDX_IPV6_ID:
1467 prot_id = ICE_PROT_IPV6_FRAG;
1469 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1470 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1471 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1472 case ICE_FLOW_FIELD_IDX_TCP_CHKSUM:
1473 prot_id = ICE_PROT_TCP_IL;
1475 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1476 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1477 case ICE_FLOW_FIELD_IDX_UDP_CHKSUM:
1478 prot_id = ICE_PROT_UDP_IL_OR_S;
1480 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1481 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1482 case ICE_FLOW_FIELD_IDX_SCTP_CHKSUM:
1483 prot_id = ICE_PROT_SCTP_IL;
1485 case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1486 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1487 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1488 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1489 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1490 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1491 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1492 case ICE_FLOW_FIELD_IDX_GTPU_UP_QFI:
1493 case ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI:
1494 /* GTP is accessed through UDP OF protocol */
1495 prot_id = ICE_PROT_UDP_OF;
1497 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1498 prot_id = ICE_PROT_PPPOE;
1500 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1501 prot_id = ICE_PROT_UDP_IL_OR_S;
1503 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1504 prot_id = ICE_PROT_L2TPV3;
1506 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1507 prot_id = ICE_PROT_ESP_F;
1509 case ICE_FLOW_FIELD_IDX_AH_SPI:
1510 prot_id = ICE_PROT_ESP_2;
1512 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1513 prot_id = ICE_PROT_UDP_IL_OR_S;
1515 case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1516 prot_id = ICE_PROT_ECPRI;
1518 case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1519 prot_id = ICE_PROT_UDP_IL_OR_S;
1521 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1522 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1523 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1524 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1525 case ICE_FLOW_FIELD_IDX_ARP_OP:
1526 prot_id = ICE_PROT_ARP_OF;
1528 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1529 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1530 /* ICMP type and code share the same extraction seq. entry */
1531 prot_id = (params->prof->segs[seg].hdrs &
1532 ICE_FLOW_SEG_HDR_IPV4) ?
1533 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1534 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1535 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1536 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1538 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1539 prot_id = ICE_PROT_GRE_OF;
1542 return ICE_ERR_NOT_IMPL;
1545 /* Each extraction sequence entry is a word in size, and extracts a
1546 * word-aligned offset from a protocol header.
1548 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1550 flds[fld].xtrct.prot_id = prot_id;
1551 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1552 ICE_FLOW_FV_EXTRACT_SZ;
1553 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1554 flds[fld].xtrct.idx = params->es_cnt;
1555 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1556 if (prot_id == ICE_PROT_IPV6_NEXT_PROTO) {
1557 flds[fld].xtrct.off = 0;
1558 flds[fld].xtrct.disp = 0;
1561 /* Adjust the next field-entry index after accommodating the number of
1562 * entries this field consumes
1564 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1565 ice_flds_info[fld].size, ese_bits);
1567 /* Fill in the extraction sequence entries needed for this field */
1568 off = flds[fld].xtrct.off;
1569 mask = flds[fld].xtrct.mask;
1570 for (i = 0; i < cnt; i++) {
1571 /* Only consume an extraction sequence entry if there is no
1572 * sibling field associated with this field or the sibling entry
1573 * already extracts the word shared with this field.
1575 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1576 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1577 flds[sib].xtrct.off != off) {
1580 /* Make sure the number of extraction sequence required
1581 * does not exceed the block's capability
1583 if (params->es_cnt >= fv_words)
1584 return ICE_ERR_MAX_LIMIT;
1586 /* some blocks require a reversed field vector layout */
1587 if (hw->blk[params->blk].es.reverse)
1588 idx = fv_words - params->es_cnt - 1;
1590 idx = params->es_cnt;
1592 params->es[idx].prot_id = prot_id;
1593 params->es[idx].off = off;
1594 params->mask[idx] = mask | sib_mask;
1598 off += ICE_FLOW_FV_EXTRACT_SZ;
1605 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1606 * @hw: pointer to the HW struct
1607 * @params: information about the flow to be processed
1608 * @seg: index of packet segment whose raw fields are to be extracted
1610 static enum ice_status
1611 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1618 if (!params->prof->segs[seg].raws_cnt)
1621 if (params->prof->segs[seg].raws_cnt >
1622 ARRAY_SIZE(params->prof->segs[seg].raws))
1623 return ICE_ERR_MAX_LIMIT;
1625 /* Offsets within the segment headers are not supported */
1626 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1628 return ICE_ERR_PARAM;
1630 fv_words = hw->blk[params->blk].es.fvw;
1632 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1633 struct ice_flow_seg_fld_raw *raw;
1636 raw = ¶ms->prof->segs[seg].raws[i];
1638 /* Storing extraction information */
1639 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1640 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1641 ICE_FLOW_FV_EXTRACT_SZ;
1642 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1644 raw->info.xtrct.idx = params->es_cnt;
1646 /* Determine the number of field vector entries this raw field
1649 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1650 (raw->info.src.last * BITS_PER_BYTE),
1651 (ICE_FLOW_FV_EXTRACT_SZ *
1653 off = raw->info.xtrct.off;
1654 for (j = 0; j < cnt; j++) {
1657 /* Make sure the number of extraction sequence required
1658 * does not exceed the block's capability
1660 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1661 params->es_cnt >= ICE_MAX_FV_WORDS)
1662 return ICE_ERR_MAX_LIMIT;
1664 /* some blocks require a reversed field vector layout */
1665 if (hw->blk[params->blk].es.reverse)
1666 idx = fv_words - params->es_cnt - 1;
1668 idx = params->es_cnt;
1670 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1671 params->es[idx].off = off;
1673 off += ICE_FLOW_FV_EXTRACT_SZ;
1681 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1682 * @hw: pointer to the HW struct
1683 * @params: information about the flow to be processed
1685 * This function iterates through all matched fields in the given segments, and
1686 * creates an extraction sequence for the fields.
1688 static enum ice_status
1689 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1690 struct ice_flow_prof_params *params)
1692 enum ice_status status = ICE_SUCCESS;
1695 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1698 if (params->blk == ICE_BLK_ACL) {
1699 status = ice_flow_xtract_pkt_flags(hw, params,
1700 ICE_RX_MDID_PKT_FLAGS_15_0);
1705 for (i = 0; i < params->prof->segs_cnt; i++) {
1706 u64 match = params->prof->segs[i].match;
1707 enum ice_flow_field j;
1709 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1710 ICE_FLOW_FIELD_IDX_MAX) {
1711 status = ice_flow_xtract_fld(hw, params, i, j, match);
1714 ice_clear_bit(j, (ice_bitmap_t *)&match);
1717 /* Process raw matching bytes */
1718 status = ice_flow_xtract_raws(hw, params, i);
1727 * ice_flow_sel_acl_scen - returns the specific scenario
1728 * @hw: pointer to the hardware structure
1729 * @params: information about the flow to be processed
1731 * This function will return the specific scenario based on the
1732 * params passed to it
1734 static enum ice_status
1735 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1737 /* Find the best-fit scenario for the provided match width */
1738 struct ice_acl_scen *cand_scen = NULL, *scen;
1741 return ICE_ERR_DOES_NOT_EXIST;
1743 /* Loop through each scenario and match against the scenario width
1744 * to select the specific scenario
1746 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1747 if (scen->eff_width >= params->entry_length &&
1748 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1751 return ICE_ERR_DOES_NOT_EXIST;
1753 params->prof->cfg.scen = cand_scen;
1759 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1760 * @params: information about the flow to be processed
1762 static enum ice_status
1763 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1765 u16 index, i, range_idx = 0;
1767 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1769 for (i = 0; i < params->prof->segs_cnt; i++) {
1770 struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
1773 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1774 ICE_FLOW_FIELD_IDX_MAX) {
1775 struct ice_flow_fld_info *fld = &seg->fields[j];
1777 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1779 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1780 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1782 /* Range checking only supported for single
1785 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1787 BITS_PER_BYTE * 2) > 1)
1788 return ICE_ERR_PARAM;
1790 /* Ranges must define low and high values */
1791 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1792 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1793 return ICE_ERR_PARAM;
1795 fld->entry.val = range_idx++;
1797 /* Store adjusted byte-length of field for later
1798 * use, taking into account potential
1799 * non-byte-aligned displacement
1801 fld->entry.last = DIVIDE_AND_ROUND_UP
1802 (ice_flds_info[j].size +
1803 (fld->xtrct.disp % BITS_PER_BYTE),
1805 fld->entry.val = index;
1806 index += fld->entry.last;
1810 for (j = 0; j < seg->raws_cnt; j++) {
1811 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1813 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1814 raw->info.entry.val = index;
1815 raw->info.entry.last = raw->info.src.last;
1816 index += raw->info.entry.last;
1820 /* Currently only support using the byte selection base, which only
1821 * allows for an effective entry size of 30 bytes. Reject anything
1824 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1825 return ICE_ERR_PARAM;
1827 /* Only 8 range checkers per profile, reject anything trying to use
1830 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1831 return ICE_ERR_PARAM;
1833 /* Store # bytes required for entry for later use */
1834 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1840 * ice_flow_proc_segs - process all packet segments associated with a profile
1841 * @hw: pointer to the HW struct
1842 * @params: information about the flow to be processed
1844 static enum ice_status
1845 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1847 enum ice_status status;
1849 status = ice_flow_proc_seg_hdrs(params);
1853 status = ice_flow_create_xtrct_seq(hw, params);
1857 switch (params->blk) {
1860 status = ICE_SUCCESS;
1863 status = ice_flow_acl_def_entry_frmt(params);
1866 status = ice_flow_sel_acl_scen(hw, params);
1871 return ICE_ERR_NOT_IMPL;
1877 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1878 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1879 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1882 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1883 * @hw: pointer to the HW struct
1884 * @blk: classification stage
1885 * @dir: flow direction
1886 * @segs: array of one or more packet segments that describe the flow
1887 * @segs_cnt: number of packet segments provided
1888 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1889 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1891 static struct ice_flow_prof *
1892 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1893 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1894 u8 segs_cnt, u16 vsi_handle, u32 conds)
1896 struct ice_flow_prof *p, *prof = NULL;
1898 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1899 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1900 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1901 segs_cnt && segs_cnt == p->segs_cnt) {
1904 /* Check for profile-VSI association if specified */
1905 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1906 ice_is_vsi_valid(hw, vsi_handle) &&
1907 !ice_is_bit_set(p->vsis, vsi_handle))
1910 /* Protocol headers must be checked. Matched fields are
1911 * checked if specified.
1913 for (i = 0; i < segs_cnt; i++)
1914 if (segs[i].hdrs != p->segs[i].hdrs ||
1915 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1916 segs[i].match != p->segs[i].match))
1919 /* A match is found if all segments are matched */
1920 if (i == segs_cnt) {
1925 ice_release_lock(&hw->fl_profs_locks[blk]);
1931 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1932 * @hw: pointer to the HW struct
1933 * @blk: classification stage
1934 * @dir: flow direction
1935 * @segs: array of one or more packet segments that describe the flow
1936 * @segs_cnt: number of packet segments provided
1939 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1940 struct ice_flow_seg_info *segs, u8 segs_cnt)
1942 struct ice_flow_prof *p;
1944 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1945 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1947 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1951 * ice_flow_find_prof_id - Look up a profile with given profile ID
1952 * @hw: pointer to the HW struct
1953 * @blk: classification stage
1954 * @prof_id: unique ID to identify this flow profile
1956 static struct ice_flow_prof *
1957 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1959 struct ice_flow_prof *p;
1961 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1962 if (p->id == prof_id)
1969 * ice_dealloc_flow_entry - Deallocate flow entry memory
1970 * @hw: pointer to the HW struct
1971 * @entry: flow entry to be removed
1974 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1980 ice_free(hw, entry->entry);
1982 if (entry->range_buf) {
1983 ice_free(hw, entry->range_buf);
1984 entry->range_buf = NULL;
1988 ice_free(hw, entry->acts);
1990 entry->acts_cnt = 0;
1993 ice_free(hw, entry);
1997 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1998 * @hw: pointer to the HW struct
1999 * @blk: classification stage
2000 * @prof_id: the profile ID handle
2001 * @hw_prof_id: pointer to variable to receive the HW profile ID
2004 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2007 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2008 struct ice_prof_map *map;
2010 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2011 map = ice_search_prof_id(hw, blk, prof_id);
2013 *hw_prof_id = map->prof_id;
2014 status = ICE_SUCCESS;
2016 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2020 #define ICE_ACL_INVALID_SCEN 0x3f
2023 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
2024 * @hw: pointer to the hardware structure
2025 * @prof: pointer to flow profile
2026 * @buf: destination buffer function writes partial extraction sequence to
2028 * returns ICE_SUCCESS if no PF is associated to the given profile
2029 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
2030 * returns other error code for real error
2032 static enum ice_status
2033 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
2034 struct ice_aqc_acl_prof_generic_frmt *buf)
2036 enum ice_status status;
2039 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2043 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
2047 /* If all PF's associated scenarios are all 0 or all
2048 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
2049 * not been configured yet.
2051 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
2052 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
2053 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
2054 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
2057 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
2058 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
2059 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
2060 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
2061 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
2062 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
2063 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
2064 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
2067 return ICE_ERR_IN_USE;
2071 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
2072 * @hw: pointer to the hardware structure
2073 * @acts: array of actions to be performed on a match
2074 * @acts_cnt: number of actions
2076 static enum ice_status
2077 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
2082 for (i = 0; i < acts_cnt; i++) {
2083 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2084 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2085 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2086 struct ice_acl_cntrs cntrs = { 0 };
2087 enum ice_status status;
2089 /* amount is unused in the dealloc path but the common
2090 * parameter check routine wants a value set, as zero
2091 * is invalid for the check. Just set it.
2094 cntrs.bank = 0; /* Only bank0 for the moment */
2096 LE16_TO_CPU(acts[i].data.acl_act.value);
2098 LE16_TO_CPU(acts[i].data.acl_act.value);
2100 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2101 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2103 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2105 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
2114 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
2115 * @hw: pointer to the hardware structure
2116 * @prof: pointer to flow profile
2118 * Disassociate the scenario from the profile for the PF of the VSI.
2120 static enum ice_status
2121 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
2123 struct ice_aqc_acl_prof_generic_frmt buf;
2124 enum ice_status status = ICE_SUCCESS;
2127 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2129 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2133 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
2137 /* Clear scenario for this PF */
2138 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
2139 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2145 * ice_flow_rem_entry_sync - Remove a flow entry
2146 * @hw: pointer to the HW struct
2147 * @blk: classification stage
2148 * @entry: flow entry to be removed
2150 static enum ice_status
2151 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
2152 struct ice_flow_entry *entry)
2155 return ICE_ERR_BAD_PTR;
2157 if (blk == ICE_BLK_ACL) {
2158 enum ice_status status;
2161 return ICE_ERR_BAD_PTR;
2163 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
2164 entry->scen_entry_idx);
2168 /* Checks if we need to release an ACL counter. */
2169 if (entry->acts_cnt && entry->acts)
2170 ice_flow_acl_free_act_cntr(hw, entry->acts,
2174 LIST_DEL(&entry->l_entry);
2176 ice_dealloc_flow_entry(hw, entry);
2182 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
2183 * @hw: pointer to the HW struct
2184 * @blk: classification stage
2185 * @dir: flow direction
2186 * @prof_id: unique ID to identify this flow profile
2187 * @segs: array of one or more packet segments that describe the flow
2188 * @segs_cnt: number of packet segments provided
2189 * @acts: array of default actions
2190 * @acts_cnt: number of default actions
2191 * @prof: stores the returned flow profile added
2193 * Assumption: the caller has acquired the lock to the profile list
2195 static enum ice_status
2196 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
2197 enum ice_flow_dir dir, u64 prof_id,
2198 struct ice_flow_seg_info *segs, u8 segs_cnt,
2199 struct ice_flow_action *acts, u8 acts_cnt,
2200 struct ice_flow_prof **prof)
2202 struct ice_flow_prof_params *params;
2203 enum ice_status status;
2206 if (!prof || (acts_cnt && !acts))
2207 return ICE_ERR_BAD_PTR;
2209 params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2211 return ICE_ERR_NO_MEMORY;
2213 params->prof = (struct ice_flow_prof *)
2214 ice_malloc(hw, sizeof(*params->prof));
2215 if (!params->prof) {
2216 status = ICE_ERR_NO_MEMORY;
2220 /* initialize extraction sequence to all invalid (0xff) */
2221 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2222 params->es[i].prot_id = ICE_PROT_INVALID;
2223 params->es[i].off = ICE_FV_OFFSET_INVAL;
2227 params->prof->id = prof_id;
2228 params->prof->dir = dir;
2229 params->prof->segs_cnt = segs_cnt;
2231 /* Make a copy of the segments that need to be persistent in the flow
2234 for (i = 0; i < segs_cnt; i++)
2235 ice_memcpy(¶ms->prof->segs[i], &segs[i], sizeof(*segs),
2236 ICE_NONDMA_TO_NONDMA);
2238 /* Make a copy of the actions that need to be persistent in the flow
2242 params->prof->acts = (struct ice_flow_action *)
2243 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2244 ICE_NONDMA_TO_NONDMA);
2246 if (!params->prof->acts) {
2247 status = ICE_ERR_NO_MEMORY;
2252 status = ice_flow_proc_segs(hw, params);
2254 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2258 /* Add a HW profile for this flow profile */
2259 status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2260 params->attr, params->attr_cnt, params->es,
2261 params->mask, true);
2263 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
2267 INIT_LIST_HEAD(¶ms->prof->entries);
2268 ice_init_lock(¶ms->prof->entries_lock);
2269 *prof = params->prof;
2273 if (params->prof->acts)
2274 ice_free(hw, params->prof->acts);
2275 ice_free(hw, params->prof);
2278 ice_free(hw, params);
2284 * ice_flow_rem_prof_sync - remove a flow profile
2285 * @hw: pointer to the hardware structure
2286 * @blk: classification stage
2287 * @prof: pointer to flow profile to remove
2289 * Assumption: the caller has acquired the lock to the profile list
2291 static enum ice_status
2292 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2293 struct ice_flow_prof *prof)
2295 enum ice_status status;
2297 /* Remove all remaining flow entries before removing the flow profile */
2298 if (!LIST_EMPTY(&prof->entries)) {
2299 struct ice_flow_entry *e, *t;
2301 ice_acquire_lock(&prof->entries_lock);
2303 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2305 status = ice_flow_rem_entry_sync(hw, blk, e);
2310 ice_release_lock(&prof->entries_lock);
2313 if (blk == ICE_BLK_ACL) {
2314 struct ice_aqc_acl_profile_ranges query_rng_buf;
2315 struct ice_aqc_acl_prof_generic_frmt buf;
2318 /* Disassociate the scenario from the profile for the PF */
2319 status = ice_flow_acl_disassoc_scen(hw, prof);
2323 /* Clear the range-checker if the profile ID is no longer
2326 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2327 if (status && status != ICE_ERR_IN_USE) {
2329 } else if (!status) {
2330 /* Clear the range-checker value for profile ID */
2331 ice_memset(&query_rng_buf, 0,
2332 sizeof(struct ice_aqc_acl_profile_ranges),
2335 status = ice_flow_get_hw_prof(hw, blk, prof->id,
2340 status = ice_prog_acl_prof_ranges(hw, prof_id,
2341 &query_rng_buf, NULL);
2347 /* Remove all hardware profiles associated with this flow profile */
2348 status = ice_rem_prof(hw, blk, prof->id);
2350 LIST_DEL(&prof->l_entry);
2351 ice_destroy_lock(&prof->entries_lock);
2353 ice_free(hw, prof->acts);
2361 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2362 * @buf: Destination buffer function writes partial xtrct sequence to
2363 * @info: Info about field
2366 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2367 struct ice_flow_fld_info *info)
2372 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2373 info->xtrct.disp / BITS_PER_BYTE;
2374 dst = info->entry.val;
2375 for (i = 0; i < info->entry.last; i++)
2376 /* HW stores field vector words in LE, convert words back to BE
2377 * so constructed entries will end up in network order
2379 buf->byte_selection[dst++] = src++ ^ 1;
2383 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2384 * @hw: pointer to the hardware structure
2385 * @prof: pointer to flow profile
2387 static enum ice_status
2388 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2390 struct ice_aqc_acl_prof_generic_frmt buf;
2391 struct ice_flow_fld_info *info;
2392 enum ice_status status;
2396 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2398 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2402 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2403 if (status && status != ICE_ERR_IN_USE)
2407 /* Program the profile dependent configuration. This is done
2408 * only once regardless of the number of PFs using that profile
2410 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2412 for (i = 0; i < prof->segs_cnt; i++) {
2413 struct ice_flow_seg_info *seg = &prof->segs[i];
2416 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2417 ICE_FLOW_FIELD_IDX_MAX) {
2418 info = &seg->fields[j];
2420 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2421 buf.word_selection[info->entry.val] =
2424 ice_flow_acl_set_xtrct_seq_fld(&buf,
2428 for (j = 0; j < seg->raws_cnt; j++) {
2429 info = &seg->raws[j].info;
2430 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2434 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2435 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2439 /* Update the current PF */
2440 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2441 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2447 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2448 * @hw: pointer to the hardware structure
2449 * @blk: classification stage
2450 * @vsi_handle: software VSI handle
2451 * @vsig: target VSI group
2453 * Assumption: the caller has already verified that the VSI to
2454 * be added has the same characteristics as the VSIG and will
2455 * thereby have access to all resources added to that VSIG.
2458 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2461 enum ice_status status;
2463 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2464 return ICE_ERR_PARAM;
2466 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2467 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2469 ice_release_lock(&hw->fl_profs_locks[blk]);
2475 * ice_flow_assoc_prof - associate a VSI with a flow profile
2476 * @hw: pointer to the hardware structure
2477 * @blk: classification stage
2478 * @prof: pointer to flow profile
2479 * @vsi_handle: software VSI handle
2481 * Assumption: the caller has acquired the lock to the profile list
2482 * and the software VSI handle has been validated
2485 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2486 struct ice_flow_prof *prof, u16 vsi_handle)
2488 enum ice_status status = ICE_SUCCESS;
2490 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2491 if (blk == ICE_BLK_ACL) {
2492 status = ice_flow_acl_set_xtrct_seq(hw, prof);
2496 status = ice_add_prof_id_flow(hw, blk,
2497 ice_get_hw_vsi_num(hw,
2501 ice_set_bit(vsi_handle, prof->vsis);
2503 ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2511 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2512 * @hw: pointer to the hardware structure
2513 * @blk: classification stage
2514 * @prof: pointer to flow profile
2515 * @vsi_handle: software VSI handle
2517 * Assumption: the caller has acquired the lock to the profile list
2518 * and the software VSI handle has been validated
2520 static enum ice_status
2521 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2522 struct ice_flow_prof *prof, u16 vsi_handle)
2524 enum ice_status status = ICE_SUCCESS;
2526 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2527 status = ice_rem_prof_id_flow(hw, blk,
2528 ice_get_hw_vsi_num(hw,
2532 ice_clear_bit(vsi_handle, prof->vsis);
2534 ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2541 #define FLAG_GTP_EH_PDU_LINK BIT_ULL(13)
2542 #define FLAG_GTP_EH_PDU BIT_ULL(14)
2544 #define FLAG_GTPU_MSK \
2545 (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
2546 #define FLAG_GTPU_UP \
2547 (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
2548 #define FLAG_GTPU_DW \
2551 * ice_flow_set_hw_prof - Set HW flow profile based on the parsed profile info
2552 * @hw: pointer to the HW struct
2553 * @dest_vsi_handle: dest VSI handle
2554 * @fdir_vsi_handle: fdir programming VSI handle
2555 * @prof: stores parsed profile info from raw flow
2556 * @blk: classification stage
2559 ice_flow_set_hw_prof(struct ice_hw *hw, u16 dest_vsi_handle,
2560 u16 fdir_vsi_handle, struct ice_parser_profile *prof,
2563 int id = ice_find_first_bit(prof->ptypes, UINT16_MAX);
2564 struct ice_flow_prof_params *params;
2565 u8 fv_words = hw->blk[blk].es.fvw;
2566 enum ice_status status;
2570 params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2572 return ICE_ERR_NO_MEMORY;
2574 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2575 params->es[i].prot_id = ICE_PROT_INVALID;
2576 params->es[i].off = ICE_FV_OFFSET_INVAL;
2579 for (i = 0; i < prof->fv_num; i++) {
2580 if (hw->blk[blk].es.reverse)
2581 idx = fv_words - i - 1;
2584 params->es[idx].prot_id = prof->fv[i].proto_id;
2585 params->es[idx].off = prof->fv[i].offset;
2586 params->mask[idx] = CPU_TO_BE16(prof->fv[i].msk);
2589 switch (prof->flags) {
2591 params->attr = ice_attr_gtpu_down;
2592 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
2595 params->attr = ice_attr_gtpu_up;
2596 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
2599 if (prof->flags_msk & FLAG_GTPU_MSK) {
2600 params->attr = ice_attr_gtpu_session;
2601 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
2606 status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes,
2607 params->attr, params->attr_cnt,
2608 params->es, params->mask, false);
2612 status = ice_flow_assoc_hw_prof(hw, blk, dest_vsi_handle,
2613 fdir_vsi_handle, id);
2620 ice_free(hw, params);
2626 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2627 * @hw: pointer to the HW struct
2628 * @blk: classification stage
2629 * @dir: flow direction
2630 * @prof_id: unique ID to identify this flow profile
2631 * @segs: array of one or more packet segments that describe the flow
2632 * @segs_cnt: number of packet segments provided
2633 * @acts: array of default actions
2634 * @acts_cnt: number of default actions
2635 * @prof: stores the returned flow profile added
2638 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2639 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2640 struct ice_flow_action *acts, u8 acts_cnt,
2641 struct ice_flow_prof **prof)
2643 enum ice_status status;
2645 if (segs_cnt > ICE_FLOW_SEG_MAX)
2646 return ICE_ERR_MAX_LIMIT;
2649 return ICE_ERR_PARAM;
2652 return ICE_ERR_BAD_PTR;
2654 status = ice_flow_val_hdrs(segs, segs_cnt);
2658 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2660 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2661 acts, acts_cnt, prof);
2663 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2665 ice_release_lock(&hw->fl_profs_locks[blk]);
2671 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2672 * @hw: pointer to the HW struct
2673 * @blk: the block for which the flow profile is to be removed
2674 * @prof_id: unique ID of the flow profile to be removed
2677 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2679 struct ice_flow_prof *prof;
2680 enum ice_status status;
2682 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2684 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2686 status = ICE_ERR_DOES_NOT_EXIST;
2690 /* prof becomes invalid after the call */
2691 status = ice_flow_rem_prof_sync(hw, blk, prof);
2694 ice_release_lock(&hw->fl_profs_locks[blk]);
2700 * ice_flow_find_entry - look for a flow entry using its unique ID
2701 * @hw: pointer to the HW struct
2702 * @blk: classification stage
2703 * @entry_id: unique ID to identify this flow entry
2705 * This function looks for the flow entry with the specified unique ID in all
2706 * flow profiles of the specified classification stage. If the entry is found,
2707 * it returns the handle to the flow entry. Otherwise, it returns
2708 * ICE_FLOW_ENTRY_ID_INVAL.
2710 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2712 struct ice_flow_entry *found = NULL;
2713 struct ice_flow_prof *p;
2715 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2717 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2718 struct ice_flow_entry *e;
/* Each profile's entry list is protected by its own lock */
2720 ice_acquire_lock(&p->entries_lock);
2721 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2722 if (e->id == entry_id) {
/* Match found; the assignment to 'found' and loop exit are elided here */
2726 ice_release_lock(&p->entries_lock);
2732 ice_release_lock(&hw->fl_profs_locks[blk]);
2734 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2738 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2739 * @hw: pointer to the hardware structure
2740 * @acts: array of actions to be performed on a match
2741 * @acts_cnt: number of actions
2742 * @cnt_alloc: indicates if an ACL counter has been allocated.
2744 static enum ice_status
2745 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2746 u8 acts_cnt, bool *cnt_alloc)
2748 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2751 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2754 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2755 return ICE_ERR_OUT_OF_RANGE;
/* First pass: validate action types and reject duplicates via dup_check.
 * NOTE(review): the reject path for unsupported types is elided in this
 * listing. */
2757 for (i = 0; i < acts_cnt; i++) {
2758 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2759 acts[i].type != ICE_FLOW_ACT_DROP &&
2760 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2761 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2764 /* If the caller want to add two actions of the same type, then
2765 * it is considered invalid configuration.
2767 if (ice_test_and_set_bit(acts[i].type, dup_check))
2768 return ICE_ERR_PARAM;
2771 /* Checks if ACL counters are needed. */
2772 for (i = 0; i < acts_cnt; i++) {
2773 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2774 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2775 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2776 struct ice_acl_cntrs cntrs = { 0 };
2777 enum ice_status status;
2780 cntrs.bank = 0; /* Only bank0 for the moment */
/* Dual counter (pkt+bytes) vs. single counter selection */
2782 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2783 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2785 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2787 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2790 /* Counter index within the bank */
2791 acts[i].data.acl_act.value =
2792 CPU_TO_LE16(cntrs.first_cntr);
2801 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2802 * @fld: number of the given field
2803 * @info: info about field
2804 * @range_buf: range checker configuration buffer
2805 * @data: pointer to a data buffer containing flow entry's match values/masks
2806 * @range: Input/output param indicating which range checkers are being used
2809 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2810 struct ice_aqc_acl_profile_ranges *range_buf,
2811 u8 *data, u8 *range)
2815 /* If not specified, default mask is all bits in field */
2816 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2817 BIT(ice_flds_info[fld].size) - 1 :
2818 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2820 /* If the mask is 0, then we don't need to worry about this input
2821 * range checker value.
/* Low/high boundaries are read from the entry's 'val'/'last' locations,
 * shifted by the extraction displacement (guarding 'if' elided in listing) */
2825 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2827 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2828 u8 range_idx = info->entry.val;
/* Program the checker slot; HW expects big-endian boundaries/mask */
2830 range_buf->checker_cfg[range_idx].low_boundary =
2831 CPU_TO_BE16(new_low);
2832 range_buf->checker_cfg[range_idx].high_boundary =
2833 CPU_TO_BE16(new_high);
2834 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2836 /* Indicate which range checker is being used */
2837 *range |= BIT(range_idx);
2842 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2843 * @fld: number of the given field
2844 * @info: info about the field
2845 * @buf: buffer containing the entry
2846 * @dontcare: buffer containing don't care mask for entry
2847 * @data: pointer to a data buffer containing flow entry's match values/masks
2850 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2851 u8 *dontcare, u8 *data)
2853 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2854 bool use_mask = false;
2857 src = info->src.val;
2858 mask = info->src.mask;
/* Destination index is relative to the ACL byte-selection window */
2859 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2860 disp = info->xtrct.disp % BITS_PER_BYTE;
2862 if (mask != ICE_FLOW_FLD_OFF_INVAL)
/* Copy the field byte-by-byte, carrying shifted overflow bits forward */
2865 for (k = 0; k < info->entry.last; k++, dst++) {
2866 /* Add overflow bits from previous byte */
2867 buf[dst] = (tmp_s & 0xff00) >> 8;
2869 /* If mask is not valid, tmp_m is always zero, so just setting
2870 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2871 * overflow bits of mask from prev byte
2873 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2875 /* If there is displacement, last byte will only contain
2876 * displaced data, but there is no more data to read from user
2877 * buffer, so skip so as not to potentially read beyond end of
2880 if (!disp || k < info->entry.last - 1) {
2881 /* Store shifted data to use in next byte */
2882 tmp_s = data[src++] << disp;
2884 /* Add current (shifted) byte */
2885 buf[dst] |= tmp_s & 0xff;
2887 /* Handle mask if valid */
/* dontcare bits are the inverted user mask (1 = ignore this bit) */
2889 tmp_m = (~data[mask++] & 0xff) << disp;
2890 dontcare[dst] |= tmp_m & 0xff;
2895 /* Fill in don't care bits at beginning of field */
2897 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2898 for (k = 0; k < disp; k++)
2899 dontcare[dst] |= BIT(k);
2902 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2904 /* Fill in don't care bits at end of field */
2906 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2907 info->entry.last - 1;
2908 for (k = end_disp; k < BITS_PER_BYTE; k++)
2909 dontcare[dst] |= BIT(k);
2914 * ice_flow_acl_frmt_entry - Format ACL entry
2915 * @hw: pointer to the hardware structure
2916 * @prof: pointer to flow profile
2917 * @e: pointer to the flow entry
2918 * @data: pointer to a data buffer containing flow entry's match values/masks
2919 * @acts: array of actions to be performed on a match
2920 * @acts_cnt: number of actions
2922 * Formats the key (and key_inverse) to be matched from the data passed in,
2923 * along with data from the flow profile. This key/key_inverse pair makes up
2924 * the 'entry' for an ACL flow entry.
2926 static enum ice_status
2927 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2928 struct ice_flow_entry *e, u8 *data,
2929 struct ice_flow_action *acts, u8 acts_cnt)
2931 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2932 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2933 enum ice_status status;
2938 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2942 /* Format the result action */
2944 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2948 status = ICE_ERR_NO_MEMORY;
/* Keep the entry's own copy of the action array */
2950 e->acts = (struct ice_flow_action *)
2951 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2952 ICE_NONDMA_TO_NONDMA);
2956 e->acts_cnt = acts_cnt;
2958 /* Format the matching data */
2959 buf_sz = prof->cfg.scen->width;
2960 buf = (u8 *)ice_malloc(hw, buf_sz);
2964 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2968 /* 'key' buffer will store both key and key_inverse, so must be twice
2971 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2975 range_buf = (struct ice_aqc_acl_profile_ranges *)
2976 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2980 /* Set don't care mask to all 1's to start, will zero out used bytes */
2981 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
/* Walk every matched field in every segment; range-type fields program a
 * HW range checker, all others are packed into the key buffer */
2983 for (i = 0; i < prof->segs_cnt; i++) {
2984 struct ice_flow_seg_info *seg = &prof->segs[i];
2987 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2988 ICE_FLOW_FIELD_IDX_MAX) {
2989 struct ice_flow_fld_info *info = &seg->fields[j];
2991 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2992 ice_flow_acl_frmt_entry_range(j, info,
2996 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* Raw (offset/length) fields are copied without bit displacement */
3000 for (j = 0; j < seg->raws_cnt; j++) {
3001 struct ice_flow_fld_info *info = &seg->raws[j].info;
3002 u16 dst, src, mask, k;
3003 bool use_mask = false;
3005 src = info->src.val;
3006 dst = info->entry.val -
3007 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
3008 mask = info->src.mask;
3010 if (mask != ICE_FLOW_FLD_OFF_INVAL)
3013 for (k = 0; k < info->entry.last; k++, dst++) {
3014 buf[dst] = data[src++];
3016 dontcare[dst] = ~data[mask++];
/* Profile ID byte is always matched exactly */
3023 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
3024 dontcare[prof->cfg.scen->pid_idx] = 0;
3026 /* Format the buffer for direction flags */
3027 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
3029 if (prof->dir == ICE_FLOW_RX)
3030 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
/* Range-checker usage byte: used checkers matched, unused don't-cared */
3033 buf[prof->cfg.scen->rng_chk_idx] = range;
3034 /* Mark any unused range checkers as don't care */
3035 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
3036 e->range_buf = range_buf;
3038 ice_free(hw, range_buf);
3041 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
3047 e->entry_sz = buf_sz * 2;
/* Cleanup path: scratch buffers always freed; on error also release the
 * range buffer, action copy, and any allocated ACL counters */
3054 ice_free(hw, dontcare);
3059 if (status && range_buf) {
3060 ice_free(hw, range_buf);
3061 e->range_buf = NULL;
3064 if (status && e->acts) {
3065 ice_free(hw, e->acts);
3070 if (status && cnt_alloc)
3071 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
3077 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
3078 * the compared data.
3079 * @prof: pointer to flow profile
3080 * @e: pointer to the comparing flow entry
3081 * @do_chg_action: decide if we want to change the ACL action
3082 * @do_add_entry: decide if we want to add the new ACL entry
3083 * @do_rem_entry: decide if we want to remove the current ACL entry
3085 * Find an ACL scenario entry that matches the compared data. In the same time,
3086 * this function also figure out:
3087 * a/ If we want to change the ACL action
3088 * b/ If we want to add the new ACL entry
3089 * c/ If we want to remove the current ACL entry
3091 static struct ice_flow_entry *
3092 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
3093 struct ice_flow_entry *e, bool *do_chg_action,
3094 bool *do_add_entry, bool *do_rem_entry)
3096 struct ice_flow_entry *p, *return_entry = NULL;
3100 * a/ There exists an entry with same matching data, but different
3101 * priority, then we remove this existing ACL entry. Then, we
3102 * will add the new entry to the ACL scenario.
3103 * b/ There exists an entry with same matching data, priority, and
3104 * result action, then we do nothing
3105 * c/ There exists an entry with same matching data, priority, but
3106 * different action, then only change the entry's action.
3107 * d/ Else, we add this new entry to the ACL scenario.
/* Defaults correspond to case d/ (no matching entry found) */
3109 *do_chg_action = false;
3110 *do_add_entry = true;
3111 *do_rem_entry = false;
3112 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
3113 if (memcmp(p->entry, e->entry, p->entry_sz))
3116 /* From this point, we have the same matching_data. */
3117 *do_add_entry = false;
3120 if (p->priority != e->priority) {
3121 /* matching data && !priority */
3122 *do_add_entry = true;
3123 *do_rem_entry = true;
3127 /* From this point, we will have matching_data && priority */
3128 if (p->acts_cnt != e->acts_cnt)
3129 *do_chg_action = true;
/* Compare action sets: any action of p absent from e forces an update */
3130 for (i = 0; i < p->acts_cnt; i++) {
3131 bool found_not_match = false;
3133 for (j = 0; j < e->acts_cnt; j++)
3134 if (memcmp(&p->acts[i], &e->acts[j],
3135 sizeof(struct ice_flow_action))) {
3136 found_not_match = true;
3140 if (found_not_match) {
3141 *do_chg_action = true;
3146 /* (do_chg_action = true) means :
3147 * matching_data && priority && !result_action
3148 * (do_chg_action = false) means :
3149 * matching_data && priority && result_action
3154 return return_entry;
3158 * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
/* Maps the generic flow priority enum onto the ACL module's priority enum;
 * unknown values fall back to normal priority. 'break' statements and the
 * final return are elided in this listing. */
3161 static enum ice_acl_entry_prio
3162 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
3164 enum ice_acl_entry_prio acl_prio;
3167 case ICE_FLOW_PRIO_LOW:
3168 acl_prio = ICE_ACL_PRIO_LOW;
3170 case ICE_FLOW_PRIO_NORMAL:
3171 acl_prio = ICE_ACL_PRIO_NORMAL;
3173 case ICE_FLOW_PRIO_HIGH:
3174 acl_prio = ICE_ACL_PRIO_HIGH;
3177 acl_prio = ICE_ACL_PRIO_NORMAL;
3185 * ice_flow_acl_union_rng_chk - Perform union operation between two
3186 * range-range checker buffers
3187 * @dst_buf: pointer to destination range checker buffer
3188 * @src_buf: pointer to source range checker buffer
3190 * For this function, we do the union between dst_buf and src_buf
3191 * range checker buffer, and we will save the result back to dst_buf
3193 static enum ice_status
3194 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
3195 struct ice_aqc_acl_profile_ranges *src_buf)
3199 if (!dst_buf || !src_buf)
3200 return ICE_ERR_BAD_PTR;
3202 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
3203 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
3204 bool will_populate = false;
3206 in_data = &src_buf->checker_cfg[i];
/* Find a destination slot that is free (mask == 0) or already holds an
 * identical checker configuration */
3211 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
3212 cfg_data = &dst_buf->checker_cfg[j];
3214 if (!cfg_data->mask ||
3215 !memcmp(cfg_data, in_data,
3216 sizeof(struct ice_acl_rng_data))) {
3217 will_populate = true;
3222 if (will_populate) {
3223 ice_memcpy(cfg_data, in_data,
3224 sizeof(struct ice_acl_rng_data),
3225 ICE_NONDMA_TO_NONDMA);
3227 /* No available slot left to program range checker */
3228 return ICE_ERR_MAX_LIMIT;
3236 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
3237 * @hw: pointer to the hardware structure
3238 * @prof: pointer to flow profile
3239 * @entry: double pointer to the flow entry
3241 * For this function, we will look at the current added entries in the
3242 * corresponding ACL scenario. Then, we will perform matching logic to
3243 * see if we want to add/modify/do nothing with this new entry.
3245 static enum ice_status
3246 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
3247 struct ice_flow_entry **entry)
3249 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
3250 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
3251 struct ice_acl_act_entry *acts = NULL;
3252 struct ice_flow_entry *exist;
3253 enum ice_status status = ICE_SUCCESS;
3254 struct ice_flow_entry *e;
3257 if (!entry || !(*entry) || !prof)
3258 return ICE_ERR_BAD_PTR;
3262 do_chg_rng_chk = false;
/* Range-checker handling (guard condition elided in this listing) */
3266 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
3271 /* Query the current range-checker value in FW */
3272 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
3276 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
3277 sizeof(struct ice_aqc_acl_profile_ranges),
3278 ICE_NONDMA_TO_NONDMA);
3280 /* Generate the new range-checker value */
3281 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
3285 /* Reconfigure the range check if the buffer is changed. */
3286 do_chg_rng_chk = false;
3287 if (memcmp(&query_rng_buf, &cfg_rng_buf,
3288 sizeof(struct ice_aqc_acl_profile_ranges))) {
3289 status = ice_prog_acl_prof_ranges(hw, prof_id,
3290 &cfg_rng_buf, NULL);
3294 do_chg_rng_chk = true;
3298 /* Figure out if we want to (change the ACL action) and/or
3299 * (Add the new ACL entry) and/or (Remove the current ACL entry)
3301 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
3302 &do_add_entry, &do_rem_entry);
3304 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
3309 /* Prepare the result action buffer */
/* NOTE(review): allocation is sized by e->entry_sz although only
 * e->acts_cnt entries are copied below — presumably an intentional
 * over-allocation; verify against the shared base code. */
3310 acts = (struct ice_acl_act_entry *)
3311 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
3313 return ICE_ERR_NO_MEMORY;
3315 for (i = 0; i < e->acts_cnt; i++)
3316 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
3317 sizeof(struct ice_acl_act_entry),
3318 ICE_NONDMA_TO_NONDMA);
/* Add path: key/key_inverse halves of e->entry are programmed into the
 * scenario and the entry is linked into the profile's SW list */
3321 enum ice_acl_entry_prio prio;
3325 keys = (u8 *)e->entry;
3326 inverts = keys + (e->entry_sz / 2);
3327 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
3329 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
3330 inverts, acts, e->acts_cnt,
3335 e->scen_entry_idx = entry_idx;
3336 LIST_ADD(&e->l_entry, &prof->entries);
3338 if (do_chg_action) {
3339 /* For the action memory info, update the SW's copy of
3340 * exist entry with e's action memory info
3342 ice_free(hw, exist->acts);
3343 exist->acts_cnt = e->acts_cnt;
3344 exist->acts = (struct ice_flow_action *)
3345 ice_calloc(hw, exist->acts_cnt,
3346 sizeof(struct ice_flow_action));
3348 status = ICE_ERR_NO_MEMORY;
3352 ice_memcpy(exist->acts, e->acts,
3353 sizeof(struct ice_flow_action) * e->acts_cnt,
3354 ICE_NONDMA_TO_NONDMA);
3356 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3358 exist->scen_entry_idx);
3363 if (do_chg_rng_chk) {
3364 /* In this case, we want to update the range checker
3365 * information of the exist entry
3367 status = ice_flow_acl_union_rng_chk(exist->range_buf,
3373 /* As we don't add the new entry to our SW DB, deallocate its
3374 * memories, and return the exist entry to the caller
3376 ice_dealloc_flow_entry(hw, e);
3386 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3387 * @hw: pointer to the hardware structure
3388 * @prof: pointer to flow profile
3389 * @e: double pointer to the flow entry
/* Thin locking wrapper around ice_flow_acl_add_scen_entry_sync(). */
3391 static enum ice_status
3392 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3393 struct ice_flow_entry **e)
3395 enum ice_status status;
3397 ice_acquire_lock(&prof->entries_lock);
3398 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3399 ice_release_lock(&prof->entries_lock);
3405 * ice_flow_add_entry - Add a flow entry
3406 * @hw: pointer to the HW struct
3407 * @blk: classification stage
3408 * @prof_id: ID of the profile to add a new flow entry to
3409 * @entry_id: unique ID to identify this flow entry
3410 * @vsi_handle: software VSI handle for the flow entry
3411 * @prio: priority of the flow entry
3412 * @data: pointer to a data buffer containing flow entry's match values/masks
3413 * @acts: arrays of actions to be performed on a match
3414 * @acts_cnt: number of actions
3415 * @entry_h: pointer to buffer that receives the new flow entry's handle
3418 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3419 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3420 void *data, struct ice_flow_action *acts, u8 acts_cnt,
3423 struct ice_flow_entry *e = NULL;
3424 struct ice_flow_prof *prof;
3425 enum ice_status status = ICE_SUCCESS;
3427 /* ACL entries must indicate an action */
3428 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3429 return ICE_ERR_PARAM;
3431 /* No flow entry data is expected for RSS */
3432 if (!entry_h || (!data && blk != ICE_BLK_RSS))
3433 return ICE_ERR_BAD_PTR;
3435 if (!ice_is_vsi_valid(hw, vsi_handle))
3436 return ICE_ERR_PARAM;
3438 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3440 prof = ice_flow_find_prof_id(hw, blk, prof_id);
3442 status = ICE_ERR_DOES_NOT_EXIST;
3444 /* Allocate memory for the entry being added and associate
3445 * the VSI to the found flow profile
3447 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3449 status = ICE_ERR_NO_MEMORY;
3451 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3454 ice_release_lock(&hw->fl_profs_locks[blk]);
/* Populate the entry (id/prio/acts assignments elided in listing) */
3459 e->vsi_handle = vsi_handle;
/* Per-block dispatch (switch elided): ACL formats and programs its own
 * entry; unsupported blocks report ICE_ERR_NOT_IMPL */
3468 /* ACL will handle the entry management */
3469 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3474 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3480 status = ICE_ERR_NOT_IMPL;
3484 if (blk != ICE_BLK_ACL) {
3485 /* ACL will handle the entry management */
3486 ice_acquire_lock(&prof->entries_lock);
3487 LIST_ADD(&e->l_entry, &prof->entries);
3488 ice_release_lock(&prof->entries_lock);
3491 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
/* Error path (label elided): free the partially built entry */
3496 ice_free(hw, e->entry);
3504 * ice_flow_rem_entry - Remove a flow entry
3505 * @hw: pointer to the HW struct
3506 * @blk: classification stage
3507 * @entry_h: handle to the flow entry to be removed
3509 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3512 struct ice_flow_entry *entry;
3513 struct ice_flow_prof *prof;
3514 enum ice_status status = ICE_SUCCESS;
3516 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3517 return ICE_ERR_PARAM;
/* The handle encodes the entry pointer */
3519 entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
3521 /* Retain the pointer to the flow profile as the entry will be freed */
3525 ice_acquire_lock(&prof->entries_lock);
3526 status = ice_flow_rem_entry_sync(hw, blk, entry);
3527 ice_release_lock(&prof->entries_lock);
3534 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3535 * @seg: packet segment the field being set belongs to
3536 * @fld: field to be set
3537 * @field_type: type of the field
3538 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3539 * entry's input buffer
3540 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3542 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3543 * entry's input buffer
3545 * This helper function stores information of a field being matched, including
3546 * the type of the field and the locations of the value to match, the mask, and
3547 * the upper-bound value in the start of the input buffer for a flow entry.
3548 * This function should only be used for fixed-size data structures.
3550 * This function also opportunistically determines the protocol headers to be
3551 * present based on the fields being set. Some fields cannot be used alone to
3552 * determine the protocol headers present. Sometimes, fields for particular
3553 * protocol headers are not matched. In those cases, the protocol headers
3554 * must be explicitly set.
3557 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3558 enum ice_flow_fld_match_type field_type, u16 val_loc,
3559 u16 mask_loc, u16 last_loc)
3561 u64 bit = BIT_ULL(fld);
/* Range-type handling elided in this listing */
3564 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3567 seg->fields[fld].type = field_type;
3568 seg->fields[fld].src.val = val_loc;
3569 seg->fields[fld].src.mask = mask_loc;
3570 seg->fields[fld].src.last = last_loc;
/* Record the protocol header this field implies for the segment */
3572 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3576 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3577 * @seg: packet segment the field being set belongs to
3578 * @fld: field to be set
3579 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3580 * entry's input buffer
3581 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3583 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3584 * entry's input buffer
3585 * @range: indicate if field being matched is to be in a range
3587 * This function specifies the locations, in the form of byte offsets from the
3588 * start of the input buffer for a flow entry, from where the value to match,
3589 * the mask value, and upper value can be extracted. These locations are then
3590 * stored in the flow profile. When adding a flow entry associated with the
3591 * flow profile, these locations will be used to quickly extract the values and
3592 * create the content of a match entry. This function should only be used for
3593 * fixed-size data structures.
3596 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3597 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Convenience wrapper: selects RANGE vs. regular match type */
3599 enum ice_flow_fld_match_type t = range ?
3600 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3602 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3606 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3607 * @seg: packet segment the field being set belongs to
3608 * @fld: field to be set
3609 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3610 * entry's input buffer
3611 * @pref_loc: location of prefix value from entry's input buffer
3612 * @pref_sz: size of the location holding the prefix value
3614 * This function specifies the locations, in the form of byte offsets from the
3615 * start of the input buffer for a flow entry, from where the value to match
3616 * and the IPv4 prefix value can be extracted. These locations are then stored
3617 * in the flow profile. When adding flow entries to the associated flow profile,
3618 * these locations can be used to quickly extract the values to create the
3619 * content of a match entry. This function should only be used for fixed-size
3623 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3624 u16 val_loc, u16 pref_loc, u8 pref_sz)
3626 /* For this type of field, the "mask" location is for the prefix value's
3627 * location and the "last" location is for the size of the location of
3630 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3631 pref_loc, (u16)pref_sz);
3635 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3636 * @seg: packet segment the field being set belongs to
3637 * @off: offset of the raw field from the beginning of the segment in bytes
3638 * @len: length of the raw pattern to be matched
3639 * @val_loc: location of the value to match from entry's input buffer
3640 * @mask_loc: location of mask value from entry's input buffer
3642 * This function specifies the offset of the raw field to be match from the
3643 * beginning of the specified packet segment, and the locations, in the form of
3644 * byte offsets from the start of the input buffer for a flow entry, from where
3645 * the value to match and the mask value to be extracted. These locations are
3646 * then stored in the flow profile. When adding flow entries to the associated
3647 * flow profile, these locations can be used to quickly extract the values to
3648 * create the content of a match entry. This function should only be used for
3649 * fixed-size data structures.
3652 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3653 u16 val_loc, u16 mask_loc)
/* Only record the raw field if there is a free slot in the segment */
3655 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3656 seg->raws[seg->raws_cnt].off = off;
3657 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3658 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3659 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3660 /* The "last" field is used to store the length of the field */
3661 seg->raws[seg->raws_cnt].info.src.last = len;
3664 /* Overflows of "raws" will be handled as an error condition later in
3665 * the flow when this information is processed.
3671 * ice_flow_rem_vsi_prof - remove vsi from flow profile
3672 * @hw: pointer to the hardware structure
3673 * @blk: classification stage
3674 * @vsi_handle: software VSI handle
3675 * @prof_id: unique ID to identify this flow profile
3677 * This function removes the flow entries associated to the input
3678 * vsi handle and disassociates the vsi from the flow profile.
3680 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3683 struct ice_flow_prof *prof = NULL;
3684 enum ice_status status = ICE_SUCCESS;
3686 if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3687 return ICE_ERR_PARAM;
3689 /* find flow profile pointer with input package block and profile id */
/* Fix: look up in the caller-supplied block, not hard-coded ICE_BLK_FD;
 * the rest of the function already operates on 'blk'. */
3690 prof = ice_flow_find_prof_id(hw, blk, prof_id);
3692 ice_debug(hw, ICE_DBG_PKG,
3693 "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3694 return ICE_ERR_DOES_NOT_EXIST;
3697 /* Remove all remaining flow entries before removing the flow profile */
3698 if (!LIST_EMPTY(&prof->entries)) {
3699 struct ice_flow_entry *e, *t;
3701 ice_acquire_lock(&prof->entries_lock);
/* Only entries belonging to this VSI are removed */
3702 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
3704 if (e->vsi_handle != vsi_handle)
3707 status = ice_flow_rem_entry_sync(hw, blk, e);
3711 ice_release_lock(&prof->entries_lock);
3716 /* disassociate the flow profile from sw vsi handle */
3717 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3719 ice_debug(hw, ICE_DBG_PKG,
3720 "ice_flow_disassoc_prof() failed with status=%d\n",
3725 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3726 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_ETH_NON_IP | ICE_FLOW_SEG_HDR_VLAN)
3728 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3729 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3731 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3732 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3734 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3735 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3736 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3737 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3740 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3741 * @segs: pointer to the flow field segment(s)
3742 * @seg_cnt: segment count
3743 * @cfg: configure parameters
3745 * Helper function to extract fields from hash bitmap and use flow
3746 * header value to set flow field segment for further use in flow
3747 * profile entry or removal.
3749 static enum ice_status
3750 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3751 const struct ice_rss_hash_cfg *cfg)
3753 struct ice_flow_seg_info *seg;
3757 /* set inner most segment */
3758 seg = &segs[seg_cnt - 1];
/* Each bit of hash_flds names one flow field; no value/mask locations are
 * needed for RSS, hence all offsets are ICE_FLOW_FLD_OFF_INVAL */
3760 ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3761 ICE_FLOW_FIELD_IDX_MAX)
3762 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3763 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3764 ICE_FLOW_FLD_OFF_INVAL, false);
3766 ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3768 /* set outer most header */
3769 if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3770 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3771 ICE_FLOW_SEG_HDR_IPV_FRAG |
3772 ICE_FLOW_SEG_HDR_IPV_OTHER;
3773 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3774 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3775 ICE_FLOW_SEG_HDR_IPV_FRAG |
3776 ICE_FLOW_SEG_HDR_IPV_OTHER;
3777 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE)
3778 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3779 ICE_FLOW_SEG_HDR_GRE |
3780 ICE_FLOW_SEG_HDR_IPV_OTHER;
3781 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE)
3782 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3783 ICE_FLOW_SEG_HDR_GRE |
3784 ICE_FLOW_SEG_HDR_IPV_OTHER;
/* Reject header combinations RSS cannot express */
3786 if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3787 ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
3788 ~ICE_FLOW_SEG_HDR_IPV_FRAG)
3789 return ICE_ERR_PARAM;
/* At most one L3 and one L4 protocol may be selected */
3791 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3792 if (val && !ice_is_pow2(val))
3795 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3796 if (val && !ice_is_pow2(val))
3803 * ice_rem_vsi_rss_list - remove VSI from RSS list
3804 * @hw: pointer to the hardware structure
3805 * @vsi_handle: software VSI handle
3807 * Remove the VSI from all RSS configurations in the list.
3809 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3811 struct ice_rss_cfg *r, *tmp;
3813 if (LIST_EMPTY(&hw->rss_list_head))
3816 ice_acquire_lock(&hw->rss_locks);
/* Clear this VSI's bit from every config; drop configs left with no VSIs
 * (the matching ice_free of 'r' is elided in this listing) */
3817 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3818 ice_rss_cfg, l_entry)
3819 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3820 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3821 LIST_DEL(&r->l_entry);
3824 ice_release_lock(&hw->rss_locks);
3828 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3829 * @hw: pointer to the hardware structure
3830 * @vsi_handle: software VSI handle
3832 * This function will iterate through all flow profiles and disassociate
3833 * the VSI from that profile. If the flow profile has no VSIs it will
3836 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3838 const enum ice_block blk = ICE_BLK_RSS;
3839 struct ice_flow_prof *p, *t;
3840 enum ice_status status = ICE_SUCCESS;
3842 if (!ice_is_vsi_valid(hw, vsi_handle))
3843 return ICE_ERR_PARAM;
3845 if (LIST_EMPTY(&hw->fl_profs[blk]))
3848 ice_acquire_lock(&hw->rss_locks);
3849 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3851 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3852 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
/* If no VSI references the profile anymore, remove it entirely */
3856 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3857 status = ice_flow_rem_prof(hw, blk, p->id);
3862 ice_release_lock(&hw->rss_locks);
3868 * ice_get_rss_hdr_type - get a RSS profile's header type
3869 * @prof: RSS flow profile
3871 static enum ice_rss_cfg_hdr_type
3872 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
/* Default when the segment count matches neither case below. */
3874 enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
/* A single segment means hashing is on the outer headers only. */
3876 if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3877 hdr_type = ICE_RSS_OUTER_HEADERS;
3878 } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
/* Two segments: tunneled packet, classify by the outer L3 header.
 * The checks are not else-if chained, so an outer IPv4/IPv6 match
 * overrides the HDR_NONE case.
 */
3879 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3880 hdr_type = ICE_RSS_INNER_HEADERS;
3881 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3882 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3883 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3884 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3891 * ice_rem_rss_list - remove RSS configuration from list
3892 * @hw: pointer to the hardware structure
3893 * @vsi_handle: software VSI handle
3894 * @prof: pointer to flow profile
3896 * Assumption: lock has already been acquired for RSS list
3899 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3901 enum ice_rss_cfg_hdr_type hdr_type;
3902 struct ice_rss_cfg *r, *tmp;
3904 /* Search for RSS hash fields associated to the VSI that match the
3905 * hash configurations associated to the flow profile. If found
3906 * remove from the RSS entry list of the VSI context and delete entry.
3908 hdr_type = ice_get_rss_hdr_type(prof);
3909 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3910 ice_rss_cfg, l_entry)
/* Match on the profile's innermost segment: hash fields, additional
 * headers, and derived header type must all agree.
 */
3911 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3912 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3913 r->hash.hdr_type == hdr_type) {
3914 ice_clear_bit(vsi_handle, r->vsis);
/* Unlink the entry once no VSI references it anymore. */
3915 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3916 LIST_DEL(&r->l_entry);
3924 * ice_add_rss_list - add RSS configuration to list
3925 * @hw: pointer to the hardware structure
3926 * @vsi_handle: software VSI handle
3927 * @prof: pointer to flow profile
3929 * Assumption: lock has already been acquired for RSS list
3931 static enum ice_status
3932 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3934 enum ice_rss_cfg_hdr_type hdr_type;
3935 struct ice_rss_cfg *r, *rss_cfg;
/* If an entry with identical hash fields, additional headers and
 * header type already exists, just mark this VSI in it and return.
 */
3937 hdr_type = ice_get_rss_hdr_type(prof);
3938 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3939 ice_rss_cfg, l_entry)
3940 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3941 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3942 r->hash.hdr_type == hdr_type) {
3943 ice_set_bit(vsi_handle, r->vsis);
/* No matching entry: allocate a new one describing this profile's
 * innermost-segment hash configuration.
 */
3947 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3949 return ICE_ERR_NO_MEMORY;
3951 rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3952 rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3953 rss_cfg->hash.hdr_type = hdr_type;
3954 rss_cfg->hash.symm = prof->cfg.symm;
3955 ice_set_bit(vsi_handle, rss_cfg->vsis);
3957 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Shift/mask pairs carving a 64-bit flow profile ID into three fields;
 * the layout is documented in the "Flow profile ID format" comment below.
 */
3962 #define ICE_FLOW_PROF_HASH_S 0
3963 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3964 #define ICE_FLOW_PROF_HDR_S 32
3965 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3966 #define ICE_FLOW_PROF_ENCAP_S 62
3967 #define ICE_FLOW_PROF_ENCAP_M (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3969 /* Flow profile ID format:
3970 * [0:31] - Packet match fields
3971 * [32:61] - Protocol header
3972 * [62:63] - Encapsulation flag:
3975 * 2 for tunneled with outer ipv4
3976 * 3 for tunneled with outer ipv6
3978 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3979 ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3980 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3981 (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
/* Program one byte of a GLQF_HSYMM register so that field-vector word
 * 'src' is symmetrized (XORed) with word 'dst' for the given profile.
 * Each 32-bit register holds four one-byte slots, hence the /4 and %4
 * arithmetic below. NOTE(review): 0x80 appears to be the enable/valid
 * bit of the HSYMM byte - confirm against the register definition.
 */
3984 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3986 u32 s = ((src % 4) << 3); /* byte shift */
3987 u32 v = dst | 0x80; /* value to program */
3988 u8 i = src / 4; /* register index */
/* Read-modify-write: replace only the targeted byte of the register. */
3991 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3992 reg = (reg & ~(0xff << s)) | (v << s);
3993 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
/* Symmetrize 'len' consecutive field-vector words starting at 'src'
 * with the corresponding words starting at 'dst' (and vice versa, so
 * the pairing is applied in both directions).
 */
3997 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
4000 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
4003 for (i = 0; i < len; i++) {
4004 ice_rss_config_xor_word(hw, prof_id,
4005 /* Yes, the field-vector word order in GLQF_HSYMM and
4006 * GLQF_HINSET is inverted (last word first)!
4008 fv_last_word - (src + i),
4009 fv_last_word - (dst + i));
4010 ice_rss_config_xor_word(hw, prof_id,
4011 fv_last_word - (dst + i),
4012 fv_last_word - (src + i));
/* Update the symmetric-hash (XOR) hardware configuration for a flow
 * profile: clear the profile's GLQF_HSYMM registers, then, if symmetric
 * hashing is enabled, pair up the source/destination address and port
 * fields of the innermost segment.
 */
4017 ice_rss_update_symm(struct ice_hw *hw,
4018 struct ice_flow_prof *prof)
4020 struct ice_prof_map *map;
/* Resolve the software profile ID to the hardware profile ID; the
 * profile-map lock covers only this lookup.
 */
4023 ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
4024 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
4026 prof_id = map->prof_id;
4027 ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
/* NOTE(review): the literal 6 matches ICE_SYMM_REG_INDEX_MAX defined
 * later in this file - consider reusing the macro.
 */
4030 /* clear to default */
4031 for (m = 0; m < 6; m++)
4032 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
4033 if (prof->cfg.symm) {
/* Symmetric hashing: locate the extraction info of each src/dst
 * field pair in the innermost (last) segment.
 */
4034 struct ice_flow_seg_info *seg =
4035 &prof->segs[prof->segs_cnt - 1];
4037 struct ice_flow_seg_xtrct *ipv4_src =
4038 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
4039 struct ice_flow_seg_xtrct *ipv4_dst =
4040 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
4041 struct ice_flow_seg_xtrct *ipv6_src =
4042 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
4043 struct ice_flow_seg_xtrct *ipv6_dst =
4044 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
4046 struct ice_flow_seg_xtrct *tcp_src =
4047 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
4048 struct ice_flow_seg_xtrct *tcp_dst =
4049 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
4051 struct ice_flow_seg_xtrct *udp_src =
4052 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
4053 struct ice_flow_seg_xtrct *udp_dst =
4054 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
4056 struct ice_flow_seg_xtrct *sctp_src =
4057 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
4058 struct ice_flow_seg_xtrct *sctp_dst =
4059 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
/* A field pair is configured only when both members were actually
 * extracted (non-zero protocol ID). Lengths are in field-vector
 * words: 2 for an IPv4 address, 8 for IPv6, 1 for a port.
 */
4062 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
4063 ice_rss_config_xor(hw, prof_id,
4064 ipv4_src->idx, ipv4_dst->idx, 2);
4067 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
4068 ice_rss_config_xor(hw, prof_id,
4069 ipv6_src->idx, ipv6_dst->idx, 8);
4072 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
4073 ice_rss_config_xor(hw, prof_id,
4074 tcp_src->idx, tcp_dst->idx, 1);
4077 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
4078 ice_rss_config_xor(hw, prof_id,
4079 udp_src->idx, udp_dst->idx, 1);
4082 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
4083 ice_rss_config_xor(hw, prof_id,
4084 sctp_src->idx, sctp_dst->idx, 1);
4089 * ice_rss_cfg_raw_symm - configure symmetric hash parameters
4091 * @hw: pointer to the hardware structure
4092 * @prof: pointer to parser profile
4093 * @prof_id: profile ID
4095 * Calculate symmetric hash parameters based on input protocol type.
4098 ice_rss_cfg_raw_symm(struct ice_hw *hw,
4099 struct ice_parser_profile *prof, u64 prof_id)
4101 u8 src_idx, dst_idx, proto_id;
/* Scan the parser field vector looking for adjacent src/dst field
 * pairs of the same protocol; each match is XOR-paired below.
 */
4104 while (i < prof->fv_num) {
4105 proto_id = prof->fv[i].proto_id;
/* IPv4: source address followed 'len' words later by the destination
 * address of the same protocol header.
 */
4108 case ICE_PROT_IPV4_OF_OR_S:
4109 len = ICE_FLOW_FLD_SZ_IPV4_ADDR /
4110 ICE_FLOW_FV_EXTRACT_SZ;
4111 if (prof->fv[i].offset ==
4112 ICE_FLOW_FIELD_IPV4_SRC_OFFSET &&
4113 prof->fv[i + len].proto_id == proto_id &&
4114 prof->fv[i + len].offset ==
4115 ICE_FLOW_FIELD_IPV4_DST_OFFSET) {
/* IPv6: same pattern with the larger address size. */
4123 case ICE_PROT_IPV6_OF_OR_S:
4124 len = ICE_FLOW_FLD_SZ_IPV6_ADDR /
4125 ICE_FLOW_FV_EXTRACT_SZ;
4126 if (prof->fv[i].offset ==
4127 ICE_FLOW_FIELD_IPV6_SRC_OFFSET &&
4128 prof->fv[i + len].proto_id == proto_id &&
4129 prof->fv[i + len].offset ==
4130 ICE_FLOW_FIELD_IPV6_DST_OFFSET) {
/* L4 (TCP/UDP/SCTP): pair source and destination ports. */
4138 case ICE_PROT_TCP_IL:
4139 case ICE_PROT_UDP_IL_OR_S:
4140 case ICE_PROT_SCTP_IL:
4141 len = ICE_FLOW_FLD_SZ_PORT /
4142 ICE_FLOW_FV_EXTRACT_SZ;
4143 if (prof->fv[i].offset ==
4144 ICE_FLOW_FIELD_SRC_PORT_OFFSET &&
4145 prof->fv[i + len].proto_id == proto_id &&
4146 prof->fv[i + len].offset ==
4147 ICE_FLOW_FIELD_DST_PORT_OFFSET) {
/* Program the discovered src/dst pair into GLQF_HSYMM. */
4159 ice_rss_config_xor(hw, prof_id, src_idx, dst_idx, len);
4163 /* Max registers index per packet profile */
4164 #define ICE_SYMM_REG_INDEX_MAX 6
4167 * ice_rss_update_raw_symm - update symmetric hash configuration
4169 * @hw: pointer to the hardware structure
4170 * @cfg: configure parameters for raw pattern
4171 * @id: profile tracking ID
4173 * Update symmetric hash configuration for raw pattern if required.
4174 * Otherwise only clear to default.
4177 ice_rss_update_raw_symm(struct ice_hw *hw,
4178 struct ice_rss_raw_cfg *cfg, u64 id)
4180 struct ice_prof_map *map;
/* Resolve the tracking ID to the hardware profile ID under the
 * profile-map lock; only the lookup is protected.
 */
4183 ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
4184 map = ice_search_prof_id(hw, ICE_BLK_RSS, id);
4186 prof_id = map->prof_id;
4187 ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
4190 /* clear to default */
4191 for (m = 0; m < ICE_SYMM_REG_INDEX_MAX; m++)
4192 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
/* Re-apply symmetric pairing from the raw parser profile. */
4194 ice_rss_cfg_raw_symm(hw, &cfg->prof, prof_id);
4198 * ice_add_rss_cfg_sync - add an RSS configuration
4199 * @hw: pointer to the hardware structure
4200 * @vsi_handle: software VSI handle
4201 * @cfg: configure parameters
4203 * Assumption: lock has already been acquired for RSS list
4205 static enum ice_status
4206 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
4207 const struct ice_rss_hash_cfg *cfg)
4209 const enum ice_block blk = ICE_BLK_RSS;
4210 struct ice_flow_prof *prof = NULL;
4211 struct ice_flow_seg_info *segs;
4212 enum ice_status status;
/* Outer-header-only configs need a single segment; tunneled configs
 * need the maximum (outer + inner) segment count.
 */
4215 segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
4216 ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
4218 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
4221 return ICE_ERR_NO_MEMORY;
4223 /* Construct the packet segment info from the hashed fields */
4224 status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
4228 /* Search for a flow profile that has matching headers, hash fields
4229 * and has the input VSI associated to it. If found, no further
4230 * operations required and exit.
4232 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4234 ICE_FLOW_FIND_PROF_CHK_FLDS |
4235 ICE_FLOW_FIND_PROF_CHK_VSI)
/* Exact match found: only the symmetric flag may need refreshing. */
4237 if (prof->cfg.symm == cfg->symm)
4239 prof->cfg.symm = cfg->symm;
4243 /* Check if a flow profile exists with the same protocol headers and
4244 * associated with the input VSI. If so disassociate the VSI from
4245 * this profile. The VSI will be added to a new profile created with
4246 * the protocol header and new hash field configuration.
4248 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4249 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
4251 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
4253 ice_rem_rss_list(hw, vsi_handle, prof);
4257 /* Remove profile if it has no VSIs associated */
4258 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
4259 status = ice_flow_rem_prof(hw, blk, prof->id);
4265 /* Search for a profile that has same match fields only. If this
4266 * exists then associate the VSI to this profile.
4268 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4270 ICE_FLOW_FIND_PROF_CHK_FLDS);
4272 if (prof->cfg.symm == cfg->symm) {
4273 status = ice_flow_assoc_prof(hw, blk, prof,
4276 status = ice_add_rss_list(hw, vsi_handle,
4279 /* if a profile exist but with different symmetric
4280 * requirement, just return error.
4282 status = ICE_ERR_NOT_SUPPORTED;
4287 /* Create a new flow profile with generated profile and packet
4288 * segment information.
4290 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
4291 ICE_FLOW_GEN_PROFID(cfg->hash_flds,
4292 segs[segs_cnt - 1].hdrs,
4294 segs, segs_cnt, NULL, 0, &prof);
4298 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
4299 /* If association to a new flow profile failed then this profile can
4303 ice_flow_rem_prof(hw, blk, prof->id);
/* Track the new configuration and program symmetric hashing in HW. */
4307 status = ice_add_rss_list(hw, vsi_handle, prof);
4309 prof->cfg.symm = cfg->symm;
4311 ice_rss_update_symm(hw, prof);
4319 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
4320 * @hw: pointer to the hardware structure
4321 * @vsi_handle: software VSI handle
4322 * @cfg: configure parameters
4324 * This function will generate a flow profile based on fields associated with
4325 * the input fields to hash on, the flow type and use the VSI number to add
4326 * a flow entry to the profile.
4329 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4330 const struct ice_rss_hash_cfg *cfg)
4332 struct ice_rss_hash_cfg local_cfg;
4333 enum ice_status status;
/* Validate inputs: VSI handle, non-NULL config, known header type,
 * and at least one hash field requested.
 */
4335 if (!ice_is_vsi_valid(hw, vsi_handle) ||
4336 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4337 cfg->hash_flds == ICE_HASH_INVALID)
4338 return ICE_ERR_PARAM;
/* NOTE(review): local_cfg is presumably initialized from *cfg in a
 * line not visible here - confirm against the full source.
 */
/* A specific header type maps to a single sync call; ANY_HEADERS is
 * expanded into an OUTER pass followed by an INNER pass.
 */
4341 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4342 ice_acquire_lock(&hw->rss_locks);
4343 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4344 ice_release_lock(&hw->rss_locks);
4346 ice_acquire_lock(&hw->rss_locks);
4347 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4348 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4350 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4351 status = ice_add_rss_cfg_sync(hw, vsi_handle,
4354 ice_release_lock(&hw->rss_locks);
4361 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
4362 * @hw: pointer to the hardware structure
4363 * @vsi_handle: software VSI handle
4364 * @cfg: configure parameters
4366 * Assumption: lock has already been acquired for RSS list
4368 static enum ice_status
4369 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
4370 const struct ice_rss_hash_cfg *cfg)
4372 const enum ice_block blk = ICE_BLK_RSS;
4373 struct ice_flow_seg_info *segs;
4374 struct ice_flow_prof *prof;
4375 enum ice_status status;
/* Segment count mirrors ice_add_rss_cfg_sync: one segment for outer
 * headers only, the maximum for tunneled configurations.
 */
4378 segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
4379 ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
4380 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
4383 return ICE_ERR_NO_MEMORY;
4385 /* Construct the packet segment info from the hashed fields */
4386 status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
/* Find the profile with matching headers AND hash fields; removal of
 * a configuration that was never added is an error.
 */
4390 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4392 ICE_FLOW_FIND_PROF_CHK_FLDS);
4394 status = ICE_ERR_DOES_NOT_EXIST;
4398 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
4402 /* Remove RSS configuration from VSI context before deleting
4405 ice_rem_rss_list(hw, vsi_handle, prof);
/* Drop the profile entirely once no VSI references it. */
4407 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
4408 status = ice_flow_rem_prof(hw, blk, prof->id);
4416 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
4417 * @hw: pointer to the hardware structure
4418 * @vsi_handle: software VSI handle
4419 * @cfg: configure parameters
4421 * This function will lookup the flow profile based on the input
4422 * hash field bitmap, iterate through the profile entry list of
4423 * that profile and find entry associated with input VSI to be
4424 * removed. Calls are made to underlying flow apis which will in
4425 * turn build or update buffers for RSS XLT1 section.
4428 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4429 const struct ice_rss_hash_cfg *cfg)
4431 struct ice_rss_hash_cfg local_cfg;
4432 enum ice_status status;
/* Same parameter validation as ice_add_rss_cfg. */
4434 if (!ice_is_vsi_valid(hw, vsi_handle) ||
4435 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4436 cfg->hash_flds == ICE_HASH_INVALID)
4437 return ICE_ERR_PARAM;
4439 ice_acquire_lock(&hw->rss_locks);
/* NOTE(review): local_cfg is presumably initialized from *cfg in a
 * line not visible here - confirm against the full source.
 */
/* ANY_HEADERS is expanded into an OUTER removal followed by an INNER
 * removal, mirroring the add path.
 */
4441 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4442 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4444 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4445 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4448 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4449 status = ice_rem_rss_cfg_sync(hw, vsi_handle,
4453 ice_release_lock(&hw->rss_locks);
4459 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
4460 * @hw: pointer to the hardware structure
4461 * @vsi_handle: software VSI handle
4463 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
4465 enum ice_status status = ICE_SUCCESS;
4466 struct ice_rss_cfg *r;
4468 if (!ice_is_vsi_valid(hw, vsi_handle))
4469 return ICE_ERR_PARAM;
/* Re-apply every recorded RSS configuration that references this VSI
 * (used after reset, when hardware state must be rebuilt from the
 * software list).
 */
4471 ice_acquire_lock(&hw->rss_locks);
4472 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4473 ice_rss_cfg, l_entry) {
4474 if (ice_is_bit_set(r->vsis, vsi_handle)) {
4475 status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
4480 ice_release_lock(&hw->rss_locks);
4486 * ice_get_rss_cfg - returns hashed fields for the given header types
4487 * @hw: pointer to the hardware structure
4488 * @vsi_handle: software VSI handle
4489 * @hdrs: protocol header type
4491 * This function will return the match fields of the first instance of flow
4492 * profile having the given header types and containing input VSI
4494 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4496 u64 rss_hash = ICE_HASH_INVALID;
4497 struct ice_rss_cfg *r;
4499 /* verify if the protocol header is non zero and VSI is valid */
4500 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4501 return ICE_HASH_INVALID;
4503 ice_acquire_lock(&hw->rss_locks);
4504 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4505 ice_rss_cfg, l_entry)
4506 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4507 r->hash.addl_hdrs == hdrs) {
4508 rss_hash = r->hash.hash_flds;
4511 ice_release_lock(&hw->rss_locks);