/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2021 Intel Corporation
 */
5 #include "ice_common.h"
/* Size of known protocol header fields, in bytes.
 * The ICE_FLOW_FLD_INFO() macros below convert these byte sizes to bit
 * widths when populating ice_flds_info[].
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR	6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR	8
#define ICE_FLOW_FLD_SZ_IPV4_ID		2
#define ICE_FLOW_FLD_SZ_IPV6_ID		4
#define ICE_FLOW_FLD_SZ_IP_CHKSUM	2
#define ICE_FLOW_FLD_SZ_TCP_CHKSUM	2
#define ICE_FLOW_FLD_SZ_UDP_CHKSUM	2
#define ICE_FLOW_FLD_SZ_SCTP_CHKSUM	4
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
#define ICE_FLOW_FLD_SZ_PFCP_SEID	8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID	4
#define ICE_FLOW_FLD_SZ_ESP_SPI		4
#define ICE_FLOW_FLD_SZ_AH_SPI		4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI	4
#define ICE_FLOW_FLD_SZ_VXLAN_VNI	4
#define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID	2
42 /* Describe properties of a protocol header field */
43 struct ice_flow_field_info {
44 enum ice_flow_seg_hdr hdr;
45 s16 off; /* Offset from start of a protocol header, in bits */
46 u16 size; /* Size of fields in bits */
47 u16 mask; /* 16-bit mask for field */
/* Initializer for a struct ice_flow_field_info entry with no mask.
 * Offsets and sizes are given in bytes and stored in bits.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = 0, \
}
/* Initializer for a struct ice_flow_field_info entry with an explicit
 * 16-bit mask. Offsets and sizes are given in bytes and stored in bits.
 */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
64 /* Table containing properties of supported protocol header fields */
66 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
68 /* ICE_FLOW_FIELD_IDX_ETH_DA */
69 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
70 /* ICE_FLOW_FIELD_IDX_ETH_SA */
71 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
72 /* ICE_FLOW_FIELD_IDX_S_VLAN */
73 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
74 /* ICE_FLOW_FIELD_IDX_C_VLAN */
75 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
76 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
77 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
79 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
80 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
82 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
83 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
85 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
86 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
87 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
88 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
89 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
90 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
91 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
92 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
93 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
94 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
95 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
96 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
97 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
99 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
100 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
101 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
102 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
103 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
104 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
105 /* ICE_FLOW_FIELD_IDX_IPV4_CHKSUM */
106 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 10, ICE_FLOW_FLD_SZ_IP_CHKSUM),
107 /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
108 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
109 ICE_FLOW_FLD_SZ_IPV4_ID),
110 /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
111 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
112 ICE_FLOW_FLD_SZ_IPV6_ID),
113 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
114 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
115 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
116 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
118 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
119 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
120 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
121 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
122 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
123 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
124 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
125 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
126 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
127 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
128 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
129 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
130 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
132 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
133 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
134 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
135 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
136 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
137 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
138 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
139 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
140 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
141 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
142 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
143 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
144 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
145 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
146 /* ICE_FLOW_FIELD_IDX_TCP_CHKSUM */
147 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 16, ICE_FLOW_FLD_SZ_TCP_CHKSUM),
148 /* ICE_FLOW_FIELD_IDX_UDP_CHKSUM */
149 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 6, ICE_FLOW_FLD_SZ_UDP_CHKSUM),
150 /* ICE_FLOW_FIELD_IDX_SCTP_CHKSUM */
151 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 8,
152 ICE_FLOW_FLD_SZ_SCTP_CHKSUM),
154 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
155 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
156 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
157 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
158 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
159 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
160 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
161 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
162 /* ICE_FLOW_FIELD_IDX_ARP_OP */
163 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
165 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
166 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
167 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
168 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
170 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
171 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
173 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
174 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
175 ICE_FLOW_FLD_SZ_GTP_TEID),
176 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
177 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
178 ICE_FLOW_FLD_SZ_GTP_TEID),
179 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
180 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
181 ICE_FLOW_FLD_SZ_GTP_TEID),
182 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
183 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
184 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
185 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
186 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
187 ICE_FLOW_FLD_SZ_GTP_TEID),
188 /* ICE_FLOW_FIELD_IDX_GTPU_UP_QFI */
189 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_UP, 22,
190 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
191 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
192 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
193 ICE_FLOW_FLD_SZ_GTP_TEID),
194 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI */
195 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_DWN, 22,
196 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
198 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
199 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
200 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
202 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
203 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
204 ICE_FLOW_FLD_SZ_PFCP_SEID),
206 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
207 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
208 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
210 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
211 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
212 ICE_FLOW_FLD_SZ_ESP_SPI),
214 /* ICE_FLOW_FIELD_IDX_AH_SPI */
215 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
216 ICE_FLOW_FLD_SZ_AH_SPI),
218 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
219 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
220 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
221 /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
222 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
223 ICE_FLOW_FLD_SZ_VXLAN_VNI),
225 /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
226 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
227 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
229 /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
230 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
231 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
234 /* Bitmaps indicating relevant packet types for a particular protocol header
236 * Packet types for packets with an Outer/First/Single MAC header
238 static const u32 ice_ptypes_mac_ofos[] = {
239 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
240 0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
241 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
242 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
243 0x00000000, 0x00000000, 0x00000000, 0x00000000,
244 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 0x00000000, 0x00000000, 0x00000000, 0x00000000,
246 0x00000000, 0x00000000, 0x00000000, 0x00000000,
249 /* Packet types for packets with an Innermost/Last MAC VLAN header */
250 static const u32 ice_ptypes_macvlan_il[] = {
251 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
252 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
254 0x00000000, 0x00000000, 0x00000000, 0x00000000,
255 0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 0x00000000, 0x00000000, 0x00000000, 0x00000000,
258 0x00000000, 0x00000000, 0x00000000, 0x00000000,
261 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
262 * does NOT include IPV4 other PTYPEs
264 static const u32 ice_ptypes_ipv4_ofos[] = {
265 0x1D800000, 0x24000800, 0x00000000, 0x00000000,
266 0x00000000, 0x00000155, 0x00000000, 0x00000000,
267 0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
268 0x00001500, 0x00000000, 0x00000000, 0x00000000,
269 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 0x00000000, 0x00000000, 0x00000000, 0x00000000,
271 0x00000000, 0x00000000, 0x00000000, 0x00000000,
272 0x00000000, 0x00000000, 0x00000000, 0x00000000,
275 /* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
276 * includes IPV4 other PTYPEs
278 static const u32 ice_ptypes_ipv4_ofos_all[] = {
279 0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
280 0x00000000, 0x00000155, 0x00000000, 0x00000000,
281 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
282 0x03FFD000, 0x00000000, 0x02FBEFBC, 0x00000000,
283 0x00000000, 0x00000000, 0x00000000, 0x00000000,
284 0x00000000, 0x00000000, 0x00000000, 0x00000000,
285 0x00000000, 0x00000000, 0x00000000, 0x00000000,
286 0x00000000, 0x00000000, 0x00000000, 0x00000000,
289 /* Packet types for packets with an Innermost/Last IPv4 header */
290 static const u32 ice_ptypes_ipv4_il[] = {
291 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
292 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
293 0x00000000, 0x00000000, 0x001FF800, 0x00100000,
294 0xFC0FC000, 0x00000000, 0xBC0BC0BC, 0x00000BC0,
295 0x00000000, 0x00000000, 0x00000000, 0x00000000,
296 0x00000000, 0x00000000, 0x00000000, 0x00000000,
297 0x00000000, 0x00000000, 0x00000000, 0x00000000,
298 0x00000000, 0x00000000, 0x00000000, 0x00000000,
301 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
302 * does NOT include IVP6 other PTYPEs
304 static const u32 ice_ptypes_ipv6_ofos[] = {
305 0x00000000, 0x00000000, 0x76000000, 0x10002000,
306 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
307 0x00000000, 0x03F00000, 0x00000540, 0x00000000,
308 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
309 0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 0x00000000, 0x00000000, 0x00000000, 0x00000000,
311 0x00000000, 0x00000000, 0x00000000, 0x00000000,
312 0x00000000, 0x00000000, 0x00000000, 0x00000000,
315 /* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
316 * includes IPV6 other PTYPEs
318 static const u32 ice_ptypes_ipv6_ofos_all[] = {
319 0x00000000, 0x00000000, 0x76000000, 0x1EFDE000,
320 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
321 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
322 0xFC002000, 0x0000003F, 0xBC000000, 0x0002FBEF,
323 0x00000000, 0x00000000, 0x00000000, 0x00000000,
324 0x00000000, 0x00000000, 0x00000000, 0x00000000,
325 0x00000000, 0x00000000, 0x00000000, 0x00000000,
326 0x00000000, 0x00000000, 0x00000000, 0x00000000,
329 /* Packet types for packets with an Innermost/Last IPv6 header */
330 static const u32 ice_ptypes_ipv6_il[] = {
331 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
332 0x00000770, 0x00000000, 0x00000000, 0x00000000,
333 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
334 0x03F00000, 0x0000003F, 0x02F02F00, 0x0002F02F,
335 0x00000000, 0x00000000, 0x00000000, 0x00000000,
336 0x00000000, 0x00000000, 0x00000000, 0x00000000,
337 0x00000000, 0x00000000, 0x00000000, 0x00000000,
338 0x00000000, 0x00000000, 0x00000000, 0x00000000,
341 /* Packet types for packets with an Outer/First/Single
342 * non-frag IPv4 header - no L4
344 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
345 0x10800000, 0x04000800, 0x00000000, 0x00000000,
346 0x00000000, 0x00000000, 0x00000000, 0x00000000,
347 0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
348 0x00001500, 0x00000000, 0x00000000, 0x00000000,
349 0x00000000, 0x00000000, 0x00000000, 0x00000000,
350 0x00000000, 0x00000000, 0x00000000, 0x00000000,
351 0x00000000, 0x00000000, 0x00000000, 0x00000000,
352 0x00000000, 0x00000000, 0x00000000, 0x00000000,
355 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
356 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
357 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
358 0x00000008, 0x00000000, 0x00000000, 0x00000000,
359 0x00000000, 0x00000000, 0x00139800, 0x00000000,
360 0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
361 0x00000000, 0x00000000, 0x00000000, 0x00000000,
362 0x00000000, 0x00000000, 0x00000000, 0x00000000,
363 0x00000000, 0x00000000, 0x00000000, 0x00000000,
364 0x00000000, 0x00000000, 0x00000000, 0x00000000,
367 /* Packet types for packets with an Outer/First/Single
368 * non-frag IPv6 header - no L4
370 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
371 0x00000000, 0x00000000, 0x42000000, 0x10002000,
372 0x00000000, 0x00000000, 0x00000000, 0x00000000,
373 0x00000000, 0x02300000, 0x00000540, 0x00000000,
374 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
375 0x00000000, 0x00000000, 0x00000000, 0x00000000,
376 0x00000000, 0x00000000, 0x00000000, 0x00000000,
377 0x00000000, 0x00000000, 0x00000000, 0x00000000,
378 0x00000000, 0x00000000, 0x00000000, 0x00000000,
381 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
382 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
383 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
384 0x00000430, 0x00000000, 0x00000000, 0x00000000,
385 0x00000000, 0x00000000, 0x4e600000, 0x00000000,
386 0x02300000, 0x00000023, 0x00000000, 0x00000000,
387 0x00000000, 0x00000000, 0x00000000, 0x00000000,
388 0x00000000, 0x00000000, 0x00000000, 0x00000000,
389 0x00000000, 0x00000000, 0x00000000, 0x00000000,
390 0x00000000, 0x00000000, 0x00000000, 0x00000000,
393 /* Packet types for packets with an Outermost/First ARP header */
394 static const u32 ice_ptypes_arp_of[] = {
395 0x00000800, 0x00000000, 0x00000000, 0x00000000,
396 0x00000000, 0x00000000, 0x00000000, 0x00000000,
397 0x00000000, 0x00000000, 0x00000000, 0x00000000,
398 0x00000000, 0x00000000, 0x00000000, 0x00000000,
399 0x00000000, 0x00000000, 0x00000000, 0x00000000,
400 0x00000000, 0x00000000, 0x00000000, 0x00000000,
401 0x00000000, 0x00000000, 0x00000000, 0x00000000,
402 0x00000000, 0x00000000, 0x00000000, 0x00000000,
405 /* UDP Packet types for non-tunneled packets or tunneled
406 * packets with inner UDP.
408 static const u32 ice_ptypes_udp_il[] = {
409 0x81000000, 0x20204040, 0x04000010, 0x80810102,
410 0x00000040, 0x00000000, 0x00000000, 0x00000000,
411 0x00000000, 0x00410000, 0x908427E0, 0x00100007,
412 0x10410000, 0x00000004, 0x10410410, 0x00004104,
413 0x00000000, 0x00000000, 0x00000000, 0x00000000,
414 0x00000000, 0x00000000, 0x00000000, 0x00000000,
415 0x00000000, 0x00000000, 0x00000000, 0x00000000,
416 0x00000000, 0x00000000, 0x00000000, 0x00000000,
419 /* Packet types for packets with an Innermost/Last TCP header */
420 static const u32 ice_ptypes_tcp_il[] = {
421 0x04000000, 0x80810102, 0x10000040, 0x02040408,
422 0x00000102, 0x00000000, 0x00000000, 0x00000000,
423 0x00000000, 0x00820000, 0x21084000, 0x00000000,
424 0x20820000, 0x00000008, 0x20820820, 0x00008208,
425 0x00000000, 0x00000000, 0x00000000, 0x00000000,
426 0x00000000, 0x00000000, 0x00000000, 0x00000000,
427 0x00000000, 0x00000000, 0x00000000, 0x00000000,
428 0x00000000, 0x00000000, 0x00000000, 0x00000000,
431 /* Packet types for packets with an Innermost/Last SCTP header */
432 static const u32 ice_ptypes_sctp_il[] = {
433 0x08000000, 0x01020204, 0x20000081, 0x04080810,
434 0x00000204, 0x00000000, 0x00000000, 0x00000000,
435 0x00000000, 0x01040000, 0x00000000, 0x00000000,
436 0x41040000, 0x00000010, 0x00000000, 0x00000000,
437 0x00000000, 0x00000000, 0x00000000, 0x00000000,
438 0x00000000, 0x00000000, 0x00000000, 0x00000000,
439 0x00000000, 0x00000000, 0x00000000, 0x00000000,
440 0x00000000, 0x00000000, 0x00000000, 0x00000000,
443 /* Packet types for packets with an Outermost/First ICMP header */
444 static const u32 ice_ptypes_icmp_of[] = {
445 0x10000000, 0x00000000, 0x00000000, 0x00000000,
446 0x00000000, 0x00000000, 0x00000000, 0x00000000,
447 0x00000000, 0x00000000, 0x00000000, 0x00000000,
448 0x00000000, 0x00000000, 0x00000000, 0x00000000,
449 0x00000000, 0x00000000, 0x00000000, 0x00000000,
450 0x00000000, 0x00000000, 0x00000000, 0x00000000,
451 0x00000000, 0x00000000, 0x00000000, 0x00000000,
452 0x00000000, 0x00000000, 0x00000000, 0x00000000,
455 /* Packet types for packets with an Innermost/Last ICMP header */
456 static const u32 ice_ptypes_icmp_il[] = {
457 0x00000000, 0x02040408, 0x40000102, 0x08101020,
458 0x00000408, 0x00000000, 0x00000000, 0x00000000,
459 0x00000000, 0x00000000, 0x42108000, 0x00000000,
460 0x82080000, 0x00000020, 0x00000000, 0x00000000,
461 0x00000000, 0x00000000, 0x00000000, 0x00000000,
462 0x00000000, 0x00000000, 0x00000000, 0x00000000,
463 0x00000000, 0x00000000, 0x00000000, 0x00000000,
464 0x00000000, 0x00000000, 0x00000000, 0x00000000,
467 /* Packet types for packets with an Outermost/First GRE header */
468 static const u32 ice_ptypes_gre_of[] = {
469 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
470 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
471 0x00000000, 0x00000000, 0x00000000, 0x00000000,
472 0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF,
473 0x00000000, 0x00000000, 0x00000000, 0x00000000,
474 0x00000000, 0x00000000, 0x00000000, 0x00000000,
475 0x00000000, 0x00000000, 0x00000000, 0x00000000,
476 0x00000000, 0x00000000, 0x00000000, 0x00000000,
479 /* Packet types for packets with an Innermost/Last MAC header */
480 static const u32 ice_ptypes_mac_il[] = {
481 0x00000000, 0x20000000, 0x00000000, 0x00000000,
482 0x00000000, 0x00000000, 0x00000000, 0x00000000,
483 0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 0x00000000, 0x00000000, 0x00000000, 0x00000000,
485 0x00000000, 0x00000000, 0x00000000, 0x00000000,
486 0x00000000, 0x00000000, 0x00000000, 0x00000000,
487 0x00000000, 0x00000000, 0x00000000, 0x00000000,
488 0x00000000, 0x00000000, 0x00000000, 0x00000000,
491 /* Packet types for GTPC */
492 static const u32 ice_ptypes_gtpc[] = {
493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
494 0x00000000, 0x00000000, 0x00000000, 0x00000000,
495 0x00000000, 0x00000000, 0x000001E0, 0x00000000,
496 0x00000000, 0x00000000, 0x00000000, 0x00000000,
497 0x00000000, 0x00000000, 0x00000000, 0x00000000,
498 0x00000000, 0x00000000, 0x00000000, 0x00000000,
499 0x00000000, 0x00000000, 0x00000000, 0x00000000,
500 0x00000000, 0x00000000, 0x00000000, 0x00000000,
503 /* Packet types for VXLAN with VNI */
504 static const u32 ice_ptypes_vxlan_vni[] = {
505 0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
506 0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
507 0x00000000, 0x00000000, 0x00000000, 0x00000000,
508 0x00000000, 0x00000000, 0x00000000, 0x00000000,
509 0x00000000, 0x00000000, 0x00000000, 0x00000000,
510 0x00000000, 0x00000000, 0x00000000, 0x00000000,
511 0x00000000, 0x00000000, 0x00000000, 0x00000000,
512 0x00000000, 0x00000000, 0x00000000, 0x00000000,
515 /* Packet types for GTPC with TEID */
516 static const u32 ice_ptypes_gtpc_tid[] = {
517 0x00000000, 0x00000000, 0x00000000, 0x00000000,
518 0x00000000, 0x00000000, 0x00000000, 0x00000000,
519 0x00000000, 0x00000000, 0x00000060, 0x00000000,
520 0x00000000, 0x00000000, 0x00000000, 0x00000000,
521 0x00000000, 0x00000000, 0x00000000, 0x00000000,
522 0x00000000, 0x00000000, 0x00000000, 0x00000000,
523 0x00000000, 0x00000000, 0x00000000, 0x00000000,
524 0x00000000, 0x00000000, 0x00000000, 0x00000000,
527 /* Packet types for GTPU */
528 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
529 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
530 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
531 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
532 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
533 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
534 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
535 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
536 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
537 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
538 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
539 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
540 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
541 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
542 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
543 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
544 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
545 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
546 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
547 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
548 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
549 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
550 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
551 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
552 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
553 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
554 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
555 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
556 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
557 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
558 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
559 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
560 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
561 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
562 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
563 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
564 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
565 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
566 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
567 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
568 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
569 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
570 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
571 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
572 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
573 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
574 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
575 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
576 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
577 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
578 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
579 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
580 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
581 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
582 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
583 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
584 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
585 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
586 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
587 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
588 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
591 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
592 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
593 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
594 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
595 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
596 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
597 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
598 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
599 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
600 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
601 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
602 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
603 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
604 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
605 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
606 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
607 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
608 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
609 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
610 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
611 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
612 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
613 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
614 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
615 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
616 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
617 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
618 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
619 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
620 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
621 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
622 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
623 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
624 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
625 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
626 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
627 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
628 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
629 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
630 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
631 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
632 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
633 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
634 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
635 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
636 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
637 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
638 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
639 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
640 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
641 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
642 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
643 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
644 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
645 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
646 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
647 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
648 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
649 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
650 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
651 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
/* PTYPE attribute table: marks each GTP-U packet type (native and tunneled)
 * with the GTP downlink attribute (ICE_PTYPE_ATTR_GTP_DOWNLINK). Selected by
 * ice_flow_proc_seg_hdrs() when ICE_FLOW_SEG_HDR_GTPU_DWN is requested.
 */
static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	/* Tunneled (outer IP-in-IP) GTP-U variants follow */
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_DOWNLINK },
/* PTYPE attribute table: marks each GTP-U packet type (native and tunneled)
 * with the GTP uplink attribute (ICE_PTYPE_ATTR_GTP_UPLINK). Selected by
 * ice_flow_proc_seg_hdrs() when ICE_FLOW_SEG_HDR_GTPU_UP is requested.
 */
static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
	/* Tunneled (outer IP-in-IP) GTP-U variants follow */
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_UPLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_UPLINK },
/* Packet types for GTP-U: 32 x u32 words forming a 1024-bit PTYPE bitmap,
 * ANDed into params->ptypes with ICE_FLOW_PTYPE_MAX.
 */
static const u32 ice_ptypes_gtpu[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
	0x00000000, 0x0000003F, 0xBEFBEFBC, 0x0002FBEF,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for PPPoE (PTYPE bitmap, 32 x u32 words) */
static const u32 ice_ptypes_pppoe[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with a PFCP NODE header (PTYPE bitmap) */
static const u32 ice_ptypes_pfcp_node[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x80000000, 0x00000002,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with a PFCP SESSION header (PTYPE bitmap) */
static const u32 ice_ptypes_pfcp_session[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000005,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for L2TPv3 (PTYPE bitmap) */
static const u32 ice_ptypes_l2tpv3[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000300,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for ESP (PTYPE bitmap) */
static const u32 ice_ptypes_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000003, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for AH (PTYPE bitmap) */
static const u32 ice_ptypes_ah[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x0000000C, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with a NAT-T ESP header (UDP-encapsulated ESP) */
static const u32 ice_ptypes_nat_t_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000030, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for outer MAC frames carrying non-IP payloads (PTYPE bitmap) */
static const u32 ice_ptypes_mac_non_ip_ofos[] = {
	0x00000846, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for GTP-U with no inner IP payload (PTYPE bitmap) */
static const u32 ice_ptypes_gtpu_no_ip[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000600, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for eCPRI over Ethernet, message type 0 (PTYPE bitmap) */
static const u32 ice_ptypes_ecpri_tp0[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000400,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for eCPRI over UDP, message type 0 (PTYPE bitmap) */
static const u32 ice_ptypes_udp_ecpri_tp0[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00100000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for L2TPv2 (PTYPE bitmap) */
static const u32 ice_ptypes_l2tpv2[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for PPP (subset of the L2TPv2 PTYPEs above) */
static const u32 ice_ptypes_ppp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for fragmented IPv4 (PTYPE bitmap) */
static const u32 ice_ptypes_ipv4_frag[] = {
	0x00400000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for fragmented IPv6 (PTYPE bitmap) */
static const u32 ice_ptypes_ipv6_frag[] = {
	0x00000000, 0x00000000, 0x01000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	u16 entry_length; /* # of bytes formatted entry will require */
	/* flow profile being constructed */
	struct ice_flow_prof *prof;
	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS]; /* extraction sequence */
	/* attributes can be used to add attributes to a particular PTYPE */
	const struct ice_ptype_attributes *attr;
	/* per-word masks; same dimension as the es[] extraction sequence */
	u16 mask[ICE_MAX_FV_WORDS];
	/* bitmap of packet types (PTYPEs) this profile applies to */
	ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Encapsulation/tunnel protocol header flags grouped together; per its name
 * this set is treated as "inner" headers for RSS purposes.
 */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	 ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
	 ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
	 ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP | ICE_FLOW_SEG_HDR_GRE)
/* All L2 protocol header flags */
#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* All L3 protocol header flags */
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
/* All L4 protocol header flags */
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 *
 * Each segment may request at most one L3 and one L4 protocol header:
 * the per-layer header bits must be zero or a single bit (power of two).
static enum ice_status
ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
	for (i = 0; i < segs_cnt; i++) {
		/* Multiple L3 headers in one segment are invalid */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
		    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
			return ICE_ERR_PARAM;

		/* Multiple L4 headers in one segment are invalid */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
		    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
			return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
/* MAC header plus one 802.1Q VLAN tag (2 extra bytes of TCI) */
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose header size is to be determined
 *
 * Accumulates the fixed ICE_FLOW_PROT_HDR_SZ_* byte sizes for the L2, L3
 * and L4 headers selected by the segment's header flags.
static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
	/* L2 headers: MAC, optionally including a single VLAN tag */
	sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
		ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;

	/* L3 headers */
	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
		sz += ICE_FLOW_PROT_HDR_SZ_ARP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
		/* A L3 header is required if L4 is specified */

	/* L4 headers */
	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
		sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers being present in packet segments of the specified flow profile.
 * It starts from an all-ones PTYPE bitmap and ANDs in (or ANDs out) the
 * per-protocol bitmaps for every header flag found in each segment; for
 * GTP-U variants it also selects the PTYPE attribute table to apply.
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
	struct ice_flow_prof *prof;

	/* Begin with all PTYPEs set; each matched header narrows the set */
	ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),

	prof = params->prof;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		const ice_bitmap_t *src;

		hdrs = prof->segs[i].hdrs;

		/* L2: outer (ofos) bitmap for segment 0, inner (il) after */
		if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
				(const ice_bitmap_t *)ice_ptypes_mac_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);

		/* VLAN is only applied for non-first segments */
		if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
			src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);

		/* ARP is only applied for the first segment */
		if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
			ice_and_bitmap(params->ptypes, params->ptypes,
				       (const ice_bitmap_t *)ice_ptypes_arp_of,
				       ICE_FLOW_PTYPE_MAX);

		if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
			src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);

		/* L3: select the IPv4/IPv6 PTYPE group matching the
		 * OTHER / FRAG / no-L4 qualifier bits.
		 */
		if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
		    (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
			(const ice_bitmap_t *)ice_ptypes_ipv4_il :
			(const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
			(const ice_bitmap_t *)ice_ptypes_ipv6_il :
			(const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
			   (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
			src = (const ice_bitmap_t *)ice_ptypes_ipv4_frag;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
			src = (const ice_bitmap_t *)ice_ptypes_ipv6_frag;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
				(const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
				(const ice_bitmap_t *)ice_ptypes_ipv4_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
				(const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
				(const ice_bitmap_t *)ice_ptypes_ipv6_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);

		/* Non-IP Ethernet vs PPPoE; otherwise PPPoE PTYPEs are
		 * removed from the set (andnot).
		 */
		if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
			src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
			src = (const ice_bitmap_t *)ice_ptypes_pppoe;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
			src = (const ice_bitmap_t *)ice_ptypes_pppoe;
			ice_andnot_bitmap(params->ptypes, params->ptypes, src,
					  ICE_FLOW_PTYPE_MAX);

		/* L4 headers */
		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
			src = (const ice_bitmap_t *)ice_ptypes_udp_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
			ice_and_bitmap(params->ptypes, params->ptypes,
				       (const ice_bitmap_t *)ice_ptypes_tcp_il,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
			src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);

		/* ICMP or tunnel headers; the GTP-U cases also pick the
		 * PTYPE attribute table (params->attr / attr_cnt).
		 */
		if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
				(const ice_bitmap_t *)ice_ptypes_icmp_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
			src = (const ice_bitmap_t *)ice_ptypes_gre_of;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpc;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with downlink */
			params->attr = ice_attr_gtpu_down;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with uplink */
			params->attr = ice_attr_gtpu_up;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with Extension Header */
			params->attr = ice_attr_gtpu_eh;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet without Extension Header */
			params->attr = ice_attr_gtpu_session;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
		} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
			src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
			src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
			src = (const ice_bitmap_t *)ice_ptypes_esp;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
			src = (const ice_bitmap_t *)ice_ptypes_ah;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
			src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
			src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
			src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

		if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
			src = (const ice_bitmap_t *)ice_ptypes_ppp;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

		/* PFCP: keep NODE or SESSION PTYPEs when requested;
		 * otherwise remove both PFCP groups from the set.
		 */
		if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
			if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
				(const ice_bitmap_t *)ice_ptypes_pfcp_node;
				(const ice_bitmap_t *)ice_ptypes_pfcp_session;

			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
			src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
			ice_andnot_bitmap(params->ptypes, params->ptypes,
					  src, ICE_FLOW_PTYPE_MAX);

			src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
			ice_andnot_bitmap(params->ptypes, params->ptypes,
					  src, ICE_FLOW_PTYPE_MAX);
 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
 *
 * This function will allocate an extraction sequence entries for a DWORD size
 * chunk of the packet flags.
static enum ice_status
ice_flow_xtract_pkt_flags(struct ice_hw *hw,
			  struct ice_flow_prof_params *params,
			  enum ice_flex_mdid_pkt_flags flags)
	u8 fv_words = hw->blk[params->blk].es.fvw;

	/* Make sure the number of extraction sequence entries required does not
	 * exceed the block's capacity.

	if (params->es_cnt >= fv_words)
		return ICE_ERR_MAX_LIMIT;

	/* some blocks require a reversed field vector layout */
	if (hw->blk[params->blk].es.reverse)
		idx = fv_words - params->es_cnt - 1;

		idx = params->es_cnt;

	/* Metadata entry: packet flags use the META protocol ID and the
	 * flags enum value itself serves as the extraction offset.
	 */
	params->es[idx].prot_id = ICE_PROT_META_ID;
	params->es[idx].off = flags;
1350 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1351 * @hw: pointer to the HW struct
1352 * @params: information about the flow to be processed
1353 * @seg: packet segment index of the field to be extracted
1354 * @fld: ID of field to be extracted
1355 * @match: bitfield of all fields
1357 * This function determines the protocol ID, offset, and size of the given
1358 * field. It then allocates one or more extraction sequence entries for the
1359 * given field, and fill the entries with protocol ID and offset information.
1361 static enum ice_status
1362 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1363 u8 seg, enum ice_flow_field fld, u64 match)
1365 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1366 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1367 u8 fv_words = hw->blk[params->blk].es.fvw;
1368 struct ice_flow_fld_info *flds;
1369 u16 cnt, ese_bits, i;
1374 flds = params->prof->segs[seg].fields;
1377 case ICE_FLOW_FIELD_IDX_ETH_DA:
1378 case ICE_FLOW_FIELD_IDX_ETH_SA:
1379 case ICE_FLOW_FIELD_IDX_S_VLAN:
1380 case ICE_FLOW_FIELD_IDX_C_VLAN:
1381 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1383 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1384 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1386 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1387 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1389 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1390 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1392 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1393 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1394 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1396 /* TTL and PROT share the same extraction seq. entry.
1397 * Each is considered a sibling to the other in terms of sharing
1398 * the same extraction sequence entry.
1400 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1401 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1403 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1405 /* If the sibling field is also included, that field's
1406 * mask needs to be included.
1408 if (match & BIT(sib))
1409 sib_mask = ice_flds_info[sib].mask;
1411 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1412 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1413 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1415 /* TTL and PROT share the same extraction seq. entry.
1416 * Each is considered a sibling to the other in terms of sharing
1417 * the same extraction sequence entry.
1419 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1420 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1422 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1424 /* If the sibling field is also included, that field's
1425 * mask needs to be included.
1427 if (match & BIT(sib))
1428 sib_mask = ice_flds_info[sib].mask;
1430 case ICE_FLOW_FIELD_IDX_IPV4_SA:
1431 case ICE_FLOW_FIELD_IDX_IPV4_DA:
1432 case ICE_FLOW_FIELD_IDX_IPV4_CHKSUM:
1433 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1434 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1435 params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1437 prot_id = ICE_PROT_IPV4_IL_IL;
1439 case ICE_FLOW_FIELD_IDX_IPV4_ID:
1440 prot_id = ICE_PROT_IPV4_OF_OR_S;
1442 case ICE_FLOW_FIELD_IDX_IPV6_SA:
1443 case ICE_FLOW_FIELD_IDX_IPV6_DA:
1444 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1445 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1446 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1447 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1448 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1449 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1450 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1451 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1452 params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1454 prot_id = ICE_PROT_IPV6_IL_IL;
1456 case ICE_FLOW_FIELD_IDX_IPV6_ID:
1457 prot_id = ICE_PROT_IPV6_FRAG;
1459 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1460 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1461 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1462 case ICE_FLOW_FIELD_IDX_TCP_CHKSUM:
1463 prot_id = ICE_PROT_TCP_IL;
1465 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1466 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1467 case ICE_FLOW_FIELD_IDX_UDP_CHKSUM:
1468 prot_id = ICE_PROT_UDP_IL_OR_S;
1470 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1471 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1472 case ICE_FLOW_FIELD_IDX_SCTP_CHKSUM:
1473 prot_id = ICE_PROT_SCTP_IL;
1475 case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1476 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1477 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1478 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1479 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1480 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1481 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1482 case ICE_FLOW_FIELD_IDX_GTPU_UP_QFI:
1483 case ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI:
1484 /* GTP is accessed through UDP OF protocol */
1485 prot_id = ICE_PROT_UDP_OF;
1487 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1488 prot_id = ICE_PROT_PPPOE;
1490 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1491 prot_id = ICE_PROT_UDP_IL_OR_S;
1493 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1494 prot_id = ICE_PROT_L2TPV3;
1496 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1497 prot_id = ICE_PROT_ESP_F;
1499 case ICE_FLOW_FIELD_IDX_AH_SPI:
1500 prot_id = ICE_PROT_ESP_2;
1502 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1503 prot_id = ICE_PROT_UDP_IL_OR_S;
1505 case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1506 prot_id = ICE_PROT_ECPRI;
1508 case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1509 prot_id = ICE_PROT_UDP_IL_OR_S;
1511 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1512 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1513 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1514 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1515 case ICE_FLOW_FIELD_IDX_ARP_OP:
1516 prot_id = ICE_PROT_ARP_OF;
1518 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1519 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1520 /* ICMP type and code share the same extraction seq. entry */
1521 prot_id = (params->prof->segs[seg].hdrs &
1522 ICE_FLOW_SEG_HDR_IPV4) ?
1523 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1524 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1525 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1526 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1528 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1529 prot_id = ICE_PROT_GRE_OF;
1532 return ICE_ERR_NOT_IMPL;
1535 /* Each extraction sequence entry is a word in size, and extracts a
1536 * word-aligned offset from a protocol header.
1538 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1540 flds[fld].xtrct.prot_id = prot_id;
1541 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1542 ICE_FLOW_FV_EXTRACT_SZ;
1543 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1544 flds[fld].xtrct.idx = params->es_cnt;
1545 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1547 /* Adjust the next field-entry index after accommodating the number of
1548 * entries this field consumes
1550 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1551 ice_flds_info[fld].size, ese_bits);
1553 /* Fill in the extraction sequence entries needed for this field */
1554 off = flds[fld].xtrct.off;
1555 mask = flds[fld].xtrct.mask;
1556 for (i = 0; i < cnt; i++) {
1557 /* Only consume an extraction sequence entry if there is no
1558 * sibling field associated with this field or the sibling entry
1559 * already extracts the word shared with this field.
1561 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1562 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1563 flds[sib].xtrct.off != off) {
1566 /* Make sure the number of extraction sequence required
1567 * does not exceed the block's capability
1569 if (params->es_cnt >= fv_words)
1570 return ICE_ERR_MAX_LIMIT;
1572 /* some blocks require a reversed field vector layout */
1573 if (hw->blk[params->blk].es.reverse)
1574 idx = fv_words - params->es_cnt - 1;
1576 idx = params->es_cnt;
1578 params->es[idx].prot_id = prot_id;
1579 params->es[idx].off = off;
1580 params->mask[idx] = mask | sib_mask;
1584 off += ICE_FLOW_FV_EXTRACT_SZ;
1591 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1592 * @hw: pointer to the HW struct
1593 * @params: information about the flow to be processed
1594 * @seg: index of packet segment whose raw fields are to be extracted
1596 static enum ice_status
1597 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1604 if (!params->prof->segs[seg].raws_cnt)
1607 if (params->prof->segs[seg].raws_cnt >
1608 ARRAY_SIZE(params->prof->segs[seg].raws))
1609 return ICE_ERR_MAX_LIMIT;
1611 /* Offsets within the segment headers are not supported */
1612 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1614 return ICE_ERR_PARAM;
1616 fv_words = hw->blk[params->blk].es.fvw;
1618 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1619 struct ice_flow_seg_fld_raw *raw;
1622 raw = ¶ms->prof->segs[seg].raws[i];
1624 /* Storing extraction information */
1625 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1626 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1627 ICE_FLOW_FV_EXTRACT_SZ;
1628 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1630 raw->info.xtrct.idx = params->es_cnt;
1632 /* Determine the number of field vector entries this raw field
1635 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1636 (raw->info.src.last * BITS_PER_BYTE),
1637 (ICE_FLOW_FV_EXTRACT_SZ *
1639 off = raw->info.xtrct.off;
1640 for (j = 0; j < cnt; j++) {
1643 /* Make sure the number of extraction sequence required
1644 * does not exceed the block's capability
1646 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1647 params->es_cnt >= ICE_MAX_FV_WORDS)
1648 return ICE_ERR_MAX_LIMIT;
1650 /* some blocks require a reversed field vector layout */
1651 if (hw->blk[params->blk].es.reverse)
1652 idx = fv_words - params->es_cnt - 1;
1654 idx = params->es_cnt;
1656 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1657 params->es[idx].off = off;
1659 off += ICE_FLOW_FV_EXTRACT_SZ;
1667 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1668 * @hw: pointer to the HW struct
1669 * @params: information about the flow to be processed
1671 * This function iterates through all matched fields in the given segments, and
1672 * creates an extraction sequence for the fields.
1674 static enum ice_status
1675 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1676 struct ice_flow_prof_params *params)
1678 enum ice_status status = ICE_SUCCESS;
1681 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1684 if (params->blk == ICE_BLK_ACL) {
1685 status = ice_flow_xtract_pkt_flags(hw, params,
1686 ICE_RX_MDID_PKT_FLAGS_15_0);
1691 for (i = 0; i < params->prof->segs_cnt; i++) {
1692 u64 match = params->prof->segs[i].match;
1693 enum ice_flow_field j;
1695 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1696 ICE_FLOW_FIELD_IDX_MAX) {
1697 status = ice_flow_xtract_fld(hw, params, i, j, match);
1700 ice_clear_bit(j, (ice_bitmap_t *)&match);
1703 /* Process raw matching bytes */
1704 status = ice_flow_xtract_raws(hw, params, i);
1713 * ice_flow_sel_acl_scen - returns the specific scenario
1714 * @hw: pointer to the hardware structure
1715 * @params: information about the flow to be processed
1717 * This function will return the specific scenario based on the
1718 * params passed to it
1720 static enum ice_status
1721 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1723 /* Find the best-fit scenario for the provided match width */
1724 struct ice_acl_scen *cand_scen = NULL, *scen;
1727 return ICE_ERR_DOES_NOT_EXIST;
1729 /* Loop through each scenario and match against the scenario width
1730 * to select the specific scenario
1732 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1733 if (scen->eff_width >= params->entry_length &&
1734 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1737 return ICE_ERR_DOES_NOT_EXIST;
1739 params->prof->cfg.scen = cand_scen;
1745 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1746 * @params: information about the flow to be processed
1748 static enum ice_status
1749 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1751 u16 index, i, range_idx = 0;
1753 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1755 for (i = 0; i < params->prof->segs_cnt; i++) {
1756 struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
1759 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1760 ICE_FLOW_FIELD_IDX_MAX) {
1761 struct ice_flow_fld_info *fld = &seg->fields[j];
1763 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1765 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1766 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1768 /* Range checking only supported for single
1771 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1773 BITS_PER_BYTE * 2) > 1)
1774 return ICE_ERR_PARAM;
1776 /* Ranges must define low and high values */
1777 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1778 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1779 return ICE_ERR_PARAM;
1781 fld->entry.val = range_idx++;
1783 /* Store adjusted byte-length of field for later
1784 * use, taking into account potential
1785 * non-byte-aligned displacement
1787 fld->entry.last = DIVIDE_AND_ROUND_UP
1788 (ice_flds_info[j].size +
1789 (fld->xtrct.disp % BITS_PER_BYTE),
1791 fld->entry.val = index;
1792 index += fld->entry.last;
1796 for (j = 0; j < seg->raws_cnt; j++) {
1797 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1799 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1800 raw->info.entry.val = index;
1801 raw->info.entry.last = raw->info.src.last;
1802 index += raw->info.entry.last;
1806 /* Currently only support using the byte selection base, which only
1807 * allows for an effective entry size of 30 bytes. Reject anything
1810 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1811 return ICE_ERR_PARAM;
1813 /* Only 8 range checkers per profile, reject anything trying to use
1816 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1817 return ICE_ERR_PARAM;
1819 /* Store # bytes required for entry for later use */
1820 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1826 * ice_flow_proc_segs - process all packet segments associated with a profile
1827 * @hw: pointer to the HW struct
1828 * @params: information about the flow to be processed
1830 static enum ice_status
1831 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1833 enum ice_status status;
1835 status = ice_flow_proc_seg_hdrs(params);
1839 status = ice_flow_create_xtrct_seq(hw, params);
1843 switch (params->blk) {
1846 status = ICE_SUCCESS;
1849 status = ice_flow_acl_def_entry_frmt(params);
1852 status = ice_flow_sel_acl_scen(hw, params);
1857 return ICE_ERR_NOT_IMPL;
/* Condition flags for ice_flow_find_prof_conds() */
#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004
1868 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1869 * @hw: pointer to the HW struct
1870 * @blk: classification stage
1871 * @dir: flow direction
1872 * @segs: array of one or more packet segments that describe the flow
1873 * @segs_cnt: number of packet segments provided
1874 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1875 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1877 static struct ice_flow_prof *
1878 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1879 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1880 u8 segs_cnt, u16 vsi_handle, u32 conds)
1882 struct ice_flow_prof *p, *prof = NULL;
1884 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1885 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1886 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1887 segs_cnt && segs_cnt == p->segs_cnt) {
1890 /* Check for profile-VSI association if specified */
1891 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1892 ice_is_vsi_valid(hw, vsi_handle) &&
1893 !ice_is_bit_set(p->vsis, vsi_handle))
1896 /* Protocol headers must be checked. Matched fields are
1897 * checked if specified.
1899 for (i = 0; i < segs_cnt; i++)
1900 if (segs[i].hdrs != p->segs[i].hdrs ||
1901 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1902 segs[i].match != p->segs[i].match))
1905 /* A match is found if all segments are matched */
1906 if (i == segs_cnt) {
1911 ice_release_lock(&hw->fl_profs_locks[blk]);
1917 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1918 * @hw: pointer to the HW struct
1919 * @blk: classification stage
1920 * @dir: flow direction
1921 * @segs: array of one or more packet segments that describe the flow
1922 * @segs_cnt: number of packet segments provided
1925 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1926 struct ice_flow_seg_info *segs, u8 segs_cnt)
1928 struct ice_flow_prof *p;
1930 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1931 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1933 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1937 * ice_flow_find_prof_id - Look up a profile with given profile ID
1938 * @hw: pointer to the HW struct
1939 * @blk: classification stage
1940 * @prof_id: unique ID to identify this flow profile
1942 static struct ice_flow_prof *
1943 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1945 struct ice_flow_prof *p;
1947 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1948 if (p->id == prof_id)
1955 * ice_dealloc_flow_entry - Deallocate flow entry memory
1956 * @hw: pointer to the HW struct
1957 * @entry: flow entry to be removed
1960 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1966 ice_free(hw, entry->entry);
1968 if (entry->range_buf) {
1969 ice_free(hw, entry->range_buf);
1970 entry->range_buf = NULL;
1974 ice_free(hw, entry->acts);
1976 entry->acts_cnt = 0;
1979 ice_free(hw, entry);
1983 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1984 * @hw: pointer to the HW struct
1985 * @blk: classification stage
1986 * @prof_id: the profile ID handle
1987 * @hw_prof_id: pointer to variable to receive the HW profile ID
1990 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1993 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1994 struct ice_prof_map *map;
1996 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1997 map = ice_search_prof_id(hw, blk, prof_id);
1999 *hw_prof_id = map->prof_id;
2000 status = ICE_SUCCESS;
2002 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
/* Scenario value meaning "no scenario assigned" for a PF slot */
#define ICE_ACL_INVALID_SCEN	0x3f
2009 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
2010 * @hw: pointer to the hardware structure
2011 * @prof: pointer to flow profile
2012 * @buf: destination buffer function writes partial extraction sequence to
2014 * returns ICE_SUCCESS if no PF is associated to the given profile
2015 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
2016 * returns other error code for real error
2018 static enum ice_status
2019 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
2020 struct ice_aqc_acl_prof_generic_frmt *buf)
2022 enum ice_status status;
2025 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2029 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
2033 /* If all PF's associated scenarios are all 0 or all
2034 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
2035 * not been configured yet.
2037 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
2038 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
2039 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
2040 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
2043 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
2044 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
2045 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
2046 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
2047 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
2048 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
2049 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
2050 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
2053 return ICE_ERR_IN_USE;
2057 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
2058 * @hw: pointer to the hardware structure
2059 * @acts: array of actions to be performed on a match
2060 * @acts_cnt: number of actions
2062 static enum ice_status
2063 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
2068 for (i = 0; i < acts_cnt; i++) {
2069 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2070 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2071 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2072 struct ice_acl_cntrs cntrs = { 0 };
2073 enum ice_status status;
2075 /* amount is unused in the dealloc path but the common
2076 * parameter check routine wants a value set, as zero
2077 * is invalid for the check. Just set it.
2080 cntrs.bank = 0; /* Only bank0 for the moment */
2082 LE16_TO_CPU(acts[i].data.acl_act.value);
2084 LE16_TO_CPU(acts[i].data.acl_act.value);
2086 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2087 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2089 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2091 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
2100 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
2101 * @hw: pointer to the hardware structure
2102 * @prof: pointer to flow profile
2104 * Disassociate the scenario from the profile for the PF of the VSI.
2106 static enum ice_status
2107 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
2109 struct ice_aqc_acl_prof_generic_frmt buf;
2110 enum ice_status status = ICE_SUCCESS;
2113 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2115 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2119 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
2123 /* Clear scenario for this PF */
2124 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
2125 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2131 * ice_flow_rem_entry_sync - Remove a flow entry
2132 * @hw: pointer to the HW struct
2133 * @blk: classification stage
2134 * @entry: flow entry to be removed
2136 static enum ice_status
2137 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
2138 struct ice_flow_entry *entry)
2141 return ICE_ERR_BAD_PTR;
2143 if (blk == ICE_BLK_ACL) {
2144 enum ice_status status;
2147 return ICE_ERR_BAD_PTR;
2149 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
2150 entry->scen_entry_idx);
2154 /* Checks if we need to release an ACL counter. */
2155 if (entry->acts_cnt && entry->acts)
2156 ice_flow_acl_free_act_cntr(hw, entry->acts,
2160 LIST_DEL(&entry->l_entry);
2162 ice_dealloc_flow_entry(hw, entry);
2168 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
2169 * @hw: pointer to the HW struct
2170 * @blk: classification stage
2171 * @dir: flow direction
2172 * @prof_id: unique ID to identify this flow profile
2173 * @segs: array of one or more packet segments that describe the flow
2174 * @segs_cnt: number of packet segments provided
2175 * @acts: array of default actions
2176 * @acts_cnt: number of default actions
2177 * @prof: stores the returned flow profile added
2179 * Assumption: the caller has acquired the lock to the profile list
2181 static enum ice_status
2182 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
2183 enum ice_flow_dir dir, u64 prof_id,
2184 struct ice_flow_seg_info *segs, u8 segs_cnt,
2185 struct ice_flow_action *acts, u8 acts_cnt,
2186 struct ice_flow_prof **prof)
2188 struct ice_flow_prof_params *params;
2189 enum ice_status status;
2192 if (!prof || (acts_cnt && !acts))
2193 return ICE_ERR_BAD_PTR;
2195 params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2197 return ICE_ERR_NO_MEMORY;
2199 params->prof = (struct ice_flow_prof *)
2200 ice_malloc(hw, sizeof(*params->prof));
2201 if (!params->prof) {
2202 status = ICE_ERR_NO_MEMORY;
2206 /* initialize extraction sequence to all invalid (0xff) */
2207 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2208 params->es[i].prot_id = ICE_PROT_INVALID;
2209 params->es[i].off = ICE_FV_OFFSET_INVAL;
2213 params->prof->id = prof_id;
2214 params->prof->dir = dir;
2215 params->prof->segs_cnt = segs_cnt;
2217 /* Make a copy of the segments that need to be persistent in the flow
2220 for (i = 0; i < segs_cnt; i++)
2221 ice_memcpy(¶ms->prof->segs[i], &segs[i], sizeof(*segs),
2222 ICE_NONDMA_TO_NONDMA);
2224 /* Make a copy of the actions that need to be persistent in the flow
2228 params->prof->acts = (struct ice_flow_action *)
2229 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2230 ICE_NONDMA_TO_NONDMA);
2232 if (!params->prof->acts) {
2233 status = ICE_ERR_NO_MEMORY;
2238 status = ice_flow_proc_segs(hw, params);
2240 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2244 /* Add a HW profile for this flow profile */
2245 status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2246 params->attr, params->attr_cnt, params->es,
2247 params->mask, true);
2249 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
2253 INIT_LIST_HEAD(¶ms->prof->entries);
2254 ice_init_lock(¶ms->prof->entries_lock);
2255 *prof = params->prof;
2259 if (params->prof->acts)
2260 ice_free(hw, params->prof->acts);
2261 ice_free(hw, params->prof);
2264 ice_free(hw, params);
2270 * ice_flow_rem_prof_sync - remove a flow profile
2271 * @hw: pointer to the hardware structure
2272 * @blk: classification stage
2273 * @prof: pointer to flow profile to remove
2275 * Assumption: the caller has acquired the lock to the profile list
2277 static enum ice_status
2278 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2279 struct ice_flow_prof *prof)
2281 enum ice_status status;
2283 /* Remove all remaining flow entries before removing the flow profile */
2284 if (!LIST_EMPTY(&prof->entries)) {
2285 struct ice_flow_entry *e, *t;
2287 ice_acquire_lock(&prof->entries_lock);
2289 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2291 status = ice_flow_rem_entry_sync(hw, blk, e);
2296 ice_release_lock(&prof->entries_lock);
2299 if (blk == ICE_BLK_ACL) {
2300 struct ice_aqc_acl_profile_ranges query_rng_buf;
2301 struct ice_aqc_acl_prof_generic_frmt buf;
2304 /* Disassociate the scenario from the profile for the PF */
2305 status = ice_flow_acl_disassoc_scen(hw, prof);
2309 /* Clear the range-checker if the profile ID is no longer
2312 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2313 if (status && status != ICE_ERR_IN_USE) {
2315 } else if (!status) {
2316 /* Clear the range-checker value for profile ID */
2317 ice_memset(&query_rng_buf, 0,
2318 sizeof(struct ice_aqc_acl_profile_ranges),
2321 status = ice_flow_get_hw_prof(hw, blk, prof->id,
2326 status = ice_prog_acl_prof_ranges(hw, prof_id,
2327 &query_rng_buf, NULL);
2333 /* Remove all hardware profiles associated with this flow profile */
2334 status = ice_rem_prof(hw, blk, prof->id);
2336 LIST_DEL(&prof->l_entry);
2337 ice_destroy_lock(&prof->entries_lock);
2339 ice_free(hw, prof->acts);
2347 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2348 * @buf: Destination buffer function writes partial xtrct sequence to
2349 * @info: Info about field
2352 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2353 struct ice_flow_fld_info *info)
2358 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2359 info->xtrct.disp / BITS_PER_BYTE;
2360 dst = info->entry.val;
2361 for (i = 0; i < info->entry.last; i++)
2362 /* HW stores field vector words in LE, convert words back to BE
2363 * so constructed entries will end up in network order
2365 buf->byte_selection[dst++] = src++ ^ 1;
2369 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2370 * @hw: pointer to the hardware structure
2371 * @prof: pointer to flow profile
2373 static enum ice_status
2374 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2376 struct ice_aqc_acl_prof_generic_frmt buf;
2377 struct ice_flow_fld_info *info;
2378 enum ice_status status;
2382 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2384 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2388 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2389 if (status && status != ICE_ERR_IN_USE)
2393 /* Program the profile dependent configuration. This is done
2394 * only once regardless of the number of PFs using that profile
2396 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2398 for (i = 0; i < prof->segs_cnt; i++) {
2399 struct ice_flow_seg_info *seg = &prof->segs[i];
2402 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2403 ICE_FLOW_FIELD_IDX_MAX) {
2404 info = &seg->fields[j];
2406 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2407 buf.word_selection[info->entry.val] =
2410 ice_flow_acl_set_xtrct_seq_fld(&buf,
2414 for (j = 0; j < seg->raws_cnt; j++) {
2415 info = &seg->raws[j].info;
2416 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2420 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2421 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2425 /* Update the current PF */
2426 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2427 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2433 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2434 * @hw: pointer to the hardware structure
2435 * @blk: classification stage
2436 * @vsi_handle: software VSI handle
2437 * @vsig: target VSI group
2439 * Assumption: the caller has already verified that the VSI to
2440 * be added has the same characteristics as the VSIG and will
2441 * thereby have access to all resources added to that VSIG.
2444 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2447 enum ice_status status;
2449 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2450 return ICE_ERR_PARAM;
2452 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2453 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2455 ice_release_lock(&hw->fl_profs_locks[blk]);
2461 * ice_flow_assoc_prof - associate a VSI with a flow profile
2462 * @hw: pointer to the hardware structure
2463 * @blk: classification stage
2464 * @prof: pointer to flow profile
2465 * @vsi_handle: software VSI handle
2467 * Assumption: the caller has acquired the lock to the profile list
2468 * and the software VSI handle has been validated
2471 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2472 struct ice_flow_prof *prof, u16 vsi_handle)
2474 enum ice_status status = ICE_SUCCESS;
2476 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2477 if (blk == ICE_BLK_ACL) {
2478 status = ice_flow_acl_set_xtrct_seq(hw, prof);
2482 status = ice_add_prof_id_flow(hw, blk,
2483 ice_get_hw_vsi_num(hw,
2487 ice_set_bit(vsi_handle, prof->vsis);
2489 ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2497 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2498 * @hw: pointer to the hardware structure
2499 * @blk: classification stage
2500 * @prof: pointer to flow profile
2501 * @vsi_handle: software VSI handle
2503 * Assumption: the caller has acquired the lock to the profile list
2504 * and the software VSI handle has been validated
2506 static enum ice_status
2507 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2508 struct ice_flow_prof *prof, u16 vsi_handle)
2510 enum ice_status status = ICE_SUCCESS;
2512 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2513 status = ice_rem_prof_id_flow(hw, blk,
2514 ice_get_hw_vsi_num(hw,
2518 ice_clear_bit(vsi_handle, prof->vsis);
2520 ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2527 #define FLAG_GTP_EH_PDU_LINK BIT_ULL(13)
2528 #define FLAG_GTP_EH_PDU BIT_ULL(14)
2530 #define FLAG_GTPU_MSK \
2531 (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
2532 #define FLAG_GTPU_DW \
2533 (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
2534 #define FLAG_GTPU_UP \
2537 * ice_flow_set_hw_prof - Set HW flow profile based on the parsed profile info
2538 * @hw: pointer to the HW struct
2539 * @dest_vsi_handle: dest VSI handle
2540 * @fdir_vsi_handle: fdir programming VSI handle
2541 * @prof: stores parsed profile info from raw flow
2542 * @blk: classification stage
2545 ice_flow_set_hw_prof(struct ice_hw *hw, u16 dest_vsi_handle,
2546 u16 fdir_vsi_handle, struct ice_parser_profile *prof,
2549 int id = ice_find_first_bit(prof->ptypes, UINT16_MAX);
2550 struct ice_flow_prof_params *params;
2551 u8 fv_words = hw->blk[blk].es.fvw;
2552 enum ice_status status;
2556 params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2558 return ICE_ERR_NO_MEMORY;
2560 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2561 params->es[i].prot_id = ICE_PROT_INVALID;
2562 params->es[i].off = ICE_FV_OFFSET_INVAL;
2565 for (i = 0; i < prof->fv_num; i++) {
2566 if (hw->blk[blk].es.reverse)
2567 idx = fv_words - i - 1;
2570 params->es[idx].prot_id = prof->fv[i].proto_id;
2571 params->es[idx].off = prof->fv[i].offset;
2572 params->mask[idx] = CPU_TO_BE16(prof->fv[i].msk);
2575 switch (prof->flags) {
2577 params->attr = ice_attr_gtpu_down;
2578 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
2581 params->attr = ice_attr_gtpu_up;
2582 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
2585 if (prof->flags_msk & FLAG_GTPU_MSK) {
2586 params->attr = ice_attr_gtpu_session;
2587 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
2592 status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes,
2593 params->attr, params->attr_cnt,
2594 params->es, params->mask, false);
2598 status = ice_flow_assoc_hw_prof(hw, blk, dest_vsi_handle,
2599 fdir_vsi_handle, id);
2606 ice_free(hw, params);
2612 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2613 * @hw: pointer to the HW struct
2614 * @blk: classification stage
2615 * @dir: flow direction
2616 * @prof_id: unique ID to identify this flow profile
2617 * @segs: array of one or more packet segments that describe the flow
2618 * @segs_cnt: number of packet segments provided
2619 * @acts: array of default actions
2620 * @acts_cnt: number of default actions
2621 * @prof: stores the returned flow profile added
2624 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2625 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2626 struct ice_flow_action *acts, u8 acts_cnt,
2627 struct ice_flow_prof **prof)
2629 enum ice_status status;
2631 if (segs_cnt > ICE_FLOW_SEG_MAX)
2632 return ICE_ERR_MAX_LIMIT;
2635 return ICE_ERR_PARAM;
2638 return ICE_ERR_BAD_PTR;
2640 status = ice_flow_val_hdrs(segs, segs_cnt);
2644 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2646 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2647 acts, acts_cnt, prof);
2649 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2651 ice_release_lock(&hw->fl_profs_locks[blk]);
2657 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2658 * @hw: pointer to the HW struct
2659 * @blk: the block for which the flow profile is to be removed
2660 * @prof_id: unique ID of the flow profile to be removed
2663 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2665 struct ice_flow_prof *prof;
2666 enum ice_status status;
2668 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2670 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2672 status = ICE_ERR_DOES_NOT_EXIST;
2676 /* prof becomes invalid after the call */
2677 status = ice_flow_rem_prof_sync(hw, blk, prof);
2680 ice_release_lock(&hw->fl_profs_locks[blk]);
2686 * ice_flow_find_entry - look for a flow entry using its unique ID
2687 * @hw: pointer to the HW struct
2688 * @blk: classification stage
2689 * @entry_id: unique ID to identify this flow entry
2691 * This function looks for the flow entry with the specified unique ID in all
2692 * flow profiles of the specified classification stage. If the entry is found,
2693 * and it returns the handle to the flow entry. Otherwise, it returns
2694 * ICE_FLOW_ENTRY_ID_INVAL.
2696 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2698 struct ice_flow_entry *found = NULL;
2699 struct ice_flow_prof *p;
2701 ice_acquire_lock(&hw->fl_profs_locks[blk]);
/* Scan every profile of the block; each profile's entry list has its own lock */
2703 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2704 struct ice_flow_entry *e;
2706 ice_acquire_lock(&p->entries_lock);
2707 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2708 if (e->id == entry_id) {
2712 ice_release_lock(&p->entries_lock);
2718 ice_release_lock(&hw->fl_profs_locks[blk]);
/* Convert the entry pointer into an opaque handle for the caller */
2720 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2724 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2725 * @hw: pointer to the hardware structure
2726 * @acts: array of actions to be performed on a match
2727 * @acts_cnt: number of actions
2728 * @cnt_alloc: indicates if an ACL counter has been allocated.
2730 static enum ice_status
2731 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2732 u8 acts_cnt, bool *cnt_alloc)
/* Bitmap used to detect duplicate action types in the request */
2734 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2737 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2740 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2741 return ICE_ERR_OUT_OF_RANGE;
2743 for (i = 0; i < acts_cnt; i++) {
/* Only these action types are subject to the duplicate check below;
 * NOTE(review): the excerpt omits what happens for other types — verify.
 */
2744 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2745 acts[i].type != ICE_FLOW_ACT_DROP &&
2746 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2747 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2750 /* If the caller want to add two actions of the same type, then
2751 * it is considered invalid configuration.
2753 if (ice_test_and_set_bit(acts[i].type, dup_check))
2754 return ICE_ERR_PARAM;
2757 /* Checks if ACL counters are needed. */
2758 for (i = 0; i < acts_cnt; i++) {
2759 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2760 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2761 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2762 struct ice_acl_cntrs cntrs = { 0 };
2763 enum ice_status status;
2766 cntrs.bank = 0; /* Only bank0 for the moment */
/* Dual counter tracks both packets and bytes; otherwise single */
2768 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2769 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2771 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2773 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2776 /* Counter index within the bank */
2777 acts[i].data.acl_act.value =
2778 CPU_TO_LE16(cntrs.first_cntr);
2787 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2788 * @fld: number of the given field
2789 * @info: info about field
2790 * @range_buf: range checker configuration buffer
2791 * @data: pointer to a data buffer containing flow entry's match values/masks
2792 * @range: Input/output param indicating which range checkers are being used
2795 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2796 struct ice_aqc_acl_profile_ranges *range_buf,
2797 u8 *data, u8 *range)
2801 /* If not specified, default mask is all bits in field */
/* NOTE(review): ice_flds_info[fld].size is in bits; BIT(size) - 1 presumably
 * relies on size < width of the mask type here — confirm no overflow for
 * 16-bit fields in the full source.
 */
2802 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2803 BIT(ice_flds_info[fld].size) - 1 :
2804 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2806 /* If the mask is 0, then we don't need to worry about this input
2807 * range checker value.
/* "last" holds the upper bound, "val" the lower bound of the range;
 * both are shifted by the extraction displacement.
 */
2811 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2813 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2814 u8 range_idx = info->entry.val;
/* HW range checker boundaries and mask are programmed big-endian */
2816 range_buf->checker_cfg[range_idx].low_boundary =
2817 CPU_TO_BE16(new_low);
2818 range_buf->checker_cfg[range_idx].high_boundary =
2819 CPU_TO_BE16(new_high);
2820 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2822 /* Indicate which range checker is being used */
2823 *range |= BIT(range_idx);
2828 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2829 * @fld: number of the given field
2830 * @info: info about the field
2831 * @buf: buffer containing the entry
2832 * @dontcare: buffer containing don't care mask for entry
2833 * @data: pointer to a data buffer containing flow entry's match values/masks
2836 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2837 u8 *dontcare, u8 *data)
2839 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2840 bool use_mask = false;
2843 src = info->src.val;
2844 mask = info->src.mask;
/* Destination is relative to the scenario's byte-select window */
2845 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
/* Sub-byte bit displacement of the extracted field */
2846 disp = info->xtrct.disp % BITS_PER_BYTE;
2848 if (mask != ICE_FLOW_FLD_OFF_INVAL)
/* Copy the field byte-by-byte, carrying displaced bits across bytes
 * via the high half of tmp_s / tmp_m.
 */
2851 for (k = 0; k < info->entry.last; k++, dst++) {
2852 /* Add overflow bits from previous byte */
2853 buf[dst] = (tmp_s & 0xff00) >> 8;
2855 /* If mask is not valid, tmp_m is always zero, so just setting
2856 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2857 * overflow bits of mask from prev byte
2859 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2861 /* If there is displacement, last byte will only contain
2862 * displaced data, but there is no more data to read from user
2863 * buffer, so skip so as not to potentially read beyond end of
2866 if (!disp || k < info->entry.last - 1) {
2867 /* Store shifted data to use in next byte */
2868 tmp_s = data[src++] << disp;
2870 /* Add current (shifted) byte */
2871 buf[dst] |= tmp_s & 0xff;
2873 /* Handle mask if valid */
/* dontcare bits are the complement of the user's mask bits */
2875 tmp_m = (~data[mask++] & 0xff) << disp;
2876 dontcare[dst] |= tmp_m & 0xff;
2881 /* Fill in don't care bits at beginning of field */
2883 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2884 for (k = 0; k < disp; k++)
2885 dontcare[dst] |= BIT(k);
/* Bit position where the field ends within its final byte */
2888 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2890 /* Fill in don't care bits at end of field */
2892 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2893 info->entry.last - 1;
2894 for (k = end_disp; k < BITS_PER_BYTE; k++)
2895 dontcare[dst] |= BIT(k);
2900 * ice_flow_acl_frmt_entry - Format ACL entry
2901 * @hw: pointer to the hardware structure
2902 * @prof: pointer to flow profile
2903 * @e: pointer to the flow entry
2904 * @data: pointer to a data buffer containing flow entry's match values/masks
2905 * @acts: array of actions to be performed on a match
2906 * @acts_cnt: number of actions
2908 * Formats the key (and key_inverse) to be matched from the data passed in,
2909 * along with data from the flow profile. This key/key_inverse pair makes up
2910 * the 'entry' for an ACL flow entry.
2912 static enum ice_status
2913 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2914 struct ice_flow_entry *e, u8 *data,
2915 struct ice_flow_action *acts, u8 acts_cnt)
2917 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2918 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2919 enum ice_status status;
/* Resolve the HW profile ID backing this SW profile */
2924 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2928 /* Format the result action */
2930 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2934 status = ICE_ERR_NO_MEMORY;
/* Keep a SW copy of the actions on the entry */
2936 e->acts = (struct ice_flow_action *)
2937 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2938 ICE_NONDMA_TO_NONDMA);
2942 e->acts_cnt = acts_cnt;
2944 /* Format the matching data */
2945 buf_sz = prof->cfg.scen->width;
2946 buf = (u8 *)ice_malloc(hw, buf_sz);
2950 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2954 /* 'key' buffer will store both key and key_inverse, so must be twice
2957 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2961 range_buf = (struct ice_aqc_acl_profile_ranges *)
2962 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2966 /* Set don't care mask to all 1's to start, will zero out used bytes */
2967 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
/* Walk every matched field of every segment and format it into the
 * key buffer — range-type fields go to the range checkers, all others
 * into the byte-match buffer.
 */
2969 for (i = 0; i < prof->segs_cnt; i++) {
2970 struct ice_flow_seg_info *seg = &prof->segs[i];
2973 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2974 ICE_FLOW_FIELD_IDX_MAX) {
2975 struct ice_flow_fld_info *info = &seg->fields[j];
2977 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2978 ice_flow_acl_frmt_entry_range(j, info,
2982 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* Raw (offset/length) fields are copied byte-for-byte, no bit shifts */
2986 for (j = 0; j < seg->raws_cnt; j++) {
2987 struct ice_flow_fld_info *info = &seg->raws[j].info;
2988 u16 dst, src, mask, k;
2989 bool use_mask = false;
2991 src = info->src.val;
2992 dst = info->entry.val -
2993 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2994 mask = info->src.mask;
2996 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2999 for (k = 0; k < info->entry.last; k++, dst++) {
3000 buf[dst] = data[src++];
3002 dontcare[dst] = ~data[mask++];
/* The profile ID byte must match exactly (not a don't-care) */
3009 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
3010 dontcare[prof->cfg.scen->pid_idx] = 0;
3012 /* Format the buffer for direction flags */
3013 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
3015 if (prof->dir == ICE_FLOW_RX)
3016 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
3019 buf[prof->cfg.scen->rng_chk_idx] = range;
3020 /* Mark any unused range checkers as don't care */
3021 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
/* Ownership of range_buf transfers to the entry (freed below on error) */
3022 e->range_buf = range_buf;
3024 ice_free(hw, range_buf);
/* Produce key + key_inverse from value/don't-care buffers */
3027 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
3033 e->entry_sz = buf_sz * 2;
3040 ice_free(hw, dontcare);
/* Error unwinding: release everything attached to the entry so far */
3045 if (status && range_buf) {
3046 ice_free(hw, range_buf);
3047 e->range_buf = NULL;
3050 if (status && e->acts) {
3051 ice_free(hw, e->acts);
3056 if (status && cnt_alloc)
3057 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
3063 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
3064 * the compared data.
3065 * @prof: pointer to flow profile
3066 * @e: pointer to the comparing flow entry
3067 * @do_chg_action: decide if we want to change the ACL action
3068 * @do_add_entry: decide if we want to add the new ACL entry
3069 * @do_rem_entry: decide if we want to remove the current ACL entry
3071 * Find an ACL scenario entry that matches the compared data. In the same time,
3072 * this function also figure out:
3073 * a/ If we want to change the ACL action
3074 * b/ If we want to add the new ACL entry
3075 * c/ If we want to remove the current ACL entry
3077 static struct ice_flow_entry *
3078 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
3079 struct ice_flow_entry *e, bool *do_chg_action,
3080 bool *do_add_entry, bool *do_rem_entry)
3082 struct ice_flow_entry *p, *return_entry = NULL;
3086 * a/ There exists an entry with same matching data, but different
3087 * priority, then we remove this existing ACL entry. Then, we
3088 * will add the new entry to the ACL scenario.
3089 * b/ There exists an entry with same matching data, priority, and
3090 * result action, then we do nothing
3091 * c/ There exists an entry with same matching data, priority, but
3092 * different, action, then do only change the action's entry.
3093 * d/ Else, we add this new entry to the ACL scenario.
/* Default outcome: brand-new entry, nothing to remove or modify */
3095 *do_chg_action = false;
3096 *do_add_entry = true;
3097 *do_rem_entry = false;
3098 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
/* Skip entries whose formatted key differs from the new entry's */
3099 if (memcmp(p->entry, e->entry, p->entry_sz))
3102 /* From this point, we have the same matching_data. */
3103 *do_add_entry = false;
3106 if (p->priority != e->priority) {
3107 /* matching data && !priority */
3108 *do_add_entry = true;
3109 *do_rem_entry = true;
3113 /* From this point, we will have matching_data && priority */
3114 if (p->acts_cnt != e->acts_cnt)
3115 *do_chg_action = true;
/* Action sets are compared pairwise; any unmatched action on the
 * existing entry forces an action update.
 */
3116 for (i = 0; i < p->acts_cnt; i++) {
3117 bool found_not_match = false;
3119 for (j = 0; j < e->acts_cnt; j++)
3120 if (memcmp(&p->acts[i], &e->acts[j],
3121 sizeof(struct ice_flow_action))) {
3122 found_not_match = true;
3126 if (found_not_match) {
3127 *do_chg_action = true;
3132 /* (do_chg_action = true) means :
3133 * matching_data && priority && !result_action
3134 * (do_chg_action = false) means :
3135 * matching_data && priority && result_action
3140 return return_entry;
3144 * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
/* Maps the generic flow priority enum onto the ACL module's priority enum;
 * unrecognized values fall back to NORMAL (default arm below).
 */
3147 static enum ice_acl_entry_prio
3148 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
3150 enum ice_acl_entry_prio acl_prio;
3153 case ICE_FLOW_PRIO_LOW:
3154 acl_prio = ICE_ACL_PRIO_LOW;
3156 case ICE_FLOW_PRIO_NORMAL:
3157 acl_prio = ICE_ACL_PRIO_NORMAL;
3159 case ICE_FLOW_PRIO_HIGH:
3160 acl_prio = ICE_ACL_PRIO_HIGH;
3163 acl_prio = ICE_ACL_PRIO_NORMAL;
3171 * ice_flow_acl_union_rng_chk - Perform union operation between two
3172 * range-range checker buffers
3173 * @dst_buf: pointer to destination range checker buffer
3174 * @src_buf: pointer to source range checker buffer
3176 * For this function, we do the union between dst_buf and src_buf
3177 * range checker buffer, and we will save the result back to dst_buf
3179 static enum ice_status
3180 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
3181 struct ice_aqc_acl_profile_ranges *src_buf)
3185 if (!dst_buf || !src_buf)
3186 return ICE_ERR_BAD_PTR;
3188 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
3189 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
3190 bool will_populate = false;
3192 in_data = &src_buf->checker_cfg[i];
/* Find a destination slot that is either free (mask == 0) or already
 * holds an identical checker configuration.
 */
3197 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
3198 cfg_data = &dst_buf->checker_cfg[j];
3200 if (!cfg_data->mask ||
3201 !memcmp(cfg_data, in_data,
3202 sizeof(struct ice_acl_rng_data))) {
3203 will_populate = true;
3208 if (will_populate) {
3209 ice_memcpy(cfg_data, in_data,
3210 sizeof(struct ice_acl_rng_data),
3211 ICE_NONDMA_TO_NONDMA);
3213 /* No available slot left to program range checker */
3214 return ICE_ERR_MAX_LIMIT;
3222 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
3223 * @hw: pointer to the hardware structure
3224 * @prof: pointer to flow profile
3225 * @entry: double pointer to the flow entry
3227 * For this function, we will look at the current added entries in the
3228 * corresponding ACL scenario. Then, we will perform matching logic to
3229 * see if we want to add/modify/do nothing with this new entry.
3231 static enum ice_status
3232 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
3233 struct ice_flow_entry **entry)
3235 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
3236 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
3237 struct ice_acl_act_entry *acts = NULL;
3238 struct ice_flow_entry *exist;
3239 enum ice_status status = ICE_SUCCESS;
3240 struct ice_flow_entry *e;
3243 if (!entry || !(*entry) || !prof)
3244 return ICE_ERR_BAD_PTR;
3248 do_chg_rng_chk = false;
3252 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
3257 /* Query the current range-checker value in FW */
3258 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
3262 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
3263 sizeof(struct ice_aqc_acl_profile_ranges),
3264 ICE_NONDMA_TO_NONDMA);
3266 /* Generate the new range-checker value */
3267 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
3271 /* Reconfigure the range check if the buffer is changed. */
3272 do_chg_rng_chk = false;
3273 if (memcmp(&query_rng_buf, &cfg_rng_buf,
3274 sizeof(struct ice_aqc_acl_profile_ranges))) {
3275 status = ice_prog_acl_prof_ranges(hw, prof_id,
3276 &cfg_rng_buf, NULL);
3280 do_chg_rng_chk = true;
3284 /* Figure out if we want to (change the ACL action) and/or
3285 * (Add the new ACL entry) and/or (Remove the current ACL entry)
3287 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
3288 &do_add_entry, &do_rem_entry);
3290 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
3295 /* Prepare the result action buffer */
/* NOTE(review): allocates e->entry_sz elements but only e->acts_cnt are
 * populated below — looks like deliberate over-allocation or a typo for
 * e->acts_cnt; verify against the full source before changing.
 */
3296 acts = (struct ice_acl_act_entry *)
3297 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
3299 return ICE_ERR_NO_MEMORY;
3301 for (i = 0; i < e->acts_cnt; i++)
3302 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
3303 sizeof(struct ice_acl_act_entry),
3304 ICE_NONDMA_TO_NONDMA);
3307 enum ice_acl_entry_prio prio;
/* Entry buffer holds key then key_inverse, each entry_sz/2 bytes */
3311 keys = (u8 *)e->entry;
3312 inverts = keys + (e->entry_sz / 2);
3313 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
3315 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
3316 inverts, acts, e->acts_cnt,
3321 e->scen_entry_idx = entry_idx;
3322 LIST_ADD(&e->l_entry, &prof->entries);
3324 if (do_chg_action) {
3325 /* For the action memory info, update the SW's copy of
3326 * exist entry with e's action memory info
3328 ice_free(hw, exist->acts);
3329 exist->acts_cnt = e->acts_cnt;
3330 exist->acts = (struct ice_flow_action *)
3331 ice_calloc(hw, exist->acts_cnt,
3332 sizeof(struct ice_flow_action));
3334 status = ICE_ERR_NO_MEMORY;
3338 ice_memcpy(exist->acts, e->acts,
3339 sizeof(struct ice_flow_action) * e->acts_cnt,
3340 ICE_NONDMA_TO_NONDMA);
/* Program the updated actions into the existing scenario slot */
3342 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3344 exist->scen_entry_idx);
3349 if (do_chg_rng_chk) {
3350 /* In this case, we want to update the range checker
3351 * information of the exist entry
3353 status = ice_flow_acl_union_rng_chk(exist->range_buf,
3359 /* As we don't add the new entry to our SW DB, deallocate its
3360 * memories, and return the exist entry to the caller
3362 ice_dealloc_flow_entry(hw, e);
3372 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3373 * @hw: pointer to the hardware structure
3374 * @prof: pointer to flow profile
3375 * @e: double pointer to the flow entry
/* Thin locking wrapper: performs the sync variant under the profile's
 * entries lock.
 */
3377 static enum ice_status
3378 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3379 struct ice_flow_entry **e)
3381 enum ice_status status;
3383 ice_acquire_lock(&prof->entries_lock);
3384 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3385 ice_release_lock(&prof->entries_lock);
3391 * ice_flow_add_entry - Add a flow entry
3392 * @hw: pointer to the HW struct
3393 * @blk: classification stage
3394 * @prof_id: ID of the profile to add a new flow entry to
3395 * @entry_id: unique ID to identify this flow entry
3396 * @vsi_handle: software VSI handle for the flow entry
3397 * @prio: priority of the flow entry
3398 * @data: pointer to a data buffer containing flow entry's match values/masks
3399 * @acts: arrays of actions to be performed on a match
3400 * @acts_cnt: number of actions
3401 * @entry_h: pointer to buffer that receives the new flow entry's handle
3404 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3405 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3406 void *data, struct ice_flow_action *acts, u8 acts_cnt,
3409 struct ice_flow_entry *e = NULL;
3410 struct ice_flow_prof *prof;
3411 enum ice_status status = ICE_SUCCESS;
3413 /* ACL entries must indicate an action */
3414 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3415 return ICE_ERR_PARAM;
3417 /* No flow entry data is expected for RSS */
3418 if (!entry_h || (!data && blk != ICE_BLK_RSS))
3419 return ICE_ERR_BAD_PTR;
3421 if (!ice_is_vsi_valid(hw, vsi_handle))
3422 return ICE_ERR_PARAM;
3424 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3426 prof = ice_flow_find_prof_id(hw, blk, prof_id);
3428 status = ICE_ERR_DOES_NOT_EXIST;
3430 /* Allocate memory for the entry being added and associate
3431 * the VSI to the found flow profile
3433 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3435 status = ICE_ERR_NO_MEMORY;
3437 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3440 ice_release_lock(&hw->fl_profs_locks[blk]);
3445 e->vsi_handle = vsi_handle;
3454 /* ACL will handle the entry management */
3455 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3460 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
/* Blocks other than ACL/RSS are not supported by this path */
3466 status = ICE_ERR_NOT_IMPL;
3470 if (blk != ICE_BLK_ACL) {
3471 /* ACL will handle the entry management */
3472 ice_acquire_lock(&prof->entries_lock);
3473 LIST_ADD(&e->l_entry, &prof->entries);
3474 ice_release_lock(&prof->entries_lock);
/* Return an opaque handle to the newly added entry */
3477 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
/* Error path: free any partially built entry buffer */
3482 ice_free(hw, e->entry);
3490 * ice_flow_rem_entry - Remove a flow entry
3491 * @hw: pointer to the HW struct
3492 * @blk: classification stage
3493 * @entry_h: handle to the flow entry to be removed
3495 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3498 struct ice_flow_entry *entry;
3499 struct ice_flow_prof *prof;
3500 enum ice_status status = ICE_SUCCESS;
3502 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3503 return ICE_ERR_PARAM;
/* Convert the opaque handle back into an entry pointer */
3505 entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
3507 /* Retain the pointer to the flow profile as the entry will be freed */
3511 ice_acquire_lock(&prof->entries_lock);
3512 status = ice_flow_rem_entry_sync(hw, blk, entry);
3513 ice_release_lock(&prof->entries_lock);
3520 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3521 * @seg: packet segment the field being set belongs to
3522 * @fld: field to be set
3523 * @field_type: type of the field
3524 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3525 * entry's input buffer
3526 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3528 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3529 * entry's input buffer
3531 * This helper function stores information of a field being matched, including
3532 * the type of the field and the locations of the value to match, the mask, and
3533 * the upper-bound value in the start of the input buffer for a flow entry.
3534 * This function should only be used for fixed-size data structures.
3536 * This function also opportunistically determines the protocol headers to be
3537 * present based on the fields being set. Some fields cannot be used alone to
3538 * determine the protocol headers present. Sometimes, fields for particular
3539 * protocol headers are not matched. In those cases, the protocol headers
3540 * must be explicitly set.
3543 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3544 enum ice_flow_fld_match_type field_type, u16 val_loc,
3545 u16 mask_loc, u16 last_loc)
3547 u64 bit = BIT_ULL(fld);
3550 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
/* Record the match type and source offsets for this field */
3553 seg->fields[fld].type = field_type;
3554 seg->fields[fld].src.val = val_loc;
3555 seg->fields[fld].src.mask = mask_loc;
3556 seg->fields[fld].src.last = last_loc;
/* Imply the protocol header this field belongs to on the segment */
3558 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3562 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3563 * @seg: packet segment the field being set belongs to
3564 * @fld: field to be set
3565 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3566 * entry's input buffer
3567 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3569 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3570 * entry's input buffer
3571 * @range: indicate if field being matched is to be in a range
3573 * This function specifies the locations, in the form of byte offsets from the
3574 * start of the input buffer for a flow entry, from where the value to match,
3575 * the mask value, and upper value can be extracted. These locations are then
3576 * stored in the flow profile. When adding a flow entry associated with the
3577 * flow profile, these locations will be used to quickly extract the values and
3578 * create the content of a match entry. This function should only be used for
3579 * fixed-size data structures.
3582 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3583 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Translate the boolean range flag into a field match type */
3585 enum ice_flow_fld_match_type t = range ?
3586 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3588 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3592 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3593 * @seg: packet segment the field being set belongs to
3594 * @fld: field to be set
3595 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3596 * entry's input buffer
3597 * @pref_loc: location of prefix value from entry's input buffer
3598 * @pref_sz: size of the location holding the prefix value
3600 * This function specifies the locations, in the form of byte offsets from the
3601 * start of the input buffer for a flow entry, from where the value to match
3602 * and the IPv4 prefix value can be extracted. These locations are then stored
3603 * in the flow profile. When adding flow entries to the associated flow profile,
3604 * these locations can be used to quickly extract the values to create the
3605 * content of a match entry. This function should only be used for fixed-size
3609 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3610 u16 val_loc, u16 pref_loc, u8 pref_sz)
3612 /* For this type of field, the "mask" location is for the prefix value's
3613 * location and the "last" location is for the size of the location of
3616 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3617 pref_loc, (u16)pref_sz);
3621 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3622 * @seg: packet segment the field being set belongs to
3623 * @off: offset of the raw field from the beginning of the segment in bytes
3624 * @len: length of the raw pattern to be matched
3625 * @val_loc: location of the value to match from entry's input buffer
3626 * @mask_loc: location of mask value from entry's input buffer
3628 * This function specifies the offset of the raw field to be match from the
3629 * beginning of the specified packet segment, and the locations, in the form of
3630 * byte offsets from the start of the input buffer for a flow entry, from where
3631 * the value to match and the mask value to be extracted. These locations are
3632 * then stored in the flow profile. When adding flow entries to the associated
3633 * flow profile, these locations can be used to quickly extract the values to
3634 * create the content of a match entry. This function should only be used for
3635 * fixed-size data structures.
3638 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3639 u16 val_loc, u16 mask_loc)
/* Only record the raw field while there is room in the fixed array */
3641 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3642 seg->raws[seg->raws_cnt].off = off;
3643 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3644 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3645 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3646 /* The "last" field is used to store the length of the field */
3647 seg->raws[seg->raws_cnt].info.src.last = len;
3650 /* Overflows of "raws" will be handled as an error condition later in
3651 * the flow when this information is processed.
3657 * ice_flow_rem_vsi_prof - remove vsi from flow profile
3658 * @hw: pointer to the hardware structure
3659 * @blk: classification stage
3660 * @vsi_handle: software VSI handle
3661 * @prof_id: unique ID to identify this flow profile
3663 * This function removes the flow entries associated to the input
3664 * vsi handle and disassociates the vsi from the flow profile.
3666 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3669 struct ice_flow_prof *prof = NULL;
3670 enum ice_status status = ICE_SUCCESS;
3672 if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3673 return ICE_ERR_PARAM;
3675 /* find flow profile pointer with input package block and profile id */
/* NOTE(review): lookup hard-codes ICE_BLK_FD even though the function
 * takes a 'blk' parameter that is used for entry removal below —
 * confirm whether this should be 'blk' instead.
 */
3676 prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
3678 ice_debug(hw, ICE_DBG_PKG,
3679 "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3680 return ICE_ERR_DOES_NOT_EXIST;
3683 /* Remove all remaining flow entries before removing the flow profile */
3684 if (!LIST_EMPTY(&prof->entries)) {
3685 struct ice_flow_entry *e, *t;
3687 ice_acquire_lock(&prof->entries_lock);
3688 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
/* Only entries belonging to this VSI are removed */
3690 if (e->vsi_handle != vsi_handle)
3693 status = ice_flow_rem_entry_sync(hw, blk, e);
3697 ice_release_lock(&prof->entries_lock);
3702 /* disassociate the flow profile from sw vsi handle */
3703 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3705 ice_debug(hw, ICE_DBG_PKG,
3706 "ice_flow_disassoc_prof() failed with status=%d\n",
/* Protocol-header bitmasks accepted per layer when building RSS segments;
 * VAL_MASKS is the union of all header bits ice_flow_set_rss_seg_info allows.
 */
3711 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3712 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_ETH_NON_IP | ICE_FLOW_SEG_HDR_VLAN)
3714 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3715 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3717 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3718 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3720 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3721 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3722 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3723 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3726 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3727 * @segs: pointer to the flow field segment(s)
3728 * @seg_cnt: segment count
3729 * @cfg: configure parameters
3731 * Helper function to extract fields from hash bitmap and use flow
3732 * header value to set flow field segment for further use in flow
3733 * profile entry or removal.
3735 static enum ice_status
3736 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3737 const struct ice_rss_hash_cfg *cfg)
3739 struct ice_flow_seg_info *seg;
3743 /* set inner most segment */
3744 seg = &segs[seg_cnt - 1];
/* Register every hashed field on the innermost segment; RSS fields
 * carry no per-entry value/mask offsets, hence OFF_INVAL.
 */
3746 ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3747 ICE_FLOW_FIELD_IDX_MAX)
3748 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3749 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3750 ICE_FLOW_FLD_OFF_INVAL, false);
3752 ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3754 /* set outer most header */
3755 if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3756 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3757 ICE_FLOW_SEG_HDR_IPV_FRAG |
3758 ICE_FLOW_SEG_HDR_IPV_OTHER;
3759 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3760 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3761 ICE_FLOW_SEG_HDR_IPV_FRAG |
3762 ICE_FLOW_SEG_HDR_IPV_OTHER;
3763 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE)
3764 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3765 ICE_FLOW_SEG_HDR_GRE |
3766 ICE_FLOW_SEG_HDR_IPV_OTHER;
3767 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE)
3768 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3769 ICE_FLOW_SEG_HDR_GRE |
3770 ICE_FLOW_SEG_HDR_IPV_OTHER;
/* Reject header bits outside the supported RSS header sets */
3772 if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3773 ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
3774 ~ICE_FLOW_SEG_HDR_IPV_FRAG)
3775 return ICE_ERR_PARAM;
/* At most one L3 protocol may be selected (power-of-two check) */
3777 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3778 if (val && !ice_is_pow2(val))
/* Likewise at most one L4 protocol */
3781 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3782 if (val && !ice_is_pow2(val))
3789 * ice_rem_vsi_rss_list - remove VSI from RSS list
3790 * @hw: pointer to the hardware structure
3791 * @vsi_handle: software VSI handle
3793 * Remove the VSI from all RSS configurations in the list.
3795 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3797 struct ice_rss_cfg *r, *tmp;
3799 if (LIST_EMPTY(&hw->rss_list_head))
3802 ice_acquire_lock(&hw->rss_locks);
3803 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3804 ice_rss_cfg, l_entry)
/* Clear this VSI's bit; delete the config once no VSI references it */
3805 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3806 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3807 LIST_DEL(&r->l_entry);
3810 ice_release_lock(&hw->rss_locks);
3814 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3815 * @hw: pointer to the hardware structure
3816 * @vsi_handle: software VSI handle
3818 * This function will iterate through all flow profiles and disassociate
3819 * the VSI from that profile. If the flow profile has no VSIs it will
3822 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3824 const enum ice_block blk = ICE_BLK_RSS;
3825 struct ice_flow_prof *p, *t;
3826 enum ice_status status = ICE_SUCCESS;
3828 if (!ice_is_vsi_valid(hw, vsi_handle))
3829 return ICE_ERR_PARAM;
/* Nothing to do when the RSS block has no profiles */
3831 if (LIST_EMPTY(&hw->fl_profs[blk]))
3834 ice_acquire_lock(&hw->rss_locks);
3835 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3837 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3838 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
/* Remove the profile entirely once no VSI references it */
3842 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3843 status = ice_flow_rem_prof(hw, blk, p->id);
3848 ice_release_lock(&hw->rss_locks);
3854 * ice_get_rss_hdr_type - get a RSS profile's header type
3855 * @prof: RSS flow profile
/* Classifies the profile by segment count and the outer segment's header
 * bits: single segment -> outer headers; two segments -> inner headers,
 * optionally qualified by the outer IPv4/IPv6 type.
 */
3857 static enum ice_rss_cfg_hdr_type
3858 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3860 enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3862 if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3863 hdr_type = ICE_RSS_OUTER_HEADERS;
3864 } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3865 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3866 hdr_type = ICE_RSS_INNER_HEADERS;
/* IPv4/IPv6 checks override in order; last matching bit wins */
3867 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3868 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3869 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3870 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3877 * ice_rem_rss_list - remove RSS configuration from list
3878 * @hw: pointer to the hardware structure
3879 * @vsi_handle: software VSI handle
3880 * @prof: pointer to flow profile
3882 * Assumption: lock has already been acquired for RSS list
3885 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3887 enum ice_rss_cfg_hdr_type hdr_type;
3888 struct ice_rss_cfg *r, *tmp;
3890 /* Search for RSS hash fields associated to the VSI that match the
3891 * hash configurations associated to the flow profile. If found
3892 * remove from the RSS entry list of the VSI context and delete entry.
3894 hdr_type = ice_get_rss_hdr_type(prof);
3895 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3896 ice_rss_cfg, l_entry)
/* A list entry matches when hash fields, additional headers and
 * header type all equal those of the profile's last (innermost)
 * segment.
 */
3897 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3898 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3899 r->hash.hdr_type == hdr_type) {
3900 ice_clear_bit(vsi_handle, r->vsis);
/* Unlink the entry once no VSI references it anymore. */
3901 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3902 LIST_DEL(&r->l_entry);
3910 * ice_add_rss_list - add RSS configuration to list
3911 * @hw: pointer to the hardware structure
3912 * @vsi_handle: software VSI handle
3913 * @prof: pointer to flow profile
3915 * Assumption: lock has already been acquired for RSS list
3917 static enum ice_status
3918 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3920 enum ice_rss_cfg_hdr_type hdr_type;
3921 struct ice_rss_cfg *r, *rss_cfg;
/* If an entry with identical hash fields, additional headers and header
 * type already exists, just mark this VSI in its bitmap and return.
 */
3923 hdr_type = ice_get_rss_hdr_type(prof);
3924 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3925 ice_rss_cfg, l_entry)
3926 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3927 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3928 r->hash.hdr_type == hdr_type) {
3929 ice_set_bit(vsi_handle, r->vsis);
/* No existing entry: allocate a new one (NULL check is on the dropped
 * line between the allocation and the ICE_ERR_NO_MEMORY return).
 */
3933 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3935 return ICE_ERR_NO_MEMORY;
/* Populate from the profile's last (innermost) segment. */
3937 rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3938 rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3939 rss_cfg->hash.hdr_type = hdr_type;
3940 rss_cfg->hash.symm = prof->cfg.symm;
3941 ice_set_bit(vsi_handle, rss_cfg->vsis);
3943 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Shift/mask pairs used to pack the three fields of a 64-bit flow
 * profile ID (layout documented below): 32-bit hash, 30-bit header
 * bitmap, 2-bit encapsulation flag.
 */
3948 #define ICE_FLOW_PROF_HASH_S 0
3949 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3950 #define ICE_FLOW_PROF_HDR_S 32
3951 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3952 #define ICE_FLOW_PROF_ENCAP_S 62
3953 #define ICE_FLOW_PROF_ENCAP_M (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3955 /* Flow profile ID format:
3956 * [0:31] - Packet match fields
3957 * [32:61] - Protocol header
3958 * [62:63] - Encapsulation flag:
 *           0/1 presumably non-tunneled vs. tunneled — the two lines
 *           describing those values were lost in this excerpt; confirm
 *           against the upstream source.
3961 * 2 for tunneled with outer ipv4
3962 * 3 for tunneled with outer ipv6
/* Build a 64-bit profile ID from hash fields, header bitmap and encap
 * flag; each component is masked to its field width before OR-ing.
 */
3964 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3965 ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3966 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3967 (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
/* Program one byte of the GLQF_HSYMM symmetric-hash register array:
 * field-vector word @src is set to XOR against word @dst for profile
 * @prof_id. Each 32-bit GLQF_HSYMM register holds four one-byte slots;
 * bit 0x80 in the programmed byte enables the symmetric XOR for that
 * slot. Read-modify-write so the other three slots are preserved.
 */
3970 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3972 u32 s = ((src % 4) << 3); /* bit shift of src's byte slot within the register */
3973 u32 v = dst | 0x80; /* value to program: dst index with enable bit set */
3974 u8 i = src / 4; /* register index within the GLQF_HSYMM array */
3977 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3978 reg = (reg & ~(0xff << s)) | (v << s);
3979 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
/* Configure a symmetric XOR between two @len-word runs of the field
 * vector (starting at word indices @src and @dst) for RSS profile
 * @prof_id. Programs both directions (src->dst and dst->src) so the
 * hash is order-independent.
 */
3983 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3986 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3989 for (i = 0; i < len; i++) {
/* Word indices are mirrored against the last field-vector word
 * because the ordering in GLQF_HSYMM/GLQF_HINSET is reversed
 * relative to the software field vector (see comment below).
 */
3990 ice_rss_config_xor_word(hw, prof_id,
3991 /* Yes, field vector in GLQF_HSYMM and
3992 * GLQF_HINSET is inversed!
3994 fv_last_word - (src + i),
3995 fv_last_word - (dst + i));
3996 ice_rss_config_xor_word(hw, prof_id,
3997 fv_last_word - (dst + i),
3998 fv_last_word - (src + i));
/* Update the symmetric-hash (XOR) registers for @prof. Clears all six
 * GLQF_HSYMM registers of the hardware profile to their default, then,
 * if the profile requests symmetric hashing, pairs each supported
 * src/dst field (IPv4/IPv6 addresses, TCP/UDP/SCTP ports) that was
 * actually extracted into the field vector.
 */
4003 ice_rss_update_symm(struct ice_hw *hw,
4004 struct ice_flow_prof *prof)
4006 struct ice_prof_map *map;
/* Look up the hardware profile ID under the profile-map lock; the
 * map NULL-check sits on a line dropped from this excerpt.
 */
4009 ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
4010 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
4012 prof_id = map->prof_id;
4013 ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
4016 /* clear to default */
4017 for (m = 0; m < 6; m++)
4018 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
4019 if (prof->cfg.symm) {
/* Symmetry is always configured on the last (innermost) segment. */
4020 struct ice_flow_seg_info *seg =
4021 &prof->segs[prof->segs_cnt - 1];
4023 struct ice_flow_seg_xtrct *ipv4_src =
4024 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
4025 struct ice_flow_seg_xtrct *ipv4_dst =
4026 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
4027 struct ice_flow_seg_xtrct *ipv6_src =
4028 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
4029 struct ice_flow_seg_xtrct *ipv6_dst =
4030 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
4032 struct ice_flow_seg_xtrct *tcp_src =
4033 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
4034 struct ice_flow_seg_xtrct *tcp_dst =
4035 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
4037 struct ice_flow_seg_xtrct *udp_src =
4038 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
4039 struct ice_flow_seg_xtrct *udp_dst =
4040 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
4042 struct ice_flow_seg_xtrct *sctp_src =
4043 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
4044 struct ice_flow_seg_xtrct *sctp_dst =
4045 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
/* A non-zero prot_id marks the field as present in the extract
 * sequence; only XOR pairs where both sides exist. Lengths are
 * in field-vector words: 2 for IPv4 addrs, 8 for IPv6 addrs,
 * 1 for L4 ports.
 */
4048 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
4049 ice_rss_config_xor(hw, prof_id,
4050 ipv4_src->idx, ipv4_dst->idx, 2);
4053 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
4054 ice_rss_config_xor(hw, prof_id,
4055 ipv6_src->idx, ipv6_dst->idx, 8);
4058 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
4059 ice_rss_config_xor(hw, prof_id,
4060 tcp_src->idx, tcp_dst->idx, 1);
4063 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
4064 ice_rss_config_xor(hw, prof_id,
4065 udp_src->idx, udp_dst->idx, 1);
4068 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
4069 ice_rss_config_xor(hw, prof_id,
4070 sctp_src->idx, sctp_dst->idx, 1);
4075 * ice_rss_cfg_raw_symm - configure symmetric hash parameters
4077 * @hw: pointer to the hardware structure
4078 * @prof: pointer to parser profile
4079 * @prof_id: profile ID
4081 * Calculate symmetric hash parameters based on input protocol type.
4084 ice_rss_cfg_raw_symm(struct ice_hw *hw,
4085 struct ice_parser_profile *prof, u64 prof_id)
4087 u8 src_idx, dst_idx, proto_id;
/* Scan the parser profile's field vector looking for adjacent
 * src/dst field pairs of known protocols; each match is programmed
 * as a symmetric XOR pair below.
 */
4090 while (i < prof->fv_num) {
4091 proto_id = prof->fv[i].proto_id;
/* IPv4: src addr must be immediately followed (len words later)
 * by dst addr of the same protocol to qualify.
 */
4094 case ICE_PROT_IPV4_OF_OR_S:
4095 len = ICE_FLOW_FLD_SZ_IPV4_ADDR /
4096 ICE_FLOW_FV_EXTRACT_SZ;
4097 if (prof->fv[i].offset ==
4098 ICE_FLOW_FIELD_IPV4_SRC_OFFSET &&
4099 prof->fv[i + len].proto_id == proto_id &&
4100 prof->fv[i + len].offset ==
4101 ICE_FLOW_FIELD_IPV4_DST_OFFSET) {
/* IPv6: same pairing rule with 16-byte addresses. */
4109 case ICE_PROT_IPV6_OF_OR_S:
4110 len = ICE_FLOW_FLD_SZ_IPV6_ADDR /
4111 ICE_FLOW_FV_EXTRACT_SZ;
4112 if (prof->fv[i].offset ==
4113 ICE_FLOW_FIELD_IPV6_SRC_OFFSET &&
4114 prof->fv[i + len].proto_id == proto_id &&
4115 prof->fv[i + len].offset ==
4116 ICE_FLOW_FIELD_IPV6_DST_OFFSET) {
/* TCP/UDP/SCTP share the same src/dst port layout. */
4124 case ICE_PROT_TCP_IL:
4125 case ICE_PROT_UDP_IL_OR_S:
4126 case ICE_PROT_SCTP_IL:
4127 len = ICE_FLOW_FLD_SZ_PORT /
4128 ICE_FLOW_FV_EXTRACT_SZ;
4129 if (prof->fv[i].offset ==
4130 ICE_FLOW_FIELD_SRC_PORT_OFFSET &&
4131 prof->fv[i + len].proto_id == proto_id &&
4132 prof->fv[i + len].offset ==
4133 ICE_FLOW_FIELD_DST_PORT_OFFSET) {
/* Program the matched pair into GLQF_HSYMM. */
4145 ice_rss_config_xor(hw, prof_id, src_idx, dst_idx, len);
4149 /* Max registers index per packet profile */
4150 #define ICE_SYMM_REG_INDEX_MAX 6
4153 * ice_rss_update_raw_symm - update symmetric hash configuration
4155 * @hw: pointer to the hardware structure
4156 * @cfg: configure parameters for raw pattern
4157 * @id: profile tracking ID
4159 * Update symmetric hash configuration for raw pattern if required.
4160 * Otherwise only clear to default.
4163 ice_rss_update_raw_symm(struct ice_hw *hw,
4164 struct ice_rss_raw_cfg *cfg, u64 id)
4166 struct ice_prof_map *map;
/* Resolve the tracking ID to a hardware profile ID under the
 * profile-map lock (map NULL-check is on a dropped line).
 */
4169 ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
4170 map = ice_search_prof_id(hw, ICE_BLK_RSS, id);
4172 prof_id = map->prof_id;
4173 ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
4176 /* clear to default */
4177 for (m = 0; m < ICE_SYMM_REG_INDEX_MAX; m++)
4178 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
/* Re-program symmetric pairs from the raw parser profile. */
4180 ice_rss_cfg_raw_symm(hw, &cfg->prof, prof_id);
4184 * ice_add_rss_cfg_sync - add an RSS configuration
4185 * @hw: pointer to the hardware structure
4186 * @vsi_handle: software VSI handle
4187 * @cfg: configure parameters
4189 * Assumption: lock has already been acquired for RSS list
4191 static enum ice_status
4192 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
4193 const struct ice_rss_hash_cfg *cfg)
4195 const enum ice_block blk = ICE_BLK_RSS;
4196 struct ice_flow_prof *prof = NULL;
4197 struct ice_flow_seg_info *segs;
4198 enum ice_status status;
/* One segment for outer-header hashing, the maximum (tunnel) count
 * otherwise.
 */
4201 segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
4202 ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
4204 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
4207 return ICE_ERR_NO_MEMORY;
4209 /* Construct the packet segment info from the hashed fields */
4210 status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
4214 /* Search for a flow profile that has matching headers, hash fields
4215 * and has the input VSI associated to it. If found, no further
4216 * operations required and exit.
4218 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4220 ICE_FLOW_FIND_PROF_CHK_FLDS |
4221 ICE_FLOW_FIND_PROF_CHK_VSI);
/* Exact match found: only the symmetry flag may need refreshing. */
4223 if (prof->cfg.symm == cfg->symm)
4225 prof->cfg.symm = cfg->symm;
4229 /* Check if a flow profile exists with the same protocol headers and
4230 * associated with the input VSI. If so disassociate the VSI from
4231 * this profile. The VSI will be added to a new profile created with
4232 * the protocol header and new hash field configuration.
4234 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4235 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
4237 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
4239 ice_rem_rss_list(hw, vsi_handle, prof);
4243 /* Remove profile if it has no VSIs associated */
4244 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
4245 status = ice_flow_rem_prof(hw, blk, prof->id);
4251 /* Search for a profile that has same match fields only. If this
4252 * exists then associate the VSI to this profile.
4254 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4256 ICE_FLOW_FIND_PROF_CHK_FLDS);
/* Reuse only when the symmetry requirement also matches. */
4258 if (prof->cfg.symm == cfg->symm) {
4259 status = ice_flow_assoc_prof(hw, blk, prof,
4262 status = ice_add_rss_list(hw, vsi_handle,
4265 /* if a profile exist but with different symmetric
4266 * requirement, just return error.
4268 status = ICE_ERR_NOT_SUPPORTED;
4273 /* Create a new flow profile with generated profile and packet
4274 * segment information.
4276 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
4277 ICE_FLOW_GEN_PROFID(cfg->hash_flds,
4278 segs[segs_cnt - 1].hdrs,
4280 segs, segs_cnt, NULL, 0, &prof);
4284 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
4285 /* If association to a new flow profile failed then this profile can
/* ... be removed (continuation lost in this excerpt). */
4289 ice_flow_rem_prof(hw, blk, prof->id);
4293 status = ice_add_rss_list(hw, vsi_handle, prof);
/* Record the symmetry flag and push it to hardware. */
4295 prof->cfg.symm = cfg->symm;
4297 ice_rss_update_symm(hw, prof);
4305 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
4306 * @hw: pointer to the hardware structure
4307 * @vsi_handle: software VSI handle
4308 * @cfg: configure parameters
4310 * This function will generate a flow profile based on fields associated with
4311 * the input fields to hash on, the flow type and use the VSI number to add
4312 * a flow entry to the profile.
4315 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4316 const struct ice_rss_hash_cfg *cfg)
4318 struct ice_rss_hash_cfg local_cfg;
4319 enum ice_status status;
/* Validate handle, non-NULL cfg, header-type range and hash fields. */
4321 if (!ice_is_vsi_valid(hw, vsi_handle) ||
4322 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4323 cfg->hash_flds == ICE_HASH_INVALID)
4324 return ICE_ERR_PARAM;
/* A specific header type maps to a single sync call; ICE_RSS_ANY_HEADERS
 * is expanded into an outer-headers add followed by an inner-headers
 * add, all under one lock acquisition.
 */
4327 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4328 ice_acquire_lock(&hw->rss_locks);
4329 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4330 ice_release_lock(&hw->rss_locks);
4332 ice_acquire_lock(&hw->rss_locks);
4333 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4334 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
/* Inner add presumably runs only if the outer add succeeded —
 * the status check is on a dropped line; confirm upstream.
 */
4336 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4337 status = ice_add_rss_cfg_sync(hw, vsi_handle,
4340 ice_release_lock(&hw->rss_locks);
4347 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
4348 * @hw: pointer to the hardware structure
4349 * @vsi_handle: software VSI handle
4350 * @cfg: configure parameters
4352 * Assumption: lock has already been acquired for RSS list
4354 static enum ice_status
4355 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
4356 const struct ice_rss_hash_cfg *cfg)
4358 const enum ice_block blk = ICE_BLK_RSS;
4359 struct ice_flow_seg_info *segs;
4360 struct ice_flow_prof *prof;
4361 enum ice_status status;
/* Segment count mirrors ice_add_rss_cfg_sync: one for outer headers,
 * the maximum for tunneled configurations.
 */
4364 segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
4365 ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
4366 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
4369 return ICE_ERR_NO_MEMORY;
4371 /* Construct the packet segment info from the hashed fields */
4372 status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
/* Find the profile matching both headers and hash fields. */
4376 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4378 ICE_FLOW_FIND_PROF_CHK_FLDS);
4380 status = ICE_ERR_DOES_NOT_EXIST;
4384 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
4388 /* Remove RSS configuration from VSI context before deleting
4391 ice_rem_rss_list(hw, vsi_handle, prof);
/* Delete the profile itself once no VSI references it. */
4393 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
4394 status = ice_flow_rem_prof(hw, blk, prof->id);
4402 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
4403 * @hw: pointer to the hardware structure
4404 * @vsi_handle: software VSI handle
4405 * @cfg: configure parameters
4407 * This function will lookup the flow profile based on the input
4408 * hash field bitmap, iterate through the profile entry list of
4409 * that profile and find entry associated with input VSI to be
4410 * removed. Calls are made to underlying flow apis which will in
4411 * turn build or update buffers for RSS XLT1 section.
4414 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4415 const struct ice_rss_hash_cfg *cfg)
4417 struct ice_rss_hash_cfg local_cfg;
4418 enum ice_status status;
/* Same parameter validation as ice_add_rss_cfg. */
4420 if (!ice_is_vsi_valid(hw, vsi_handle) ||
4421 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4422 cfg->hash_flds == ICE_HASH_INVALID)
4423 return ICE_ERR_PARAM;
4425 ice_acquire_lock(&hw->rss_locks);
/* Specific header type: single sync removal. ICE_RSS_ANY_HEADERS:
 * remove the outer-headers config, then the inner-headers one.
 */
4427 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4428 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4430 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4431 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
/* Inner removal presumably gated on outer success — status check
 * is on a dropped line; confirm upstream.
 */
4434 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4435 status = ice_rem_rss_cfg_sync(hw, vsi_handle,
4439 ice_release_lock(&hw->rss_locks);
4445 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
4446 * @hw: pointer to the hardware structure
4447 * @vsi_handle: software VSI handle
4449 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
4451 enum ice_status status = ICE_SUCCESS;
4452 struct ice_rss_cfg *r;
4454 if (!ice_is_vsi_valid(hw, vsi_handle))
4455 return ICE_ERR_PARAM;
/* Re-apply every tracked RSS configuration that names this VSI; used
 * after reset/recovery to restore hardware state from the software
 * list. Stops early if an add fails (break is on a dropped line).
 */
4457 ice_acquire_lock(&hw->rss_locks);
4458 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4459 ice_rss_cfg, l_entry) {
4460 if (ice_is_bit_set(r->vsis, vsi_handle)) {
4461 status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
4466 ice_release_lock(&hw->rss_locks);
4472 * ice_get_rss_cfg - returns hashed fields for the given header types
4473 * @hw: pointer to the hardware structure
4474 * @vsi_handle: software VSI handle
4475 * @hdrs: protocol header type
4477 * This function will return the match fields of the first instance of flow
4478 * profile having the given header types and containing input VSI
4480 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4482 u64 rss_hash = ICE_HASH_INVALID;
4483 struct ice_rss_cfg *r;
4485 /* verify if the protocol header is non zero and VSI is valid */
4486 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4487 return ICE_HASH_INVALID;
4489 ice_acquire_lock(&hw->rss_locks);
4490 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4491 ice_rss_cfg, l_entry)
4492 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4493 r->hash.addl_hdrs == hdrs) {
4494 rss_hash = r->hash.hash_flds;
4497 ice_release_lock(&hw->rss_locks);