1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_common.h"
/* Size of known protocol header fields, in BYTES.
 * These are converted to bit widths by ICE_FLOW_FLD_INFO()/
 * ICE_FLOW_FLD_INFO_MSK() via BITS_PER_BYTE.
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR	6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR	8
#define ICE_FLOW_FLD_SZ_IPV4_ID		2
#define ICE_FLOW_FLD_SZ_IPV6_ID		4
#define ICE_FLOW_FLD_SZ_IP_CHKSUM	2
#define ICE_FLOW_FLD_SZ_TCP_CHKSUM	2
#define ICE_FLOW_FLD_SZ_UDP_CHKSUM	2
#define ICE_FLOW_FLD_SZ_SCTP_CHKSUM	4
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
#define ICE_FLOW_FLD_SZ_PFCP_SEID	8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID	4
#define ICE_FLOW_FLD_SZ_ESP_SPI		4
#define ICE_FLOW_FLD_SZ_AH_SPI		4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI	4
#define ICE_FLOW_FLD_SZ_VXLAN_VNI	4
#define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID	2
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;	/* protocol header the field lives in */
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
	u16 mask;	/* 16-bit mask for field */
	/* NOTE(review): closing "};" not visible in this extract */
/* Build a struct ice_flow_field_info initializer; _offset_bytes and
 * _size_bytes are given in bytes and converted here to bit units.
 * NOTE(review): interior lines (e.g. ".hdr = _hdr") and the closing
 * brace are not visible in this extract.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
/* As ICE_FLOW_FLD_INFO(), but additionally takes an explicit 16-bit
 * field mask (_mask).
 * NOTE(review): interior lines (e.g. ".hdr"/".mask" assignments) and the
 * closing brace are not visible in this extract.
 */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
/* Table containing properties of supported protocol header fields.
 * Indexed by enum ice_flow_field_idx (see the per-entry comments);
 * the entry order here must stay in sync with that enum.
 * Offsets are byte offsets from the start of the named header.
 */
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* Ether */
	/* ICE_FLOW_FIELD_IDX_ETH_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ETH_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_S_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_C_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
	/* IPv4 / IPv6 */
	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
	/* NOTE(review): mask continuation argument appears truncated in this
	 * extract (closing of the MSK() call not visible)
	 */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
	/* NOTE(review): mask continuation argument appears truncated in this
	 * extract (closing of the MSK() call not visible)
	 */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV4_CHKSUM */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 10, ICE_FLOW_FLD_SZ_IP_CHKSUM),
	/* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
			  ICE_FLOW_FLD_SZ_IPV4_ID),
	/* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
			  ICE_FLOW_FLD_SZ_IPV6_ID),
	/* IPv6 address prefixes: SA at byte 8, DA at byte 24 of the header */
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
	/* Transport: L4 ports/flags/checksums */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
	/* ICE_FLOW_FIELD_IDX_TCP_CHKSUM */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 16, ICE_FLOW_FLD_SZ_TCP_CHKSUM),
	/* ICE_FLOW_FIELD_IDX_UDP_CHKSUM */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 6, ICE_FLOW_FLD_SZ_UDP_CHKSUM),
	/* ICE_FLOW_FIELD_IDX_SCTP_CHKSUM */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 8,
			  ICE_FLOW_FLD_SZ_SCTP_CHKSUM),
	/* ARP */
	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_OP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
	/* ICMP */
	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
	/* GTP */
	/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
			      ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
	/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_UP_QFI */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_UP, 22,
			      ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_DWN, 22,
			      ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
	/* PPPoE */
	/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
			  ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
	/* PFCP */
	/* ICE_FLOW_FIELD_IDX_PFCP_SEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
			  ICE_FLOW_FLD_SZ_PFCP_SEID),
	/* L2TPv3 */
	/* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
			  ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
	/* IPsec */
	/* ICE_FLOW_FIELD_IDX_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
			  ICE_FLOW_FLD_SZ_ESP_SPI),
	/* ICE_FLOW_FIELD_IDX_AH_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
			  ICE_FLOW_FLD_SZ_AH_SPI),
	/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
			  ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
	/* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
			  ICE_FLOW_FLD_SZ_VXLAN_VNI),
	/* eCPRI over Ethernet / over UDP */
	/* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
			  ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
	/* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
			  ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
/* Bitmaps indicating relevant packet types for a particular protocol header.
 * Each array below is 32 x u32 = a 1024-bit bitmap.
 * NOTE(review): bit i of word w is presumed to mark ptype (32 * w + i);
 * confirm against the lookup code (not visible in this extract).
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
	0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last MAC VLAN header */
/* 32 x u32 = 1024-bit ptype bitmap */
static const u32 ice_ptypes_macvlan_il[] = {
	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
 * does NOT include IPV4 other PTYPEs
 */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1D800000, 0x24000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000155, 0x00000000, 0x00000000,
	0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
	0x00001500, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
 * includes IPV4 other PTYPEs
 */
static const u32 ice_ptypes_ipv4_ofos_all[] = {
	0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
	0x00000000, 0x00000155, 0x00000000, 0x00000000,
	0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
	0x03FFD000, 0x00000000, 0x02FBEFBC, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last IPv4 header */
/* 32 x u32 = 1024-bit ptype bitmap */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x001FF800, 0x00100000,
	0xFC0FC000, 0x00000000, 0xBC0BC0BC, 0x00000BC0,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
 * does NOT include IPV6 other PTYPEs
 * (original comment said "IVP6" — typo for IPV6)
 */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x76000000, 0x10002000,
	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
	0x00000000, 0x03F00000, 0x00000540, 0x00000000,
	0x00002A00, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
 * includes IPV6 other PTYPEs
 */
static const u32 ice_ptypes_ipv6_ofos_all[] = {
	0x00000000, 0x00000000, 0x76000000, 0x1EFDE000,
	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
	0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
	0xFC002000, 0x0000003F, 0xBC000000, 0x0002FBEF,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last IPv6 header */
/* 32 x u32 = 1024-bit ptype bitmap */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
	0x03F00000, 0x0000003F, 0x02F02F00, 0x0002F02F,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single
 * non-frag IPv4 header - no L4
 */
static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
	0x10800000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
	0x00001500, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
/* 32 x u32 = 1024-bit ptype bitmap */
static const u32 ice_ptypes_ipv4_il_no_l4[] = {
	0x60000000, 0x18043008, 0x80000002, 0x6010c021,
	0x00000008, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00139800, 0x00000000,
	0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single
 * non-frag IPv6 header - no L4
 */
static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
	0x00000000, 0x00000000, 0x42000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x02300000, 0x00000540, 0x00000000,
	0x00002A00, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
/* 32 x u32 = 1024-bit ptype bitmap */
static const u32 ice_ptypes_ipv6_il_no_l4[] = {
	0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
	0x00000430, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x4e600000, 0x00000000,
	0x02300000, 0x00000023, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outermost/First ARP header */
/* Only a single ptype bit is set (word 0, bit 11) */
static const u32 ice_ptypes_arp_of[] = {
	0x00000800, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00410000, 0x908427E0, 0x00100007,
	0x10410000, 0x00000004, 0x10410410, 0x00004104,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last TCP header */
/* 32 x u32 = 1024-bit ptype bitmap */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00820000, 0x21084000, 0x00000000,
	0x20820000, 0x00000008, 0x20820820, 0x00008208,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last SCTP header */
/* 32 x u32 = 1024-bit ptype bitmap */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x01040000, 0x00000000, 0x00000000,
	0x41040000, 0x00000010, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outermost/First ICMP header */
/* Only a single ptype bit is set (word 0, bit 28) */
static const u32 ice_ptypes_icmp_of[] = {
	0x10000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last ICMP header */
/* 32 x u32 = 1024-bit ptype bitmap */
static const u32 ice_ptypes_icmp_il[] = {
	0x00000000, 0x02040408, 0x40000102, 0x08101020,
	0x00000408, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x42108000, 0x00000000,
	0x82080000, 0x00000020, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outermost/First GRE header */
/* 32 x u32 = 1024-bit ptype bitmap */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Innermost/Last MAC header */
/* Only a single ptype bit is set (word 1, bit 29) */
static const u32 ice_ptypes_mac_il[] = {
	0x00000000, 0x20000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for GTPC */
/* Ptype bits live in word 10 only */
static const u32 ice_ptypes_gtpc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x000001E0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for VXLAN with VNI */
/* 32 x u32 = 1024-bit ptype bitmap */
static const u32 ice_ptypes_vxlan_vni[] = {
	0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
	0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for GTPC with TEID */
/* Ptype bits live in word 10 only; a subset of ice_ptypes_gtpc */
static const u32 ice_ptypes_gtpc_tid[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000060, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for GTPU */
/* Each entry pairs one GTP-U ptype with the GTP "session" attribute flag.
 * Covers inner IPv4/IPv6 over outer IPv4/IPv6, plus the
 * MAC_IPVx_TUN_IPVx_GTPU_* double-tunneled variants.
 */
static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_SESSION },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_SESSION },
/* GTP-U ptypes carrying a PDU session container extension header (EH).
 * Same ptype coverage as ice_attr_gtpu_session, but tagged with the
 * ICE_PTYPE_ATTR_GTP_PDU_EH attribute instead.
 */
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP,	ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6,	ICE_PTYPE_ATTR_GTP_PDU_EH },
654 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
655 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
656 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
657 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
658 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
659 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
660 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
661 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
662 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
663 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
664 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
665 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
666 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
667 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
668 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
669 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
670 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
671 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
672 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
673 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
674 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
675 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
676 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
677 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
678 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
679 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
680 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
681 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
682 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
683 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
684 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
685 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
686 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
687 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
688 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
689 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
690 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
691 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
692 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
693 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
694 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
695 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
696 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
697 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
698 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
699 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
700 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
701 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
702 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
703 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
704 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
705 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
706 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
707 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
708 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
709 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
710 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
711 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
712 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
713 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
714 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
717 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
718 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
719 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
720 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
721 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
722 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
723 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
724 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
725 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
726 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
727 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
728 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
729 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
730 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
731 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
732 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
733 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
734 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
735 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
736 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
737 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
738 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
739 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
740 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
741 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
742 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
743 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
744 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
745 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
746 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
747 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
748 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
749 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
750 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
751 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
752 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
753 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
754 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
755 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
756 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
757 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
758 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
759 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
760 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
761 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
762 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
763 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
764 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
765 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
766 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
767 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
768 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
769 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
770 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
771 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
772 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
773 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
774 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
775 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
776 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
777 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
780 static const u32 ice_ptypes_gtpu[] = {
781 0x00000000, 0x00000000, 0x00000000, 0x00000000,
782 0x00000000, 0x00000000, 0x00000000, 0x00000000,
783 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
784 0x00000000, 0x0000003F, 0xBEFBEFBC, 0x0002FBEF,
785 0x00000000, 0x00000000, 0x00000000, 0x00000000,
786 0x00000000, 0x00000000, 0x00000000, 0x00000000,
787 0x00000000, 0x00000000, 0x00000000, 0x00000000,
788 0x00000000, 0x00000000, 0x00000000, 0x00000000,
791 /* Packet types for pppoe */
792 static const u32 ice_ptypes_pppoe[] = {
793 0x00000000, 0x00000000, 0x00000000, 0x00000000,
794 0x00000000, 0x00000000, 0x00000000, 0x00000000,
795 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
796 0x00000000, 0x00000000, 0x00000000, 0x00000000,
797 0x00000000, 0x00000000, 0x00000000, 0x00000000,
798 0x00000000, 0x00000000, 0x00000000, 0x00000000,
799 0x00000000, 0x00000000, 0x00000000, 0x00000000,
800 0x00000000, 0x00000000, 0x00000000, 0x00000000,
803 /* Packet types for packets with PFCP NODE header */
804 static const u32 ice_ptypes_pfcp_node[] = {
805 0x00000000, 0x00000000, 0x00000000, 0x00000000,
806 0x00000000, 0x00000000, 0x00000000, 0x00000000,
807 0x00000000, 0x00000000, 0x80000000, 0x00000002,
808 0x00000000, 0x00000000, 0x00000000, 0x00000000,
809 0x00000000, 0x00000000, 0x00000000, 0x00000000,
810 0x00000000, 0x00000000, 0x00000000, 0x00000000,
811 0x00000000, 0x00000000, 0x00000000, 0x00000000,
812 0x00000000, 0x00000000, 0x00000000, 0x00000000,
815 /* Packet types for packets with PFCP SESSION header */
816 static const u32 ice_ptypes_pfcp_session[] = {
817 0x00000000, 0x00000000, 0x00000000, 0x00000000,
818 0x00000000, 0x00000000, 0x00000000, 0x00000000,
819 0x00000000, 0x00000000, 0x00000000, 0x00000005,
820 0x00000000, 0x00000000, 0x00000000, 0x00000000,
821 0x00000000, 0x00000000, 0x00000000, 0x00000000,
822 0x00000000, 0x00000000, 0x00000000, 0x00000000,
823 0x00000000, 0x00000000, 0x00000000, 0x00000000,
824 0x00000000, 0x00000000, 0x00000000, 0x00000000,
827 /* Packet types for l2tpv3 */
828 static const u32 ice_ptypes_l2tpv3[] = {
829 0x00000000, 0x00000000, 0x00000000, 0x00000000,
830 0x00000000, 0x00000000, 0x00000000, 0x00000000,
831 0x00000000, 0x00000000, 0x00000000, 0x00000300,
832 0x00000000, 0x00000000, 0x00000000, 0x00000000,
833 0x00000000, 0x00000000, 0x00000000, 0x00000000,
834 0x00000000, 0x00000000, 0x00000000, 0x00000000,
835 0x00000000, 0x00000000, 0x00000000, 0x00000000,
836 0x00000000, 0x00000000, 0x00000000, 0x00000000,
839 /* Packet types for esp */
840 static const u32 ice_ptypes_esp[] = {
841 0x00000000, 0x00000000, 0x00000000, 0x00000000,
842 0x00000000, 0x00000003, 0x00000000, 0x00000000,
843 0x00000000, 0x00000000, 0x00000000, 0x00000000,
844 0x00000000, 0x00000000, 0x00000000, 0x00000000,
845 0x00000000, 0x00000000, 0x00000000, 0x00000000,
846 0x00000000, 0x00000000, 0x00000000, 0x00000000,
847 0x00000000, 0x00000000, 0x00000000, 0x00000000,
848 0x00000000, 0x00000000, 0x00000000, 0x00000000,
851 /* Packet types for ah */
852 static const u32 ice_ptypes_ah[] = {
853 0x00000000, 0x00000000, 0x00000000, 0x00000000,
854 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
855 0x00000000, 0x00000000, 0x00000000, 0x00000000,
856 0x00000000, 0x00000000, 0x00000000, 0x00000000,
857 0x00000000, 0x00000000, 0x00000000, 0x00000000,
858 0x00000000, 0x00000000, 0x00000000, 0x00000000,
859 0x00000000, 0x00000000, 0x00000000, 0x00000000,
860 0x00000000, 0x00000000, 0x00000000, 0x00000000,
863 /* Packet types for packets with NAT_T ESP header */
864 static const u32 ice_ptypes_nat_t_esp[] = {
865 0x00000000, 0x00000000, 0x00000000, 0x00000000,
866 0x00000000, 0x00000030, 0x00000000, 0x00000000,
867 0x00000000, 0x00000000, 0x00000000, 0x00000000,
868 0x00000000, 0x00000000, 0x00000000, 0x00000000,
869 0x00000000, 0x00000000, 0x00000000, 0x00000000,
870 0x00000000, 0x00000000, 0x00000000, 0x00000000,
871 0x00000000, 0x00000000, 0x00000000, 0x00000000,
872 0x00000000, 0x00000000, 0x00000000, 0x00000000,
875 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
876 0x00000846, 0x00000000, 0x00000000, 0x00000000,
877 0x00000000, 0x00000000, 0x00000000, 0x00000000,
878 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
879 0x00000000, 0x00000000, 0x00000000, 0x00000000,
880 0x00000000, 0x00000000, 0x00000000, 0x00000000,
881 0x00000000, 0x00000000, 0x00000000, 0x00000000,
882 0x00000000, 0x00000000, 0x00000000, 0x00000000,
883 0x00000000, 0x00000000, 0x00000000, 0x00000000,
886 static const u32 ice_ptypes_gtpu_no_ip[] = {
887 0x00000000, 0x00000000, 0x00000000, 0x00000000,
888 0x00000000, 0x00000000, 0x00000000, 0x00000000,
889 0x00000000, 0x00000000, 0x00000600, 0x00000000,
890 0x00000000, 0x00000000, 0x00000000, 0x00000000,
891 0x00000000, 0x00000000, 0x00000000, 0x00000000,
892 0x00000000, 0x00000000, 0x00000000, 0x00000000,
893 0x00000000, 0x00000000, 0x00000000, 0x00000000,
894 0x00000000, 0x00000000, 0x00000000, 0x00000000,
897 static const u32 ice_ptypes_ecpri_tp0[] = {
898 0x00000000, 0x00000000, 0x00000000, 0x00000000,
899 0x00000000, 0x00000000, 0x00000000, 0x00000000,
900 0x00000000, 0x00000000, 0x00000000, 0x00000400,
901 0x00000000, 0x00000000, 0x00000000, 0x00000000,
902 0x00000000, 0x00000000, 0x00000000, 0x00000000,
903 0x00000000, 0x00000000, 0x00000000, 0x00000000,
904 0x00000000, 0x00000000, 0x00000000, 0x00000000,
905 0x00000000, 0x00000000, 0x00000000, 0x00000000,
908 static const u32 ice_ptypes_udp_ecpri_tp0[] = {
909 0x00000000, 0x00000000, 0x00000000, 0x00000000,
910 0x00000000, 0x00000000, 0x00000000, 0x00000000,
911 0x00000000, 0x00000000, 0x00000000, 0x00100000,
912 0x00000000, 0x00000000, 0x00000000, 0x00000000,
913 0x00000000, 0x00000000, 0x00000000, 0x00000000,
914 0x00000000, 0x00000000, 0x00000000, 0x00000000,
915 0x00000000, 0x00000000, 0x00000000, 0x00000000,
916 0x00000000, 0x00000000, 0x00000000, 0x00000000,
919 static const u32 ice_ptypes_l2tpv2[] = {
920 0x00000000, 0x00000000, 0x00000000, 0x00000000,
921 0x00000000, 0x00000000, 0x00000000, 0x00000000,
922 0x00000000, 0x00000000, 0x00000000, 0x00000000,
923 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
924 0x00000000, 0x00000000, 0x00000000, 0x00000000,
925 0x00000000, 0x00000000, 0x00000000, 0x00000000,
926 0x00000000, 0x00000000, 0x00000000, 0x00000000,
927 0x00000000, 0x00000000, 0x00000000, 0x00000000,
930 static const u32 ice_ptypes_ppp[] = {
931 0x00000000, 0x00000000, 0x00000000, 0x00000000,
932 0x00000000, 0x00000000, 0x00000000, 0x00000000,
933 0x00000000, 0x00000000, 0x00000000, 0x00000000,
934 0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
935 0x00000000, 0x00000000, 0x00000000, 0x00000000,
936 0x00000000, 0x00000000, 0x00000000, 0x00000000,
937 0x00000000, 0x00000000, 0x00000000, 0x00000000,
938 0x00000000, 0x00000000, 0x00000000, 0x00000000,
941 static const u32 ice_ptypes_ipv4_frag[] = {
942 0x00400000, 0x00000000, 0x00000000, 0x00000000,
943 0x00000000, 0x00000000, 0x00000000, 0x00000000,
944 0x00000000, 0x00000000, 0x00000000, 0x00000000,
945 0x00000000, 0x00000000, 0x00000000, 0x00000000,
946 0x00000000, 0x00000000, 0x00000000, 0x00000000,
947 0x00000000, 0x00000000, 0x00000000, 0x00000000,
948 0x00000000, 0x00000000, 0x00000000, 0x00000000,
949 0x00000000, 0x00000000, 0x00000000, 0x00000000,
952 static const u32 ice_ptypes_ipv6_frag[] = {
953 0x00000000, 0x00000000, 0x01000000, 0x00000000,
954 0x00000000, 0x00000000, 0x00000000, 0x00000000,
955 0x00000000, 0x00000000, 0x00000000, 0x00000000,
956 0x00000000, 0x00000000, 0x00000000, 0x00000000,
957 0x00000000, 0x00000000, 0x00000000, 0x00000000,
958 0x00000000, 0x00000000, 0x00000000, 0x00000000,
959 0x00000000, 0x00000000, 0x00000000, 0x00000000,
960 0x00000000, 0x00000000, 0x00000000, 0x00000000,
963 /* Manage parameters and info. used during the creation of a flow profile */
964 struct ice_flow_prof_params {
966 u16 entry_length; /* # of bytes formatted entry will require */
968 struct ice_flow_prof *prof;
970 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
971 * This will give us the direction flags.
973 struct ice_fv_word es[ICE_MAX_FV_WORDS];
974 /* attributes can be used to add attributes to a particular PTYPE */
975 const struct ice_ptype_attributes *attr;
978 u16 mask[ICE_MAX_FV_WORDS];
979 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Protocol headers that may appear in an inner (tunneled) RSS segment */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	 ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
	 ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
	 ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP | ICE_FLOW_SEG_HDR_GRE)

#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER	\
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
1004 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
1005 * @segs: array of one or more packet segments that describe the flow
1006 * @segs_cnt: number of packet segments provided
1008 static enum ice_status
1009 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
1013 for (i = 0; i < segs_cnt; i++) {
1014 /* Multiple L3 headers */
1015 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
1016 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
1017 return ICE_ERR_PARAM;
1019 /* Multiple L4 headers */
1020 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
1021 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
1022 return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
1040 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
1041 * @params: information about the flow to be processed
1042 * @seg: index of packet segment whose header size is to be determined
1044 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
1049 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
1050 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
1053 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
1054 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
1055 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
1056 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
1057 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
1058 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
1059 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
1060 /* A L3 header is required if L4 is specified */
1064 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
1065 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
1066 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
1067 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
1068 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
1069 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
1070 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
1071 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
1077 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
1078 * @params: information about the flow to be processed
1080 * This function identifies the packet types associated with the protocol
1081 * headers being present in packet segments of the specified flow profile.
1083 static enum ice_status
1084 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
1086 struct ice_flow_prof *prof;
1089 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
1092 prof = params->prof;
1094 for (i = 0; i < params->prof->segs_cnt; i++) {
1095 const ice_bitmap_t *src;
1098 hdrs = prof->segs[i].hdrs;
1100 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
1101 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
1102 (const ice_bitmap_t *)ice_ptypes_mac_il;
1103 ice_and_bitmap(params->ptypes, params->ptypes, src,
1104 ICE_FLOW_PTYPE_MAX);
1107 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
1108 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
1109 ice_and_bitmap(params->ptypes, params->ptypes, src,
1110 ICE_FLOW_PTYPE_MAX);
1113 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
1114 ice_and_bitmap(params->ptypes, params->ptypes,
1115 (const ice_bitmap_t *)ice_ptypes_arp_of,
1116 ICE_FLOW_PTYPE_MAX);
1119 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
1120 src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
1121 ice_and_bitmap(params->ptypes, params->ptypes, src,
1122 ICE_FLOW_PTYPE_MAX);
1124 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1125 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1127 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
1128 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
1129 ice_and_bitmap(params->ptypes, params->ptypes, src,
1130 ICE_FLOW_PTYPE_MAX);
1131 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1132 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1134 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
1135 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
1136 ice_and_bitmap(params->ptypes, params->ptypes, src,
1137 ICE_FLOW_PTYPE_MAX);
1138 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1139 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1140 src = (const ice_bitmap_t *)ice_ptypes_ipv4_frag;
1141 ice_and_bitmap(params->ptypes, params->ptypes, src,
1142 ICE_FLOW_PTYPE_MAX);
1143 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1144 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1145 src = (const ice_bitmap_t *)ice_ptypes_ipv6_frag;
1146 ice_and_bitmap(params->ptypes, params->ptypes, src,
1147 ICE_FLOW_PTYPE_MAX);
1148 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1149 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1150 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
1151 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
1152 ice_and_bitmap(params->ptypes, params->ptypes, src,
1153 ICE_FLOW_PTYPE_MAX);
1154 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
1155 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
1156 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
1157 ice_and_bitmap(params->ptypes, params->ptypes, src,
1158 ICE_FLOW_PTYPE_MAX);
1159 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1160 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1161 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
1162 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
1163 ice_and_bitmap(params->ptypes, params->ptypes, src,
1164 ICE_FLOW_PTYPE_MAX);
1165 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
1166 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
1167 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
1168 ice_and_bitmap(params->ptypes, params->ptypes, src,
1169 ICE_FLOW_PTYPE_MAX);
1172 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
1173 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
1174 ice_and_bitmap(params->ptypes, params->ptypes,
1175 src, ICE_FLOW_PTYPE_MAX);
1176 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
1177 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1178 ice_and_bitmap(params->ptypes, params->ptypes, src,
1179 ICE_FLOW_PTYPE_MAX);
1181 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1182 ice_andnot_bitmap(params->ptypes, params->ptypes, src,
1183 ICE_FLOW_PTYPE_MAX);
1186 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
1187 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
1188 ice_and_bitmap(params->ptypes, params->ptypes, src,
1189 ICE_FLOW_PTYPE_MAX);
1190 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
1191 ice_and_bitmap(params->ptypes, params->ptypes,
1192 (const ice_bitmap_t *)ice_ptypes_tcp_il,
1193 ICE_FLOW_PTYPE_MAX);
1194 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
1195 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
1196 ice_and_bitmap(params->ptypes, params->ptypes, src,
1197 ICE_FLOW_PTYPE_MAX);
1200 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
1201 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
1202 (const ice_bitmap_t *)ice_ptypes_icmp_il;
1203 ice_and_bitmap(params->ptypes, params->ptypes, src,
1204 ICE_FLOW_PTYPE_MAX);
1205 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
1206 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
1207 ice_and_bitmap(params->ptypes, params->ptypes, src,
1208 ICE_FLOW_PTYPE_MAX);
1209 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
1210 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
1211 ice_and_bitmap(params->ptypes, params->ptypes,
1212 src, ICE_FLOW_PTYPE_MAX);
1213 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
1214 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
1215 ice_and_bitmap(params->ptypes, params->ptypes,
1216 src, ICE_FLOW_PTYPE_MAX);
1217 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
1218 src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
1219 ice_and_bitmap(params->ptypes, params->ptypes,
1220 src, ICE_FLOW_PTYPE_MAX);
1221 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
1222 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1223 ice_and_bitmap(params->ptypes, params->ptypes,
1224 src, ICE_FLOW_PTYPE_MAX);
1226 /* Attributes for GTP packet with downlink */
1227 params->attr = ice_attr_gtpu_down;
1228 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1229 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
1230 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1231 ice_and_bitmap(params->ptypes, params->ptypes,
1232 src, ICE_FLOW_PTYPE_MAX);
1234 /* Attributes for GTP packet with uplink */
1235 params->attr = ice_attr_gtpu_up;
1236 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1237 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
1238 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1239 ice_and_bitmap(params->ptypes, params->ptypes,
1240 src, ICE_FLOW_PTYPE_MAX);
1242 /* Attributes for GTP packet with Extension Header */
1243 params->attr = ice_attr_gtpu_eh;
1244 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1245 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1246 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1247 ice_and_bitmap(params->ptypes, params->ptypes,
1248 src, ICE_FLOW_PTYPE_MAX);
1250 /* Attributes for GTP packet without Extension Header */
1251 params->attr = ice_attr_gtpu_session;
1252 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1253 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
1254 src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
1255 ice_and_bitmap(params->ptypes, params->ptypes,
1256 src, ICE_FLOW_PTYPE_MAX);
1257 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1258 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1259 ice_and_bitmap(params->ptypes, params->ptypes,
1260 src, ICE_FLOW_PTYPE_MAX);
1261 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1262 src = (const ice_bitmap_t *)ice_ptypes_esp;
1263 ice_and_bitmap(params->ptypes, params->ptypes,
1264 src, ICE_FLOW_PTYPE_MAX);
1265 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1266 src = (const ice_bitmap_t *)ice_ptypes_ah;
1267 ice_and_bitmap(params->ptypes, params->ptypes,
1268 src, ICE_FLOW_PTYPE_MAX);
1269 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1270 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1271 ice_and_bitmap(params->ptypes, params->ptypes,
1272 src, ICE_FLOW_PTYPE_MAX);
1273 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1274 src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1275 ice_and_bitmap(params->ptypes, params->ptypes,
1276 src, ICE_FLOW_PTYPE_MAX);
1277 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1278 src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1279 ice_and_bitmap(params->ptypes, params->ptypes,
1280 src, ICE_FLOW_PTYPE_MAX);
1283 if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
1284 src = (const ice_bitmap_t *)ice_ptypes_ppp;
1285 ice_and_bitmap(params->ptypes, params->ptypes,
1286 src, ICE_FLOW_PTYPE_MAX);
1289 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1290 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1292 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1295 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1297 ice_and_bitmap(params->ptypes, params->ptypes,
1298 src, ICE_FLOW_PTYPE_MAX);
1300 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1301 ice_andnot_bitmap(params->ptypes, params->ptypes,
1302 src, ICE_FLOW_PTYPE_MAX);
1304 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1305 ice_andnot_bitmap(params->ptypes, params->ptypes,
1306 src, ICE_FLOW_PTYPE_MAX);
1314 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
1315 * @hw: pointer to the HW struct
1316 * @params: information about the flow to be processed
1317 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1319 * This function will allocate an extraction sequence entries for a DWORD size
1320 * chunk of the packet flags.
1322 static enum ice_status
1323 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1324 struct ice_flow_prof_params *params,
1325 enum ice_flex_mdid_pkt_flags flags)
1327 u8 fv_words = hw->blk[params->blk].es.fvw;
1330 /* Make sure the number of extraction sequence entries required does not
1331 * exceed the block's capacity.
1333 if (params->es_cnt >= fv_words)
1334 return ICE_ERR_MAX_LIMIT;
1336 /* some blocks require a reversed field vector layout */
1337 if (hw->blk[params->blk].es.reverse)
1338 idx = fv_words - params->es_cnt - 1;
1340 idx = params->es_cnt;
1342 params->es[idx].prot_id = ICE_PROT_META_ID;
1343 params->es[idx].off = flags;
1350 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1351 * @hw: pointer to the HW struct
1352 * @params: information about the flow to be processed
1353 * @seg: packet segment index of the field to be extracted
1354 * @fld: ID of field to be extracted
1355 * @match: bitfield of all fields
1357 * This function determines the protocol ID, offset, and size of the given
1358 * field. It then allocates one or more extraction sequence entries for the
1359 * given field, and fill the entries with protocol ID and offset information.
1361 static enum ice_status
1362 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1363 u8 seg, enum ice_flow_field fld, u64 match)
1365 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1366 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1367 u8 fv_words = hw->blk[params->blk].es.fvw;
1368 struct ice_flow_fld_info *flds;
1369 u16 cnt, ese_bits, i;
1374 flds = params->prof->segs[seg].fields;
1377 case ICE_FLOW_FIELD_IDX_ETH_DA:
1378 case ICE_FLOW_FIELD_IDX_ETH_SA:
1379 case ICE_FLOW_FIELD_IDX_S_VLAN:
1380 case ICE_FLOW_FIELD_IDX_C_VLAN:
1381 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1383 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1384 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1386 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1387 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1389 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1390 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1392 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1393 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1394 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1396 /* TTL and PROT share the same extraction seq. entry.
1397 * Each is considered a sibling to the other in terms of sharing
1398 * the same extraction sequence entry.
1400 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1401 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1403 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1405 /* If the sibling field is also included, that field's
1406 * mask needs to be included.
1408 if (match & BIT(sib))
1409 sib_mask = ice_flds_info[sib].mask;
1411 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1412 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1413 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1415 /* TTL and PROT share the same extraction seq. entry.
1416 * Each is considered a sibling to the other in terms of sharing
1417 * the same extraction sequence entry.
1419 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1420 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1422 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1424 /* If the sibling field is also included, that field's
1425 * mask needs to be included.
1427 if (match & BIT(sib))
1428 sib_mask = ice_flds_info[sib].mask;
1430 case ICE_FLOW_FIELD_IDX_IPV4_SA:
1431 case ICE_FLOW_FIELD_IDX_IPV4_DA:
1432 case ICE_FLOW_FIELD_IDX_IPV4_CHKSUM:
1433 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1434 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1435 params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1437 prot_id = ICE_PROT_IPV4_IL_IL;
1439 case ICE_FLOW_FIELD_IDX_IPV4_ID:
1440 prot_id = ICE_PROT_IPV4_OF_OR_S;
1442 case ICE_FLOW_FIELD_IDX_IPV6_SA:
1443 case ICE_FLOW_FIELD_IDX_IPV6_DA:
1444 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1445 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1446 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1447 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1448 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1449 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1450 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1451 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1452 params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1454 prot_id = ICE_PROT_IPV6_IL_IL;
1456 case ICE_FLOW_FIELD_IDX_IPV6_ID:
1457 prot_id = ICE_PROT_IPV6_FRAG;
1459 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1460 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1461 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1462 case ICE_FLOW_FIELD_IDX_TCP_CHKSUM:
1463 prot_id = ICE_PROT_TCP_IL;
1465 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1466 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1467 case ICE_FLOW_FIELD_IDX_UDP_CHKSUM:
1468 prot_id = ICE_PROT_UDP_IL_OR_S;
1470 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1471 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1472 case ICE_FLOW_FIELD_IDX_SCTP_CHKSUM:
1473 prot_id = ICE_PROT_SCTP_IL;
1475 case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1476 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1477 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1478 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1479 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1480 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1481 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1482 case ICE_FLOW_FIELD_IDX_GTPU_UP_QFI:
1483 case ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI:
1484 /* GTP is accessed through UDP OF protocol */
1485 prot_id = ICE_PROT_UDP_OF;
1487 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1488 prot_id = ICE_PROT_PPPOE;
1490 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1491 prot_id = ICE_PROT_UDP_IL_OR_S;
1493 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1494 prot_id = ICE_PROT_L2TPV3;
1496 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1497 prot_id = ICE_PROT_ESP_F;
1499 case ICE_FLOW_FIELD_IDX_AH_SPI:
1500 prot_id = ICE_PROT_ESP_2;
1502 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1503 prot_id = ICE_PROT_UDP_IL_OR_S;
1505 case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1506 prot_id = ICE_PROT_ECPRI;
1508 case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1509 prot_id = ICE_PROT_UDP_IL_OR_S;
1511 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1512 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1513 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1514 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1515 case ICE_FLOW_FIELD_IDX_ARP_OP:
1516 prot_id = ICE_PROT_ARP_OF;
1518 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1519 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1520 /* ICMP type and code share the same extraction seq. entry */
1521 prot_id = (params->prof->segs[seg].hdrs &
1522 ICE_FLOW_SEG_HDR_IPV4) ?
1523 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1524 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1525 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1526 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1528 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1529 prot_id = ICE_PROT_GRE_OF;
1532 return ICE_ERR_NOT_IMPL;
1535 /* Each extraction sequence entry is a word in size, and extracts a
1536 * word-aligned offset from a protocol header.
1538 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1540 flds[fld].xtrct.prot_id = prot_id;
1541 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1542 ICE_FLOW_FV_EXTRACT_SZ;
1543 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1544 flds[fld].xtrct.idx = params->es_cnt;
1545 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1547 /* Adjust the next field-entry index after accommodating the number of
1548 * entries this field consumes
1550 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1551 ice_flds_info[fld].size, ese_bits);
1553 /* Fill in the extraction sequence entries needed for this field */
1554 off = flds[fld].xtrct.off;
1555 mask = flds[fld].xtrct.mask;
1556 for (i = 0; i < cnt; i++) {
1557 /* Only consume an extraction sequence entry if there is no
1558 * sibling field associated with this field or the sibling entry
1559 * already extracts the word shared with this field.
1561 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1562 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1563 flds[sib].xtrct.off != off) {
1566 /* Make sure the number of extraction sequence required
1567 * does not exceed the block's capability
1569 if (params->es_cnt >= fv_words)
1570 return ICE_ERR_MAX_LIMIT;
1572 /* some blocks require a reversed field vector layout */
1573 if (hw->blk[params->blk].es.reverse)
1574 idx = fv_words - params->es_cnt - 1;
1576 idx = params->es_cnt;
1578 params->es[idx].prot_id = prot_id;
1579 params->es[idx].off = off;
1580 params->mask[idx] = mask | sib_mask;
1584 off += ICE_FLOW_FV_EXTRACT_SZ;
1591 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1592 * @hw: pointer to the HW struct
1593 * @params: information about the flow to be processed
1594 * @seg: index of packet segment whose raw fields are to be extracted
1596 static enum ice_status
1597 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
/* Nothing to do when the segment defines no raw match fields */
1604 if (!params->prof->segs[seg].raws_cnt)
1607 if (params->prof->segs[seg].raws_cnt >
1608 ARRAY_SIZE(params->prof->segs[seg].raws))
1609 return ICE_ERR_MAX_LIMIT;
1611 /* Offsets within the segment headers are not supported */
1612 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1614 return ICE_ERR_PARAM;
1616 fv_words = hw->blk[params->blk].es.fvw;
1618 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1619 struct ice_flow_seg_fld_raw *raw;
/* fix: "&params" was mis-encoded as the pilcrow entity "¶ms" */
1622 raw = &params->prof->segs[seg].raws[i];
1624 /* Storing extraction information */
1625 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
/* Round the raw offset down to a word-aligned extraction offset;
 * "disp" records the remaining displacement inside that word, in bits.
 */
1626 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1627 ICE_FLOW_FV_EXTRACT_SZ;
1628 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1630 raw->info.xtrct.idx = params->es_cnt;
1632 /* Determine the number of field vector entries this raw field
1635 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1636 (raw->info.src.last * BITS_PER_BYTE),
1637 (ICE_FLOW_FV_EXTRACT_SZ *
1639 off = raw->info.xtrct.off;
1640 for (j = 0; j < cnt; j++) {
1643 /* Make sure the number of extraction sequence required
1644 * does not exceed the block's capability
1646 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1647 params->es_cnt >= ICE_MAX_FV_WORDS)
1648 return ICE_ERR_MAX_LIMIT;
1650 /* some blocks require a reversed field vector layout */
1651 if (hw->blk[params->blk].es.reverse)
1652 idx = fv_words - params->es_cnt - 1;
1654 idx = params->es_cnt;
1656 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1657 params->es[idx].off = off;
1659 off += ICE_FLOW_FV_EXTRACT_SZ;
1667 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1668 * @hw: pointer to the HW struct
1669 * @params: information about the flow to be processed
1671 * This function iterates through all matched fields in the given segments, and
1672 * creates an extraction sequence for the fields.
1674 static enum ice_status
1675 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1676 struct ice_flow_prof_params *params)
1678 enum ice_status status = ICE_SUCCESS;
1681 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1684 if (params->blk == ICE_BLK_ACL) {
1685 status = ice_flow_xtract_pkt_flags(hw, params,
1686 ICE_RX_MDID_PKT_FLAGS_15_0);
/* Walk every packet segment of the profile and emit extraction entries */
1691 for (i = 0; i < params->prof->segs_cnt; i++) {
/* Work on a local copy of the match bitmap so the profile's own
 * match mask is left untouched while bits are cleared below.
 */
1692 u64 match = params->prof->segs[i].match;
1693 enum ice_flow_field j;
1695 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1696 ICE_FLOW_FIELD_IDX_MAX) {
1697 status = ice_flow_xtract_fld(hw, params, i, j, match);
/* Clear the bit just handled so the iteration makes progress */
1700 ice_clear_bit(j, (ice_bitmap_t *)&match);
1703 /* Process raw matching bytes */
1704 status = ice_flow_xtract_raws(hw, params, i);
1713 * ice_flow_sel_acl_scen - returns the specific scenario
1714 * @hw: pointer to the hardware structure
1715 * @params: information about the flow to be processed
1717 * This function will return the specific scenario based on the
1718 * params passed to it
1720 static enum ice_status
1721 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1723 /* Find the best-fit scenario for the provided match width */
1724 struct ice_acl_scen *cand_scen = NULL, *scen;
/* No ACL table allocated means no scenarios exist to choose from */
1727 return ICE_ERR_DOES_NOT_EXIST;
1729 /* Loop through each scenario and match against the scenario width
1730 * to select the specific scenario
1732 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
/* Best fit: the narrowest scenario still wide enough for the entry */
1733 if (scen->eff_width >= params->entry_length &&
1734 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1737 return ICE_ERR_DOES_NOT_EXIST;
/* Record the selected scenario on the profile for later entry adds */
1739 params->prof->cfg.scen = cand_scen;
1745 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1746 * @params: information about the flow to be processed
1748 static enum ice_status
1749 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1751 u16 index, i, range_idx = 0;
1753 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1755 for (i = 0; i < params->prof->segs_cnt; i++) {
/* fix: "&params" was mis-encoded as the pilcrow entity "¶ms" */
1756 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1759 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1760 ICE_FLOW_FIELD_IDX_MAX) {
1761 struct ice_flow_fld_info *fld = &seg->fields[j];
1763 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1765 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1766 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1768 /* Range checking only supported for single
1771 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1773 BITS_PER_BYTE * 2) > 1)
1774 return ICE_ERR_PARAM;
1776 /* Ranges must define low and high values */
1777 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1778 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1779 return ICE_ERR_PARAM;
/* Range fields consume a range-checker slot, not entry bytes */
1781 fld->entry.val = range_idx++;
1783 /* Store adjusted byte-length of field for later
1784 * use, taking into account potential
1785 * non-byte-aligned displacement
1787 fld->entry.last = DIVIDE_AND_ROUND_UP
1788 (ice_flds_info[j].size +
1789 (fld->xtrct.disp % BITS_PER_BYTE),
1791 fld->entry.val = index;
1792 index += fld->entry.last;
/* Raw fields are appended after all regular matched fields */
1796 for (j = 0; j < seg->raws_cnt; j++) {
1797 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1799 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1800 raw->info.entry.val = index;
1801 raw->info.entry.last = raw->info.src.last;
1802 index += raw->info.entry.last;
1806 /* Currently only support using the byte selection base, which only
1807 * allows for an effective entry size of 30 bytes. Reject anything
1810 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1811 return ICE_ERR_PARAM;
1813 /* Only 8 range checkers per profile, reject anything trying to use
1816 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1817 return ICE_ERR_PARAM;
1819 /* Store # bytes required for entry for later use */
1820 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1826 * ice_flow_proc_segs - process all packet segments associated with a profile
1827 * @hw: pointer to the HW struct
1828 * @params: information about the flow to be processed
1830 static enum ice_status
1831 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1833 enum ice_status status;
/* First resolve header stacks, then build the extraction sequence */
1835 status = ice_flow_proc_seg_hdrs(params);
1839 status = ice_flow_create_xtrct_seq(hw, params);
/* Block-specific post-processing of the extraction sequence */
1843 switch (params->blk) {
1846 status = ICE_SUCCESS;
/* ACL additionally needs the entry layout and a matching scenario */
1849 status = ice_flow_acl_def_entry_frmt(params);
1852 status = ice_flow_sel_acl_scen(hw, params);
/* Blocks without flow support fall through to not-implemented */
1857 return ICE_ERR_NOT_IMPL;
/* Condition flags for ice_flow_find_prof_conds() */
1863 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001 /* also compare matched fields */
1864 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002 /* require VSI association */
1865 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004 /* ignore flow direction */
1868 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1869 * @hw: pointer to the HW struct
1870 * @blk: classification stage
1871 * @dir: flow direction
1872 * @segs: array of one or more packet segments that describe the flow
1873 * @segs_cnt: number of packet segments provided
1874 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1875 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1877 static struct ice_flow_prof *
1878 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1879 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1880 u8 segs_cnt, u16 vsi_handle, u32 conds)
1882 struct ice_flow_prof *p, *prof = NULL;
/* The profile list is walked under the per-block profile lock */
1884 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1885 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
/* Direction must match unless NOT_CHK_DIR is set; segment
 * counts must be non-zero and equal before deeper checks.
 */
1886 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1887 segs_cnt && segs_cnt == p->segs_cnt) {
1890 /* Check for profile-VSI association if specified */
1891 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1892 ice_is_vsi_valid(hw, vsi_handle) &&
1893 !ice_is_bit_set(p->vsis, vsi_handle))
1896 /* Protocol headers must be checked. Matched fields are
1897 * checked if specified.
1899 for (i = 0; i < segs_cnt; i++)
1900 if (segs[i].hdrs != p->segs[i].hdrs ||
1901 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1902 segs[i].match != p->segs[i].match))
1905 /* A match is found if all segments are matched */
1906 if (i == segs_cnt) {
1911 ice_release_lock(&hw->fl_profs_locks[blk]);
1917 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1918 * @hw: pointer to the HW struct
1919 * @blk: classification stage
1920 * @dir: flow direction
1921 * @segs: array of one or more packet segments that describe the flow
1922 * @segs_cnt: number of packet segments provided
1925 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1926 struct ice_flow_seg_info *segs, u8 segs_cnt)
1928 struct ice_flow_prof *p;
/* Delegate to the conditional lookup, also comparing matched fields;
 * ICE_MAX_VSI is passed because no VSI check is requested here.
 */
1930 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1931 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
/* Return the profile's ID, or the invalid-ID sentinel if not found */
1933 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1937 * ice_flow_find_prof_id - Look up a profile with given profile ID
1938 * @hw: pointer to the HW struct
1939 * @blk: classification stage
1940 * @prof_id: unique ID to identify this flow profile
1942 static struct ice_flow_prof *
1943 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1945 struct ice_flow_prof *p;
/* NOTE(review): no locking here — presumably the caller holds
 * hw->fl_profs_locks[blk]; confirm against call sites.
 */
1947 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1948 if (p->id == prof_id)
1955 * ice_dealloc_flow_entry - Deallocate flow entry memory
1956 * @hw: pointer to the HW struct
1957 * @entry: flow entry to be removed
1960 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
/* Release the entry's owned buffers first, then the entry itself */
1966 ice_free(hw, entry->entry);
1968 if (entry->range_buf) {
1969 ice_free(hw, entry->range_buf);
1970 entry->range_buf = NULL;
1974 ice_free(hw, entry->acts);
/* Reset the count so the freed actions cannot be referenced again */
1976 entry->acts_cnt = 0;
1979 ice_free(hw, entry);
1983 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1984 * @hw: pointer to the HW struct
1985 * @blk: classification stage
1986 * @prof_id: the profile ID handle
1987 * @hw_prof_id: pointer to variable to receive the HW profile ID
1990 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
/* Default to not-found; only a successful map lookup clears it */
1993 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1994 struct ice_prof_map *map;
/* The software-to-HW profile map is protected by its own lock */
1996 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1997 map = ice_search_prof_id(hw, blk, prof_id);
1999 *hw_prof_id = map->prof_id;
2000 status = ICE_SUCCESS;
2002 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
/* Scenario number value marking "no scenario assigned" for a PF */
2006 #define ICE_ACL_INVALID_SCEN 0x3f
2009 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
2010 * @hw: pointer to the hardware structure
2011 * @prof: pointer to flow profile
2012 * @buf: destination buffer function writes partial extraction sequence to
2014 * returns ICE_SUCCESS if no PF is associated to the given profile
2015 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
2016 * returns other error code for real error
2018 static enum ice_status
2019 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
2020 struct ice_aqc_acl_prof_generic_frmt *buf)
2022 enum ice_status status;
/* Translate the software profile ID to the HW profile index */
2025 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2029 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
2033 /* If all PF's associated scenarios are all 0 or all
2034 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
2035 * not been configured yet.
2037 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
2038 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
2039 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
2040 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
2043 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
2044 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
2045 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
2046 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
2047 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
2048 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
2049 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
2050 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
/* Any other mix of scenario numbers means some PF still uses it */
2053 return ICE_ERR_IN_USE;
2057 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
2058 * @hw: pointer to the hardware structure
2059 * @acts: array of actions to be performed on a match
2060 * @acts_cnt: number of actions
2062 static enum ice_status
2063 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
/* Only counter-type actions hold HW resources that need freeing */
2068 for (i = 0; i < acts_cnt; i++) {
2069 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2070 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2071 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2072 struct ice_acl_cntrs cntrs = { 0 };
2073 enum ice_status status;
2075 /* amount is unused in the dealloc path but the common
2076 * parameter check routine wants a value set, as zero
2077 * is invalid for the check. Just set it.
2080 cntrs.bank = 0; /* Only bank0 for the moment */
/* The counter index was stored LE in the action's value */
2082 LE16_TO_CPU(acts[i].data.acl_act.value);
2084 LE16_TO_CPU(acts[i].data.acl_act.value);
/* PKT_BYTES counters occupy a dual slot; others a single one */
2086 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2087 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2089 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2091 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
2100 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
2101 * @hw: pointer to the hardware structure
2102 * @prof: pointer to flow profile
2104 * Disassociate the scenario from the profile for the PF of the VSI.
2106 static enum ice_status
2107 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
2109 struct ice_aqc_acl_prof_generic_frmt buf;
2110 enum ice_status status = ICE_SUCCESS;
2113 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
/* Read the current profile format so only this PF's slot is changed */
2115 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2119 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
2123 /* Clear scenario for this PF */
2124 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
/* Write the modified format back to HW */
2125 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2131 * ice_flow_rem_entry_sync - Remove a flow entry
2132 * @hw: pointer to the HW struct
2133 * @blk: classification stage
2134 * @entry: flow entry to be removed
2136 static enum ice_status
2137 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
2138 struct ice_flow_entry *entry)
2141 return ICE_ERR_BAD_PTR;
/* ACL entries also occupy a scenario slot and possibly counters */
2143 if (blk == ICE_BLK_ACL) {
2144 enum ice_status status;
/* A profile without a configured scenario cannot hold entries */
2147 return ICE_ERR_BAD_PTR;
2149 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
2150 entry->scen_entry_idx);
2154 /* Checks if we need to release an ACL counter. */
2155 if (entry->acts_cnt && entry->acts)
2156 ice_flow_acl_free_act_cntr(hw, entry->acts,
/* Unlink from the profile's entry list, then free all memory */
2160 LIST_DEL(&entry->l_entry);
2162 ice_dealloc_flow_entry(hw, entry);
2168 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
2169 * @hw: pointer to the HW struct
2170 * @blk: classification stage
2171 * @dir: flow direction
2172 * @prof_id: unique ID to identify this flow profile
2173 * @segs: array of one or more packet segments that describe the flow
2174 * @segs_cnt: number of packet segments provided
2175 * @acts: array of default actions
2176 * @acts_cnt: number of default actions
2177 * @prof: stores the returned flow profile added
2179 * Assumption: the caller has acquired the lock to the profile list
2181 static enum ice_status
2182 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
2183 enum ice_flow_dir dir, u64 prof_id,
2184 struct ice_flow_seg_info *segs, u8 segs_cnt,
2185 struct ice_flow_action *acts, u8 acts_cnt,
2186 struct ice_flow_prof **prof)
2188 struct ice_flow_prof_params *params;
2189 enum ice_status status;
/* An output pointer is mandatory; actions are optional but, if a
 * count is given, the array must be non-NULL.
 */
2192 if (!prof || (acts_cnt && !acts))
2193 return ICE_ERR_BAD_PTR;
2195 params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2197 return ICE_ERR_NO_MEMORY;
2199 params->prof = (struct ice_flow_prof *)
2200 ice_malloc(hw, sizeof(*params->prof));
2201 if (!params->prof) {
2202 status = ICE_ERR_NO_MEMORY;
2206 /* initialize extraction sequence to all invalid (0xff) */
2207 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2208 params->es[i].prot_id = ICE_PROT_INVALID;
2209 params->es[i].off = ICE_FV_OFFSET_INVAL;
2213 params->prof->id = prof_id;
2214 params->prof->dir = dir;
2215 params->prof->segs_cnt = segs_cnt;
2217 /* Make a copy of the segments that need to be persistent in the flow
2220 for (i = 0; i < segs_cnt; i++)
/* fix: "&params" was mis-encoded as the pilcrow entity "¶ms" */
2221 ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
2222 ICE_NONDMA_TO_NONDMA);
2224 /* Make a copy of the actions that need to be persistent in the flow
2228 params->prof->acts = (struct ice_flow_action *)
2229 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2230 ICE_NONDMA_TO_NONDMA);
2232 if (!params->prof->acts) {
2233 status = ICE_ERR_NO_MEMORY;
2238 status = ice_flow_proc_segs(hw, params);
2240 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2244 /* Add a HW profile for this flow profile */
2245 status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2246 params->attr, params->attr_cnt, params->es,
2247 params->mask, true);
2249 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
/* fix: "&params" was mis-encoded as the pilcrow entity "¶ms" */
2253 INIT_LIST_HEAD(&params->prof->entries);
2254 ice_init_lock(&params->prof->entries_lock);
2255 *prof = params->prof;
/* Error path: free the partially constructed profile and actions */
2259 if (params->prof->acts)
2260 ice_free(hw, params->prof->acts);
2261 ice_free(hw, params->prof);
2264 ice_free(hw, params);
2270 * ice_flow_rem_prof_sync - remove a flow profile
2271 * @hw: pointer to the hardware structure
2272 * @blk: classification stage
2273 * @prof: pointer to flow profile to remove
2275 * Assumption: the caller has acquired the lock to the profile list
2277 static enum ice_status
2278 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2279 struct ice_flow_prof *prof)
2281 enum ice_status status;
2283 /* Remove all remaining flow entries before removing the flow profile */
2284 if (!LIST_EMPTY(&prof->entries)) {
2285 struct ice_flow_entry *e, *t;
2287 ice_acquire_lock(&prof->entries_lock);
/* Safe iteration: entries are unlinked/freed while walking */
2289 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2291 status = ice_flow_rem_entry_sync(hw, blk, e);
2296 ice_release_lock(&prof->entries_lock);
2299 if (blk == ICE_BLK_ACL) {
2300 struct ice_aqc_acl_profile_ranges query_rng_buf;
2301 struct ice_aqc_acl_prof_generic_frmt buf;
2304 /* Disassociate the scenario from the profile for the PF */
2305 status = ice_flow_acl_disassoc_scen(hw, prof);
2309 /* Clear the range-checker if the profile ID is no longer
2312 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
/* ICE_ERR_IN_USE only means another PF still uses it; that is
 * not a failure for this path, so skip the range-checker reset.
 */
2313 if (status && status != ICE_ERR_IN_USE) {
2315 } else if (!status) {
2316 /* Clear the range-checker value for profile ID */
2317 ice_memset(&query_rng_buf, 0,
2318 sizeof(struct ice_aqc_acl_profile_ranges),
2321 status = ice_flow_get_hw_prof(hw, blk, prof->id,
2326 status = ice_prog_acl_prof_ranges(hw, prof_id,
2327 &query_rng_buf, NULL);
2333 /* Remove all hardware profiles associated with this flow profile */
2334 status = ice_rem_prof(hw, blk, prof->id);
/* Unlink and free the software profile state */
2336 LIST_DEL(&prof->l_entry);
2337 ice_destroy_lock(&prof->entries_lock);
2339 ice_free(hw, prof->acts);
2347 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2348 * @buf: Destination buffer function writes partial xtrct sequence to
2349 * @info: Info about field
2352 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2353 struct ice_flow_fld_info *info)
/* Source byte index within the field vector: word index scaled to
 * bytes plus the byte part of the bit displacement.
 */
2358 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2359 info->xtrct.disp / BITS_PER_BYTE;
2360 dst = info->entry.val;
2361 for (i = 0; i < info->entry.last; i++)
2362 /* HW stores field vector words in LE, convert words back to BE
2363 * so constructed entries will end up in network order
2365 buf->byte_selection[dst++] = src++ ^ 1;
2369 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2370 * @hw: pointer to the hardware structure
2371 * @prof: pointer to flow profile
2373 static enum ice_status
2374 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2376 struct ice_aqc_acl_prof_generic_frmt buf;
2377 struct ice_flow_fld_info *info;
2378 enum ice_status status;
2382 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2384 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
/* ICE_ERR_IN_USE means the format is already programmed by a PF;
 * only a real error aborts here.
 */
2388 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2389 if (status && status != ICE_ERR_IN_USE)
2393 /* Program the profile dependent configuration. This is done
2394 * only once regardless of the number of PFs using that profile
2396 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2398 for (i = 0; i < prof->segs_cnt; i++) {
2399 struct ice_flow_seg_info *seg = &prof->segs[i];
2402 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2403 ICE_FLOW_FIELD_IDX_MAX) {
2404 info = &seg->fields[j];
/* Range fields use the word selection / range checkers;
 * all other fields use byte selection.
 */
2406 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2407 buf.word_selection[info->entry.val] =
2410 ice_flow_acl_set_xtrct_seq_fld(&buf,
2414 for (j = 0; j < seg->raws_cnt; j++) {
2415 info = &seg->raws[j].info;
2416 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
/* Initialize every PF's scenario slot to "invalid" */
2420 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2421 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2425 /* Update the current PF */
2426 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2427 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2433 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2434 * @hw: pointer to the hardware structure
2435 * @blk: classification stage
2436 * @vsi_handle: software VSI handle
2437 * @vsig: target VSI group
2439 * Assumption: the caller has already verified that the VSI to
2440 * be added has the same characteristics as the VSIG and will
2441 * thereby have access to all resources added to that VSIG.
2444 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2447 enum ice_status status;
/* Validate inputs before touching any HW state */
2449 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2450 return ICE_ERR_PARAM;
/* Translate the SW handle to a HW VSI number under the block lock */
2452 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2453 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2455 ice_release_lock(&hw->fl_profs_locks[blk]);
2461 * ice_flow_assoc_prof - associate a VSI with a flow profile
2462 * @hw: pointer to the hardware structure
2463 * @blk: classification stage
2464 * @prof: pointer to flow profile
2465 * @vsi_handle: software VSI handle
2467 * Assumption: the caller has acquired the lock to the profile list
2468 * and the software VSI handle has been validated
2471 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2472 struct ice_flow_prof *prof, u16 vsi_handle)
2474 enum ice_status status = ICE_SUCCESS;
/* No-op when the VSI is already associated with this profile */
2476 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
/* ACL profiles must have their extraction sequence programmed
 * before a HW profile/VSI association can be added.
 */
2477 if (blk == ICE_BLK_ACL) {
2478 status = ice_flow_acl_set_xtrct_seq(hw, prof);
2482 status = ice_add_prof_id_flow(hw, blk,
2483 ice_get_hw_vsi_num(hw,
/* Track the association in the profile's VSI bitmap */
2487 ice_set_bit(vsi_handle, prof->vsis);
2489 ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2497 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2498 * @hw: pointer to the hardware structure
2499 * @blk: classification stage
2500 * @prof: pointer to flow profile
2501 * @vsi_handle: software VSI handle
2503 * Assumption: the caller has acquired the lock to the profile list
2504 * and the software VSI handle has been validated
2506 static enum ice_status
2507 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2508 struct ice_flow_prof *prof, u16 vsi_handle)
2510 enum ice_status status = ICE_SUCCESS;
/* Only act when the VSI is currently associated with the profile */
2512 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2513 status = ice_rem_prof_id_flow(hw, blk,
2514 ice_get_hw_vsi_num(hw,
/* Mirror the HW removal in the profile's VSI bitmap */
2518 ice_clear_bit(vsi_handle, prof->vsis);
2520 ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2528 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2529 * @hw: pointer to the HW struct
2530 * @blk: classification stage
2531 * @dir: flow direction
2532 * @prof_id: unique ID to identify this flow profile
2533 * @segs: array of one or more packet segments that describe the flow
2534 * @segs_cnt: number of packet segments provided
2535 * @acts: array of default actions
2536 * @acts_cnt: number of default actions
2537 * @prof: stores the returned flow profile added
2540 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2541 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2542 struct ice_flow_action *acts, u8 acts_cnt,
2543 struct ice_flow_prof **prof)
2545 enum ice_status status;
/* Reject more segments than the flow infrastructure supports */
2547 if (segs_cnt > ICE_FLOW_SEG_MAX)
2548 return ICE_ERR_MAX_LIMIT;
2551 return ICE_ERR_PARAM;
2554 return ICE_ERR_BAD_PTR;
/* Validate the header stack before taking the profile list lock */
2556 status = ice_flow_val_hdrs(segs, segs_cnt);
2560 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2562 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2563 acts, acts_cnt, prof);
/* On success, publish the new profile on the block's list */
2565 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2567 ice_release_lock(&hw->fl_profs_locks[blk]);
2573 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2574 * @hw: pointer to the HW struct
2575 * @blk: the block for which the flow profile is to be removed
2576 * @prof_id: unique ID of the flow profile to be removed
2579 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2581 struct ice_flow_prof *prof;
2582 enum ice_status status;
/* Lookup and removal happen under the block's profile list lock */
2584 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2586 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2588 status = ICE_ERR_DOES_NOT_EXIST;
2592 /* prof becomes invalid after the call */
2593 status = ice_flow_rem_prof_sync(hw, blk, prof);
2596 ice_release_lock(&hw->fl_profs_locks[blk]);
2602 * ice_flow_find_entry - look for a flow entry using its unique ID
2603 * @hw: pointer to the HW struct
2604 * @blk: classification stage
2605 * @entry_id: unique ID to identify this flow entry
2607 * This function looks for the flow entry with the specified unique ID in all
2608 * flow profiles of the specified classification stage. If the entry is found,
2609 * and it returns the handle to the flow entry. Otherwise, it returns
2610 * ICE_FLOW_ENTRY_ID_INVAL.
2612 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2614 struct ice_flow_entry *found = NULL;
2615 struct ice_flow_prof *p;
2617 ice_acquire_lock(&hw->fl_profs_locks[blk]);
/* Search every profile's entry list; each list has its own lock */
2619 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2620 struct ice_flow_entry *e;
2622 ice_acquire_lock(&p->entries_lock);
2623 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2624 if (e->id == entry_id) {
2628 ice_release_lock(&p->entries_lock);
2634 ice_release_lock(&hw->fl_profs_locks[blk]);
/* Convert the entry pointer to an opaque handle for the caller */
2636 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2640 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2641 * @hw: pointer to the hardware structure
2642 * @acts: array of actions to be performed on a match
2643 * @acts_cnt: number of actions
2644 * @cnt_alloc: indicates if an ACL counter has been allocated.
2646 static enum ice_status
2647 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2648 u8 acts_cnt, bool *cnt_alloc)
/* Bitmap indexed by action type, used to reject duplicate types */
2650 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2653 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2656 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2657 return ICE_ERR_OUT_OF_RANGE;
2659 for (i = 0; i < acts_cnt; i++) {
/* NOTE(review): only these action types appear to be handled
 * in this check — confirm against the full action type list.
 */
2660 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2661 acts[i].type != ICE_FLOW_ACT_DROP &&
2662 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2663 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2666 /* If the caller want to add two actions of the same type, then
2667 * it is considered invalid configuration.
2669 if (ice_test_and_set_bit(acts[i].type, dup_check))
2670 return ICE_ERR_PARAM;
2673 /* Checks if ACL counters are needed. */
2674 for (i = 0; i < acts_cnt; i++) {
2675 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2676 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2677 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2678 struct ice_acl_cntrs cntrs = { 0 };
2679 enum ice_status status;
2682 cntrs.bank = 0; /* Only bank0 for the moment */
/* Dual counters track packets and bytes simultaneously */
2684 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2685 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2687 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2689 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2692 /* Counter index within the bank */
2693 acts[i].data.acl_act.value =
2694 CPU_TO_LE16(cntrs.first_cntr);
2703 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2704 * @fld: number of the given field
2705 * @info: info about field
2706 * @range_buf: range checker configuration buffer
2707 * @data: pointer to a data buffer containing flow entry's match values/masks
2708 * @range: Input/output param indicating which range checkers are being used
ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
			      struct ice_aqc_acl_profile_ranges *range_buf,
			      u8 *data, u8 *range)
	/* If not specified, default mask is all bits in field */
	new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
		    BIT(ice_flds_info[fld].size) - 1 :
		    (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
	/* If the mask is 0, then we don't need to worry about this input
	 * range checker value.
	 */
		/* NOTE(review): raw u16 loads from a byte buffer via cast —
		 * presumably offsets/alignment are guaranteed by the caller;
		 * verify.
		 */
		(*(u16 *)(data + info->src.last)) << info->xtrct.disp;
		(*(u16 *)(data + info->src.val)) << info->xtrct.disp;
	u8 range_idx = info->entry.val;
	/* Program the selected HW range-checker slot (big-endian fields) */
	range_buf->checker_cfg[range_idx].low_boundary =
		CPU_TO_BE16(new_low);
	range_buf->checker_cfg[range_idx].high_boundary =
		CPU_TO_BE16(new_high);
	range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
	/* Indicate which range checker is being used */
	*range |= BIT(range_idx);
2744 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2745 * @fld: number of the given field
2746 * @info: info about the field
2747 * @buf: buffer containing the entry
2748 * @dontcare: buffer containing don't care mask for entry
2749 * @data: pointer to a data buffer containing flow entry's match values/masks
ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
			    u8 *dontcare, u8 *data)
	u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
	bool use_mask = false;
	src = info->src.val;
	mask = info->src.mask;
	dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
	/* Bit displacement of the extracted field within its first byte */
	disp = info->xtrct.disp % BITS_PER_BYTE;
	if (mask != ICE_FLOW_FLD_OFF_INVAL)
	for (k = 0; k < info->entry.last; k++, dst++) {
		/* Add overflow bits from previous byte */
		buf[dst] = (tmp_s & 0xff00) >> 8;
		/* If mask is not valid, tmp_m is always zero, so just setting
		 * dontcare to 0 (no masked bits). If mask is valid, pulls in
		 * overflow bits of mask from prev byte
		 */
		dontcare[dst] = (tmp_m & 0xff00) >> 8;
		/* If there is displacement, last byte will only contain
		 * displaced data, but there is no more data to read from user
		 * buffer, so skip so as not to potentially read beyond end of
		 * buffer
		 */
		if (!disp || k < info->entry.last - 1) {
			/* Store shifted data to use in next byte */
			tmp_s = data[src++] << disp;
			/* Add current (shifted) byte */
			buf[dst] |= tmp_s & 0xff;
			/* Handle mask if valid */
				/* A set dontcare bit means "ignore this bit" */
				tmp_m = (~data[mask++] & 0xff) << disp;
				dontcare[dst] |= tmp_m & 0xff;
	/* Fill in don't care bits at beginning of field */
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
		for (k = 0; k < disp; k++)
			dontcare[dst] |= BIT(k);
	/* Bit position just past the end of the field in its final byte */
	end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
	/* Fill in don't care bits at end of field */
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
		      info->entry.last - 1;
		for (k = end_disp; k < BITS_PER_BYTE; k++)
			dontcare[dst] |= BIT(k);
2816 * ice_flow_acl_frmt_entry - Format ACL entry
2817 * @hw: pointer to the hardware structure
2818 * @prof: pointer to flow profile
2819 * @e: pointer to the flow entry
2820 * @data: pointer to a data buffer containing flow entry's match values/masks
2821 * @acts: array of actions to be performed on a match
2822 * @acts_cnt: number of actions
2824 * Formats the key (and key_inverse) to be matched from the data passed in,
2825 * along with data from the flow profile. This key/key_inverse pair makes up
2826 * the 'entry' for an ACL flow entry.
static enum ice_status
ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
			struct ice_flow_entry *e, u8 *data,
			struct ice_flow_action *acts, u8 acts_cnt)
	u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
	struct ice_aqc_acl_profile_ranges *range_buf = NULL;
	enum ice_status status;
	status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
	/* Format the result action */
	status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
		status = ICE_ERR_NO_MEMORY;
	/* Keep a private copy of the caller's actions on the entry */
	e->acts = (struct ice_flow_action *)
		ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
			   ICE_NONDMA_TO_NONDMA);
	e->acts_cnt = acts_cnt;
	/* Format the matching data */
	buf_sz = prof->cfg.scen->width;
	buf = (u8 *)ice_malloc(hw, buf_sz);
	dontcare = (u8 *)ice_malloc(hw, buf_sz);
	/* 'key' buffer will store both key and key_inverse, so must be twice
	 * the scenario width
	 */
	key = (u8 *)ice_malloc(hw, buf_sz * 2);
	range_buf = (struct ice_aqc_acl_profile_ranges *)
		ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
	/* Set don't care mask to all 1's to start, will zero out used bytes */
	ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
	for (i = 0; i < prof->segs_cnt; i++) {
		struct ice_flow_seg_info *seg = &prof->segs[i];
		/* Format every fixed field the segment matches on */
		ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
				     ICE_FLOW_FIELD_IDX_MAX) {
			struct ice_flow_fld_info *info = &seg->fields[j];
			/* Range fields go to HW range checkers; all others
			 * are encoded directly into buf/dontcare
			 */
			if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
				ice_flow_acl_frmt_entry_range(j, info,
				ice_flow_acl_frmt_entry_fld(j, info, buf,
		/* Raw (offset/length) fields are copied byte for byte */
		for (j = 0; j < seg->raws_cnt; j++) {
			struct ice_flow_fld_info *info = &seg->raws[j].info;
			u16 dst, src, mask, k;
			bool use_mask = false;
			src = info->src.val;
			dst = info->entry.val -
				ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
			mask = info->src.mask;
			if (mask != ICE_FLOW_FLD_OFF_INVAL)
			for (k = 0; k < info->entry.last; k++, dst++) {
				buf[dst] = data[src++];
					dontcare[dst] = ~data[mask++];
	/* The scenario's profile-ID byte must match exactly */
	buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
	dontcare[prof->cfg.scen->pid_idx] = 0;
	/* Format the buffer for direction flags */
	dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
	if (prof->dir == ICE_FLOW_RX)
		buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
	/* Match only the range checkers this entry actually uses */
	buf[prof->cfg.scen->rng_chk_idx] = range;
	/* Mark any unused range checkers as don't care */
	dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
	e->range_buf = range_buf;
		ice_free(hw, range_buf);
	/* Build the key/key_inverse pair from value + dontcare buffers */
	status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
	e->entry_sz = buf_sz * 2;
	ice_free(hw, dontcare);
	/* On failure, undo everything attached to the entry so far */
	if (status && range_buf) {
		ice_free(hw, range_buf);
		e->range_buf = NULL;
	if (status && e->acts) {
		ice_free(hw, e->acts);
	if (status && cnt_alloc)
		ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2979 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2980 * the compared data.
2981 * @prof: pointer to flow profile
2982 * @e: pointer to the comparing flow entry
2983 * @do_chg_action: decide if we want to change the ACL action
2984 * @do_add_entry: decide if we want to add the new ACL entry
2985 * @do_rem_entry: decide if we want to remove the current ACL entry
2987 * Find an ACL scenario entry that matches the compared data. In the same time,
2988 * this function also figure out:
2989 * a/ If we want to change the ACL action
2990 * b/ If we want to add the new ACL entry
2991 * c/ If we want to remove the current ACL entry
static struct ice_flow_entry *
ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
				  struct ice_flow_entry *e, bool *do_chg_action,
				  bool *do_add_entry, bool *do_rem_entry)
	struct ice_flow_entry *p, *return_entry = NULL;
	 * a/ There exists an entry with same matching data, but different
	 *    priority, then we remove this existing ACL entry. Then, we
	 *    will add the new entry to the ACL scenario.
	 * b/ There exists an entry with same matching data, priority, and
	 *    result action, then we do nothing
	 * c/ There exists an entry with same matching data, priority, but
	 *    different action, then only change the action's entry.
	 * d/ Else, we add this new entry to the ACL scenario.
	/* Defaults: treat the entry as brand new until proven otherwise */
	*do_chg_action = false;
	*do_add_entry = true;
	*do_rem_entry = false;
	LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
		if (memcmp(p->entry, e->entry, p->entry_sz))
		/* From this point, we have the same matching_data. */
		*do_add_entry = false;
		if (p->priority != e->priority) {
			/* matching data && !priority */
			*do_add_entry = true;
			*do_rem_entry = true;
		/* From this point, we will have matching_data && priority */
		if (p->acts_cnt != e->acts_cnt)
			*do_chg_action = true;
		/* NOTE(review): the inner scan flags a mismatch as soon as a
		 * single e->acts[j] differs from p->acts[i], which makes the
		 * comparison effectively order-sensitive — confirm this is
		 * the intended semantics.
		 */
		for (i = 0; i < p->acts_cnt; i++) {
			bool found_not_match = false;
			for (j = 0; j < e->acts_cnt; j++)
				if (memcmp(&p->acts[i], &e->acts[j],
					   sizeof(struct ice_flow_action))) {
					found_not_match = true;
			if (found_not_match) {
				*do_chg_action = true;
		/* (do_chg_action = true) means :
		 *    matching_data && priority && !result_action
		 * (do_chg_action = false) means :
		 *    matching_data && priority && result_action
		 */
	return return_entry;
3060 * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
static enum ice_acl_entry_prio
ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
	enum ice_acl_entry_prio acl_prio;
	case ICE_FLOW_PRIO_LOW:
		acl_prio = ICE_ACL_PRIO_LOW;
	case ICE_FLOW_PRIO_NORMAL:
		acl_prio = ICE_ACL_PRIO_NORMAL;
	case ICE_FLOW_PRIO_HIGH:
		acl_prio = ICE_ACL_PRIO_HIGH;
		/* Any unrecognized flow priority falls back to NORMAL */
		acl_prio = ICE_ACL_PRIO_NORMAL;
3087 * ice_flow_acl_union_rng_chk - Perform union operation between two
3088 * range-range checker buffers
3089 * @dst_buf: pointer to destination range checker buffer
3090 * @src_buf: pointer to source range checker buffer
3092 * For this function, we do the union between dst_buf and src_buf
3093 * range checker buffer, and we will save the result back to dst_buf
static enum ice_status
ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
			   struct ice_aqc_acl_profile_ranges *src_buf)
	if (!dst_buf || !src_buf)
		return ICE_ERR_BAD_PTR;
	for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
		struct ice_acl_rng_data *cfg_data = NULL, *in_data;
		bool will_populate = false;
		in_data = &src_buf->checker_cfg[i];
		/* Find either a free destination slot (mask == 0) or a slot
		 * already holding an identical checker configuration
		 */
		for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
			cfg_data = &dst_buf->checker_cfg[j];
			if (!cfg_data->mask ||
			    !memcmp(cfg_data, in_data,
				    sizeof(struct ice_acl_rng_data))) {
				will_populate = true;
		if (will_populate) {
			ice_memcpy(cfg_data, in_data,
				   sizeof(struct ice_acl_rng_data),
				   ICE_NONDMA_TO_NONDMA);
			/* No available slot left to program range checker */
			return ICE_ERR_MAX_LIMIT;
3138 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
3139 * @hw: pointer to the hardware structure
3140 * @prof: pointer to flow profile
3141 * @entry: double pointer to the flow entry
3143 * For this function, we will look at the current added entries in the
3144 * corresponding ACL scenario. Then, we will perform matching logic to
3145 * see if we want to add/modify/do nothing with this new entry.
static enum ice_status
ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
				 struct ice_flow_entry **entry)
	bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
	struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
	struct ice_acl_act_entry *acts = NULL;
	struct ice_flow_entry *exist;
	enum ice_status status = ICE_SUCCESS;
	struct ice_flow_entry *e;
	if (!entry || !(*entry) || !prof)
		return ICE_ERR_BAD_PTR;
	do_chg_rng_chk = false;
	status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
	/* Query the current range-checker value in FW */
	status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
	ice_memcpy(&cfg_rng_buf, &query_rng_buf,
		   sizeof(struct ice_aqc_acl_profile_ranges),
		   ICE_NONDMA_TO_NONDMA);
	/* Generate the new range-checker value */
	status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
	/* Reconfigure the range check if the buffer is changed. */
	do_chg_rng_chk = false;
	if (memcmp(&query_rng_buf, &cfg_rng_buf,
		   sizeof(struct ice_aqc_acl_profile_ranges))) {
		status = ice_prog_acl_prof_ranges(hw, prof_id,
						  &cfg_rng_buf, NULL);
		do_chg_rng_chk = true;
	/* Figure out if we want to (change the ACL action) and/or
	 * (Add the new ACL entry) and/or (Remove the current ACL entry)
	 */
	exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
						  &do_add_entry, &do_rem_entry);
		status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
	/* Prepare the result action buffer */
	/* NOTE(review): the element count here is e->entry_sz (key size in
	 * bytes) while the copy loop below iterates e->acts_cnt — this looks
	 * like an over-allocation and should presumably be e->acts_cnt;
	 * verify against upstream before changing.
	 */
	acts = (struct ice_acl_act_entry *)
		ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
		return ICE_ERR_NO_MEMORY;
	for (i = 0; i < e->acts_cnt; i++)
		ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
			   sizeof(struct ice_acl_act_entry),
			   ICE_NONDMA_TO_NONDMA);
		enum ice_acl_entry_prio prio;
		/* First half of e->entry is the key; second half is the
		 * key inverse
		 */
		keys = (u8 *)e->entry;
		inverts = keys + (e->entry_sz / 2);
		prio = ice_flow_acl_convert_to_acl_prio(e->priority);
		status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
					   inverts, acts, e->acts_cnt,
		e->scen_entry_idx = entry_idx;
		LIST_ADD(&e->l_entry, &prof->entries);
	if (do_chg_action) {
		/* For the action memory info, update the SW's copy of
		 * exist entry with e's action memory info
		 */
		ice_free(hw, exist->acts);
		exist->acts_cnt = e->acts_cnt;
		exist->acts = (struct ice_flow_action *)
			ice_calloc(hw, exist->acts_cnt,
				   sizeof(struct ice_flow_action));
			status = ICE_ERR_NO_MEMORY;
		ice_memcpy(exist->acts, e->acts,
			   sizeof(struct ice_flow_action) * e->acts_cnt,
			   ICE_NONDMA_TO_NONDMA);
		status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
					  exist->scen_entry_idx);
	if (do_chg_rng_chk) {
		/* In this case, we want to update the range checker
		 * information of the exist entry
		 */
		status = ice_flow_acl_union_rng_chk(exist->range_buf,
	/* As we don't add the new entry to our SW DB, deallocate its
	 * memories, and return the exist entry to the caller
	 */
	ice_dealloc_flow_entry(hw, e);
3288 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3289 * @hw: pointer to the hardware structure
3290 * @prof: pointer to flow profile
3291 * @e: double pointer to the flow entry
static enum ice_status
ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
			    struct ice_flow_entry **e)
	enum ice_status status;
	/* Serialize entry add/modify against other updates on this profile */
	ice_acquire_lock(&prof->entries_lock);
	status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
	ice_release_lock(&prof->entries_lock);
3307 * ice_flow_add_entry - Add a flow entry
3308 * @hw: pointer to the HW struct
3309 * @blk: classification stage
3310 * @prof_id: ID of the profile to add a new flow entry to
3311 * @entry_id: unique ID to identify this flow entry
3312 * @vsi_handle: software VSI handle for the flow entry
3313 * @prio: priority of the flow entry
3314 * @data: pointer to a data buffer containing flow entry's match values/masks
3315 * @acts: arrays of actions to be performed on a match
3316 * @acts_cnt: number of actions
3317 * @entry_h: pointer to buffer that receives the new flow entry's handle
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, struct ice_flow_action *acts, u8 acts_cnt,
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status = ICE_SUCCESS;
	/* ACL entries must indicate an action */
	if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
		return ICE_ERR_PARAM;
	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	ice_acquire_lock(&hw->fl_profs_locks[blk]);
	prof = ice_flow_find_prof_id(hw, blk, prof_id);
		status = ICE_ERR_DOES_NOT_EXIST;
	/* Allocate memory for the entry being added and associate
	 * the VSI to the found flow profile
	 */
	e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
		status = ICE_ERR_NO_MEMORY;
	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	ice_release_lock(&hw->fl_profs_locks[blk]);
	e->vsi_handle = vsi_handle;
	/* ACL will handle the entry management */
	status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
	status = ice_flow_acl_add_scen_entry(hw, prof, &e);
		/* Blocks other than ACL/FD/RSS are not supported here */
		status = ICE_ERR_NOT_IMPL;
	if (blk != ICE_BLK_ACL) {
		/* ACL will handle the entry management */
		ice_acquire_lock(&prof->entries_lock);
		LIST_ADD(&e->l_entry, &prof->entries);
		ice_release_lock(&prof->entries_lock);
	*entry_h = ICE_FLOW_ENTRY_HNDL(e);
	ice_free(hw, e->entry);
3406 * ice_flow_rem_entry - Remove a flow entry
3407 * @hw: pointer to the HW struct
3408 * @blk: classification stage
3409 * @entry_h: handle to the flow entry to be removed
enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
	struct ice_flow_entry *entry;
	struct ice_flow_prof *prof;
	enum ice_status status = ICE_SUCCESS;
	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
		return ICE_ERR_PARAM;
	/* Recover the entry pointer encoded in the opaque handle */
	entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
	/* Retain the pointer to the flow profile as the entry will be freed */
	ice_acquire_lock(&prof->entries_lock);
	status = ice_flow_rem_entry_sync(hw, blk, entry);
	ice_release_lock(&prof->entries_lock);
3436 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3437 * @seg: packet segment the field being set belongs to
3438 * @fld: field to be set
3439 * @field_type: type of the field
3440 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3441 * entry's input buffer
3442 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3444 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3445 * entry's input buffer
3447 * This helper function stores information of a field being matched, including
3448 * the type of the field and the locations of the value to match, the mask, and
3449 * the upper-bound value in the start of the input buffer for a flow entry.
3450 * This function should only be used for fixed-size data structures.
3452 * This function also opportunistically determines the protocol headers to be
3453 * present based on the fields being set. Some fields cannot be used alone to
3454 * determine the protocol headers present. Sometimes, fields for particular
3455 * protocol headers are not matched. In those cases, the protocol headers
3456 * must be explicitly set.
ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		     enum ice_flow_fld_match_type field_type, u16 val_loc,
		     u16 mask_loc, u16 last_loc)
	u64 bit = BIT_ULL(fld);
	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
	/* Record where value/mask/last live in the entry's input buffer */
	seg->fields[fld].type = field_type;
	seg->fields[fld].src.val = val_loc;
	seg->fields[fld].src.mask = mask_loc;
	seg->fields[fld].src.last = last_loc;
	/* Opportunistically mark the protocol header this field implies */
	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3478 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3479 * @seg: packet segment the field being set belongs to
3480 * @fld: field to be set
3481 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3482 * entry's input buffer
3483 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3485 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3486 * entry's input buffer
3487 * @range: indicate if field being matched is to be in a range
3489 * This function specifies the locations, in the form of byte offsets from the
3490 * start of the input buffer for a flow entry, from where the value to match,
3491 * the mask value, and upper value can be extracted. These locations are then
3492 * stored in the flow profile. When adding a flow entry associated with the
3493 * flow profile, these locations will be used to quickly extract the values and
3494 * create the content of a match entry. This function should only be used for
3495 * fixed-size data structures.
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
	/* A ranged match uses last_loc as the upper-bound location */
	enum ice_flow_fld_match_type t = range ?
		ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
	ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3508 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3509 * @seg: packet segment the field being set belongs to
3510 * @fld: field to be set
3511 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3512 * entry's input buffer
3513 * @pref_loc: location of prefix value from entry's input buffer
3514 * @pref_sz: size of the location holding the prefix value
3516 * This function specifies the locations, in the form of byte offsets from the
3517 * start of the input buffer for a flow entry, from where the value to match
3518 * and the IPv4 prefix value can be extracted. These locations are then stored
3519 * in the flow profile. When adding flow entries to the associated flow profile,
3520 * these locations can be used to quickly extract the values to create the
3521 * content of a match entry. This function should only be used for fixed-size
ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
			u16 val_loc, u16 pref_loc, u8 pref_sz)
	/* For this type of field, the "mask" location is for the prefix value's
	 * location and the "last" location is for the size of the location of
	 * the prefix value.
	 */
	ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
			     pref_loc, (u16)pref_sz);
3537 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3538 * @seg: packet segment the field being set belongs to
3539 * @off: offset of the raw field from the beginning of the segment in bytes
3540 * @len: length of the raw pattern to be matched
3541 * @val_loc: location of the value to match from entry's input buffer
3542 * @mask_loc: location of mask value from entry's input buffer
3544 * This function specifies the offset of the raw field to be match from the
3545 * beginning of the specified packet segment, and the locations, in the form of
3546 * byte offsets from the start of the input buffer for a flow entry, from where
3547 * the value to match and the mask value to be extracted. These locations are
3548 * then stored in the flow profile. When adding flow entries to the associated
3549 * flow profile, these locations can be used to quickly extract the values to
3550 * create the content of a match entry. This function should only be used for
3551 * fixed-size data structures.
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
		     u16 val_loc, u16 mask_loc)
	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
		seg->raws[seg->raws_cnt].off = off;
		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
		seg->raws[seg->raws_cnt].info.src.val = val_loc;
		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
		/* The "last" field is used to store the length of the field */
		seg->raws[seg->raws_cnt].info.src.last = len;
	/* Overflows of "raws" will be handled as an error condition later in
	 * the flow when this information is processed.
	 */
3573 * ice_flow_rem_vsi_prof - remove vsi from flow profile
3574 * @hw: pointer to the hardware structure
3575 * @blk: classification stage
3576 * @vsi_handle: software VSI handle
3577 * @prof_id: unique ID to identify this flow profile
3579 * This function removes the flow entries associated to the input
3580 * vsi handle and disassociates the vsi from the flow profile.
enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
	struct ice_flow_prof *prof = NULL;
	enum ice_status status = ICE_SUCCESS;
	if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	/* find flow profile pointer with input package block and profile id */
	/* NOTE(review): the lookup hardcodes ICE_BLK_FD even though a @blk
	 * parameter is accepted and used below — presumably only the FD
	 * block is supported by this helper; verify against callers.
	 */
	prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
		ice_debug(hw, ICE_DBG_PKG,
			  "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
		return ICE_ERR_DOES_NOT_EXIST;
	/* Remove all remaining flow entries before removing the flow profile */
	if (!LIST_EMPTY(&prof->entries)) {
		struct ice_flow_entry *e, *t;
		ice_acquire_lock(&prof->entries_lock);
		LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
			/* Only remove entries owned by this VSI */
			if (e->vsi_handle != vsi_handle)
			status = ice_flow_rem_entry_sync(hw, blk, e);
		ice_release_lock(&prof->entries_lock);
	/* disassociate the flow profile from sw vsi handle */
	status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		ice_debug(hw, ICE_DBG_PKG,
			  "ice_flow_disassoc_prof() failed with status=%d\n",
3627 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3628 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_ETH_NON_IP | ICE_FLOW_SEG_HDR_VLAN)
3630 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3631 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3633 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3634 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3636 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3637 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3638 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3639 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3642 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3643 * @segs: pointer to the flow field segment(s)
3644 * @seg_cnt: segment count
3645 * @cfg: configure parameters
3647 * Helper function to extract fields from hash bitmap and use flow
3648 * header value to set flow field segment for further use in flow
3649 * profile entry or removal.
static enum ice_status
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
			  const struct ice_rss_hash_cfg *cfg)
	struct ice_flow_seg_info *seg;
	/* set inner most segment */
	seg = &segs[seg_cnt - 1];
	/* Enable every hash field requested in cfg->hash_flds */
	ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
			     ICE_FLOW_FIELD_IDX_MAX)
		ice_flow_set_fld(seg, (enum ice_flow_field)i,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
	/* set outer most header */
	if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
		segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
			ICE_FLOW_SEG_HDR_IPV_FRAG |
			ICE_FLOW_SEG_HDR_IPV_OTHER;
	else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
		segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
			ICE_FLOW_SEG_HDR_IPV_FRAG |
			ICE_FLOW_SEG_HDR_IPV_OTHER;
	else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE)
		segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
			ICE_FLOW_SEG_HDR_GRE |
			ICE_FLOW_SEG_HDR_IPV_OTHER;
	else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE)
		segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
			ICE_FLOW_SEG_HDR_GRE |
			ICE_FLOW_SEG_HDR_IPV_OTHER;
	/* Reject headers RSS cannot hash on */
	if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
	    ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
	    ~ICE_FLOW_SEG_HDR_IPV_FRAG)
		return ICE_ERR_PARAM;
	/* At most one L3 protocol may be selected */
	val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
	if (val && !ice_is_pow2(val))
	/* At most one L4 protocol may be selected */
	val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
	if (val && !ice_is_pow2(val))
3705 * ice_rem_vsi_rss_list - remove VSI from RSS list
3706 * @hw: pointer to the hardware structure
3707 * @vsi_handle: software VSI handle
3709 * Remove the VSI from all RSS configurations in the list.
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
	struct ice_rss_cfg *r, *tmp;
	if (LIST_EMPTY(&hw->rss_list_head))
	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
				 ice_rss_cfg, l_entry)
		/* Delete the config entirely once no VSI references it */
		if (ice_test_and_clear_bit(vsi_handle, r->vsis))
			if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
				LIST_DEL(&r->l_entry);
	ice_release_lock(&hw->rss_locks);
3730 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3731 * @hw: pointer to the hardware structure
3732 * @vsi_handle: software VSI handle
3734 * This function will iterate through all flow profiles and disassociate
3735 * the VSI from that profile. If the flow profile has no VSIs it will
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *p, *t;
	enum ice_status status = ICE_SUCCESS;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	if (LIST_EMPTY(&hw->fl_profs[blk]))
	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
		if (ice_is_bit_set(p->vsis, vsi_handle)) {
			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
			/* Remove the profile entirely once no VSI uses it */
			if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
				status = ice_flow_rem_prof(hw, blk, p->id);
	ice_release_lock(&hw->rss_locks);
3770 * ice_get_rss_hdr_type - get a RSS profile's header type
3771 * @prof: RSS flow profile
static enum ice_rss_cfg_hdr_type
ice_get_rss_hdr_type(struct ice_flow_prof *prof)
	enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
	if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
		hdr_type = ICE_RSS_OUTER_HEADERS;
	} else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
		/* Tunneled profile: classify by what the outer segment
		 * matches (none / IPv4 / IPv6)
		 */
		if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
			hdr_type = ICE_RSS_INNER_HEADERS;
		if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
			hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
		if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
			hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3793 * ice_rem_rss_list - remove RSS configuration from list
3794 * @hw: pointer to the hardware structure
3795 * @vsi_handle: software VSI handle
3796 * @prof: pointer to flow profile
3798 * Assumption: lock has already been acquired for RSS list
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
	enum ice_rss_cfg_hdr_type hdr_type;
	struct ice_rss_cfg *r, *tmp;
	/* Search for RSS hash fields associated to the VSI that match the
	 * hash configurations associated to the flow profile. If found
	 * remove from the RSS entry list of the VSI context and delete entry.
	 */
	hdr_type = ice_get_rss_hdr_type(prof);
	LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
				 ice_rss_cfg, l_entry)
		/* Match on hash fields, additional headers and header type */
		if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
		    r->hash.hdr_type == hdr_type) {
			ice_clear_bit(vsi_handle, r->vsis);
			/* Free the config once its VSI bitmap is empty */
			if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
				LIST_DEL(&r->l_entry);
3826 * ice_add_rss_list - add RSS configuration to list
3827 * @hw: pointer to the hardware structure
3828 * @vsi_handle: software VSI handle
3829 * @prof: pointer to flow profile
3831 * Assumption: lock has already been acquired for RSS list
static enum ice_status
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
	enum ice_rss_cfg_hdr_type hdr_type;
	struct ice_rss_cfg *r, *rss_cfg;
	hdr_type = ice_get_rss_hdr_type(prof);
	/* Reuse an existing matching config: just mark this VSI in it */
	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
			    ice_rss_cfg, l_entry)
		if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
		    r->hash.hdr_type == hdr_type) {
			ice_set_bit(vsi_handle, r->vsis);
	/* No match found: create a new RSS configuration node */
	rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
		return ICE_ERR_NO_MEMORY;
	rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
	rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
	rss_cfg->hash.hdr_type = hdr_type;
	rss_cfg->hash.symm = prof->cfg.symm;
	ice_set_bit(vsi_handle, rss_cfg->vsis);
	LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Shift/mask pairs used to pack the three components of a 64-bit RSS
 * flow profile ID; the bit layout is documented in the comment below.
 */
3864 #define ICE_FLOW_PROF_HASH_S 0
3865 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3866 #define ICE_FLOW_PROF_HDR_S 32
3867 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3868 #define ICE_FLOW_PROF_ENCAP_S 62
3869 #define ICE_FLOW_PROF_ENCAP_M (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3871 /* Flow profile ID format:
3872 * [0:31] - Packet match fields
3873 * [32:61] - Protocol header
3874 * [62:63] - Encapsulation flag:
3877 * 2 for tunneled with outer ipv4
3878 * 3 for tunneled with outer ipv6
/* Builds a unique profile ID by masking each component into its field;
 * out-of-range bits in hdr/encap are silently truncated by the masks.
 */
3880 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3881 ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3882 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3883 (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
/* Program one byte-wide symmetric-hash entry for @prof_id: field-vector
 * word @src is paired with word @dst. Each 32-bit GLQF_HSYMM register
 * packs four such byte entries, so @src selects register index (src/4)
 * and byte lane (src%4). The 0x80 bit presumably marks the entry as
 * valid/enabled — NOTE(review): confirm against the E810 register spec.
 */
3886 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3888 u32 s = ((src % 4) << 3); /* byte shift */
3889 u32 v = dst | 0x80; /* value to program */
3890 u8 i = src / 4; /* register index */
/* Read-modify-write: replace only the target byte lane, keep the rest */
3893 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3894 reg = (reg & ~(0xff << s)) | (v << s);
3895 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
/* Pair @len consecutive field-vector words starting at @src with the
 * words starting at @dst, programming the mapping in both directions so
 * the hash is symmetric (src XOR dst == dst XOR src). Word indices are
 * converted through fv_last_word because the field vector in GLQF_HSYMM
 * is indexed in reverse (see inline comment below).
 */
3899 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3902 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3905 for (i = 0; i < len; i++) {
3906 ice_rss_config_xor_word(hw, prof_id,
3907 /* Yes, field vector in GLQF_HSYMM and
3908 * GLQF_HINSET is inversed!
3910 fv_last_word - (src + i),
3911 fv_last_word - (dst + i));
/* Mirror entry: program the reverse direction as well */
3912 ice_rss_config_xor_word(hw, prof_id,
3913 fv_last_word - (dst + i),
3914 fv_last_word - (src + i));
/* Reprogram the GLQF_HSYMM registers for @prof so symmetric (XOR) RSS
 * matches the profile's current configuration: registers are first
 * cleared, then SRC/DST address and port field pairs that were actually
 * extracted (prot_id != 0) are cross-wired. The hardware profile ID is
 * resolved from the software profile via the RSS block's profile map,
 * under that map's lock.
 */
3919 ice_rss_update_symm(struct ice_hw *hw,
3920 struct ice_flow_prof *prof)
3922 struct ice_prof_map *map;
3925 ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3926 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3928 prof_id = map->prof_id;
3929 ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3932 /* clear to default */
/* All 6 GLQF_HSYMM registers per profile are zeroed before re-programming */
3933 for (m = 0; m < 6; m++)
3934 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3935 if (prof->cfg.symm) {
/* Symmetric hashing requested: locate the extraction info of each
 * SRC/DST field pair in the innermost segment.
 */
3936 struct ice_flow_seg_info *seg =
3937 &prof->segs[prof->segs_cnt - 1];
3939 struct ice_flow_seg_xtrct *ipv4_src =
3940 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3941 struct ice_flow_seg_xtrct *ipv4_dst =
3942 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3943 struct ice_flow_seg_xtrct *ipv6_src =
3944 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3945 struct ice_flow_seg_xtrct *ipv6_dst =
3946 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3948 struct ice_flow_seg_xtrct *tcp_src =
3949 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3950 struct ice_flow_seg_xtrct *tcp_dst =
3951 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3953 struct ice_flow_seg_xtrct *udp_src =
3954 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3955 struct ice_flow_seg_xtrct *udp_dst =
3956 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3958 struct ice_flow_seg_xtrct *sctp_src =
3959 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3960 struct ice_flow_seg_xtrct *sctp_dst =
3961 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
/* prot_id != 0 means both fields of the pair were extracted into the
 * field vector; the word counts (2 for IPv4 addresses, 8 for IPv6,
 * 1 for 16-bit L4 ports) reflect each field's size in 16-bit words.
 */
3964 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3965 ice_rss_config_xor(hw, prof_id,
3966 ipv4_src->idx, ipv4_dst->idx, 2);
3969 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3970 ice_rss_config_xor(hw, prof_id,
3971 ipv6_src->idx, ipv6_dst->idx, 8);
3974 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3975 ice_rss_config_xor(hw, prof_id,
3976 tcp_src->idx, tcp_dst->idx, 1);
3979 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3980 ice_rss_config_xor(hw, prof_id,
3981 udp_src->idx, udp_dst->idx, 1);
3984 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3985 ice_rss_config_xor(hw, prof_id,
3986 sctp_src->idx, sctp_dst->idx, 1);
3991 * ice_add_rss_cfg_sync - add an RSS configuration
3992 * @hw: pointer to the hardware structure
3993 * @vsi_handle: software VSI handle
3994 * @cfg: configure parameters
3996 * Assumption: lock has already been acquired for RSS list
3998 static enum ice_status
3999 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
4000 const struct ice_rss_hash_cfg *cfg)
4002 const enum ice_block blk = ICE_BLK_RSS;
4003 struct ice_flow_prof *prof = NULL;
4004 struct ice_flow_seg_info *segs;
4005 enum ice_status status;
/* Outer-only configurations need a single segment; any tunneled
 * (inner-header) configuration needs the maximum segment count.
 */
4008 segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
4009 ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
4011 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
4014 return ICE_ERR_NO_MEMORY;
4016 /* Construct the packet segment info from the hashed fields */
4017 status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
4021 /* Search for a flow profile that has matching headers, hash fields
4022 * and has the input VSI associated to it. If found, no further
4023 * operations required and exit.
4025 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4027 ICE_FLOW_FIND_PROF_CHK_FLDS |
4028 ICE_FLOW_FIND_PROF_CHK_VSI);
/* Exact match: only the symmetric flag may need refreshing */
4030 if (prof->cfg.symm == cfg->symm)
4032 prof->cfg.symm = cfg->symm;
4036 /* Check if a flow profile exists with the same protocol headers and
4037 * associated with the input VSI. If so disassociate the VSI from
4038 * this profile. The VSI will be added to a new profile created with
4039 * the protocol header and new hash field configuration.
4041 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4042 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
4044 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
4046 ice_rem_rss_list(hw, vsi_handle, prof);
4050 /* Remove profile if it has no VSIs associated */
4051 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
4052 status = ice_flow_rem_prof(hw, blk, prof->id);
4058 /* Search for a profile that has same match fields only. If this
4059 * exists then associate the VSI to this profile.
4061 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4063 ICE_FLOW_FIND_PROF_CHK_FLDS);
4065 if (prof->cfg.symm == cfg->symm) {
4066 status = ice_flow_assoc_prof(hw, blk, prof,
4069 status = ice_add_rss_list(hw, vsi_handle,
4072 /* if a profile exist but with different symmetric
4073 * requirement, just return error.
4075 status = ICE_ERR_NOT_SUPPORTED;
4080 /* Create a new flow profile with generated profile and packet
4081 * segment information.
/* Profile ID encodes hash fields, innermost headers and encapsulation
 * per ICE_FLOW_GEN_PROFID so equivalent configs map to the same ID.
 */
4083 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
4084 ICE_FLOW_GEN_PROFID(cfg->hash_flds,
4085 segs[segs_cnt - 1].hdrs,
4087 segs, segs_cnt, NULL, 0, &prof);
4091 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
4092 /* If association to a new flow profile failed then this profile can
4096 ice_flow_rem_prof(hw, blk, prof->id);
/* Track the new configuration and program symmetric hashing in HW */
4100 status = ice_add_rss_list(hw, vsi_handle, prof);
4102 prof->cfg.symm = cfg->symm;
4104 ice_rss_update_symm(hw, prof);
4112 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
4113 * @hw: pointer to the hardware structure
4114 * @vsi_handle: software VSI handle
4115 * @cfg: configure parameters
4117 * This function will generate a flow profile based on fields associated with
4118 * the input fields to hash on, the flow type and use the VSI number to add
4119 * a flow entry to the profile.
4122 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4123 const struct ice_rss_hash_cfg *cfg)
/* local_cfg is a mutable working copy so hdr_type can be rewritten per
 * pass below (presumably initialized from *cfg in elided code — confirm).
 */
4125 struct ice_rss_hash_cfg local_cfg;
4126 enum ice_status status;
/* Reject invalid VSI, NULL/out-of-range cfg, or an empty field bitmap */
4128 if (!ice_is_vsi_valid(hw, vsi_handle) ||
4129 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4130 cfg->hash_flds == ICE_HASH_INVALID)
4131 return ICE_ERR_PARAM;
4134 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
/* Specific header type: a single sync pass under the RSS lock */
4135 ice_acquire_lock(&hw->rss_locks);
4136 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4137 ice_release_lock(&hw->rss_locks);
/* ICE_RSS_ANY_HEADERS: expand into an OUTER pass followed by an
 * INNER pass, both under one hold of the RSS lock.
 */
4139 ice_acquire_lock(&hw->rss_locks);
4140 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4141 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4143 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4144 status = ice_add_rss_cfg_sync(hw, vsi_handle,
4147 ice_release_lock(&hw->rss_locks);
4154 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
4155 * @hw: pointer to the hardware structure
4156 * @vsi_handle: software VSI handle
4157 * @cfg: configure parameters
4159 * Assumption: lock has already been acquired for RSS list
4161 static enum ice_status
4162 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
4163 const struct ice_rss_hash_cfg *cfg)
4165 const enum ice_block blk = ICE_BLK_RSS;
4166 struct ice_flow_seg_info *segs;
4167 struct ice_flow_prof *prof;
4168 enum ice_status status;
/* Segment count mirrors ice_add_rss_cfg_sync: one segment for
 * outer-only, max segments for any tunneled configuration.
 */
4171 segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
4172 ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
4173 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
4176 return ICE_ERR_NO_MEMORY;
4178 /* Construct the packet segment info from the hashed fields */
4179 status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
/* Only an exact profile (headers AND hash fields) can be removed */
4183 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
4185 ICE_FLOW_FIND_PROF_CHK_FLDS);
4187 status = ICE_ERR_DOES_NOT_EXIST;
4191 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
4195 /* Remove RSS configuration from VSI context before deleting
4198 ice_rem_rss_list(hw, vsi_handle, prof);
/* Delete the flow profile once no VSI references it */
4200 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
4201 status = ice_flow_rem_prof(hw, blk, prof->id);
4209 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
4210 * @hw: pointer to the hardware structure
4211 * @vsi_handle: software VSI handle
4212 * @cfg: configure parameters
4214 * This function will lookup the flow profile based on the input
4215 * hash field bitmap, iterate through the profile entry list of
4216 * that profile and find entry associated with input VSI to be
4217 * removed. Calls are made to underlying flow apis which will in
4218 * turn build or update buffers for RSS XLT1 section.
4221 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4222 const struct ice_rss_hash_cfg *cfg)
/* Working copy so hdr_type can be rewritten for the ANY_HEADERS
 * expansion below (presumably copied from *cfg in elided code).
 */
4224 struct ice_rss_hash_cfg local_cfg;
4225 enum ice_status status;
/* Same parameter validation as ice_add_rss_cfg */
4227 if (!ice_is_vsi_valid(hw, vsi_handle) ||
4228 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4229 cfg->hash_flds == ICE_HASH_INVALID)
4230 return ICE_ERR_PARAM;
4232 ice_acquire_lock(&hw->rss_locks);
4234 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4235 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
/* ICE_RSS_ANY_HEADERS: remove both the outer and the inner variant */
4237 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4238 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4241 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4242 status = ice_rem_rss_cfg_sync(hw, vsi_handle,
4246 ice_release_lock(&hw->rss_locks);
4252 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
4253 * @hw: pointer to the hardware structure
4254 * @vsi_handle: software VSI handle
4256 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
4258 enum ice_status status = ICE_SUCCESS;
4259 struct ice_rss_cfg *r;
4261 if (!ice_is_vsi_valid(hw, vsi_handle))
4262 return ICE_ERR_PARAM;
/* Re-apply every saved RSS configuration whose VSI bitmap includes this
 * VSI (e.g. after a reset), holding the RSS lock for the whole walk.
 */
4264 ice_acquire_lock(&hw->rss_locks);
4265 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4266 ice_rss_cfg, l_entry) {
4267 if (ice_is_bit_set(r->vsis, vsi_handle)) {
4268 status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
4273 ice_release_lock(&hw->rss_locks);
4279 * ice_get_rss_cfg - returns hashed fields for the given header types
4280 * @hw: pointer to the hardware structure
4281 * @vsi_handle: software VSI handle
4282 * @hdrs: protocol header type
4284 * This function will return the match fields of the first instance of flow
4285 * profile having the given header types and containing input VSI
4287 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4289 u64 rss_hash = ICE_HASH_INVALID;
4290 struct ice_rss_cfg *r;
4292 /* verify if the protocol header is non zero and VSI is valid */
4293 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4294 return ICE_HASH_INVALID;
4296 ice_acquire_lock(&hw->rss_locks);
4297 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4298 ice_rss_cfg, l_entry)
4299 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4300 r->hash.addl_hdrs == hdrs) {
4301 rss_hash = r->hash.hash_flds;
4304 ice_release_lock(&hw->rss_locks);