/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2021 Intel Corporation
 */
5 #include "ice_common.h"
/* Size of known protocol header fields
 *
 * All sizes are expressed in BYTES; the ICE_FLOW_FLD_INFO* initializer
 * macros below convert them to bits via BITS_PER_BYTE.
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE 2
#define ICE_FLOW_FLD_SZ_VLAN 2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
/* IPv6 address prefixes (first 32/48/64 bits of the full 128-bit address) */
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
#define ICE_FLOW_FLD_SZ_IPV4_ID 2
#define ICE_FLOW_FLD_SZ_IPV6_ID 4
#define ICE_FLOW_FLD_SZ_IP_CHKSUM 2
#define ICE_FLOW_FLD_SZ_TCP_CHKSUM 2
#define ICE_FLOW_FLD_SZ_UDP_CHKSUM 2
#define ICE_FLOW_FLD_SZ_SCTP_CHKSUM 4
#define ICE_FLOW_FLD_SZ_IP_DSCP 1
#define ICE_FLOW_FLD_SZ_IP_TTL 1
#define ICE_FLOW_FLD_SZ_IP_PROT 1
#define ICE_FLOW_FLD_SZ_PORT 2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
#define ICE_FLOW_FLD_SZ_ICMP_CODE 1
#define ICE_FLOW_FLD_SZ_ARP_OPER 2
#define ICE_FLOW_FLD_SZ_GRE_KEYID 4
#define ICE_FLOW_FLD_SZ_GTP_TEID 4
#define ICE_FLOW_FLD_SZ_GTP_QFI 2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2
#define ICE_FLOW_FLD_SZ_PFCP_SEID 8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID 4
#define ICE_FLOW_FLD_SZ_ESP_SPI 4
#define ICE_FLOW_FLD_SZ_AH_SPI 4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4
#define ICE_FLOW_FLD_SZ_VXLAN_VNI 4
#define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
42 /* Describe properties of a protocol header field */
43 struct ice_flow_field_info {
44 enum ice_flow_seg_hdr hdr;
45 s16 off; /* Offset from start of a protocol header, in bits */
46 u16 size; /* Size of fields in bits */
47 u16 mask; /* 16-bit mask for field */
/* Build an ice_flow_field_info initializer with no match mask; the offset
 * and size arguments are given in bytes and stored in bits.
 * NOTE(review): the ".hdr = _hdr" line and closing brace were truncated in
 * this copy (the _hdr parameter was otherwise unused and the brace was
 * unbalanced); restored here.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
}
/* Same as ICE_FLOW_FLD_INFO() but with an explicit 16-bit match mask.
 * NOTE(review): the ".hdr = _hdr", ".mask = _mask" lines and closing brace
 * were truncated in this copy (both parameters were otherwise unused);
 * restored here.
 */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
64 /* Table containing properties of supported protocol header fields */
66 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
68 /* ICE_FLOW_FIELD_IDX_ETH_DA */
69 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
70 /* ICE_FLOW_FIELD_IDX_ETH_SA */
71 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
72 /* ICE_FLOW_FIELD_IDX_S_VLAN */
73 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
74 /* ICE_FLOW_FIELD_IDX_C_VLAN */
75 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
76 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
77 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
79 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
80 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
82 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
83 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
85 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
86 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
87 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
88 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
89 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
90 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
91 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
92 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
93 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
94 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
95 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
96 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
97 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
99 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
100 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
101 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
102 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
103 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
104 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
105 /* ICE_FLOW_FIELD_IDX_IPV4_CHKSUM */
106 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 10, ICE_FLOW_FLD_SZ_IP_CHKSUM),
107 /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
108 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
109 ICE_FLOW_FLD_SZ_IPV4_ID),
110 /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
111 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
112 ICE_FLOW_FLD_SZ_IPV6_ID),
113 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
114 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
115 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
116 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
118 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
119 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
120 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
121 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
122 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
123 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
124 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
125 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
126 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
127 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
128 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
129 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
130 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
132 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
133 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
134 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
135 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
136 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
137 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
138 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
139 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
140 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
141 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
142 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
143 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
144 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
145 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
146 /* ICE_FLOW_FIELD_IDX_TCP_CHKSUM */
147 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 16, ICE_FLOW_FLD_SZ_TCP_CHKSUM),
148 /* ICE_FLOW_FIELD_IDX_UDP_CHKSUM */
149 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 6, ICE_FLOW_FLD_SZ_UDP_CHKSUM),
150 /* ICE_FLOW_FIELD_IDX_SCTP_CHKSUM */
151 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 8,
152 ICE_FLOW_FLD_SZ_SCTP_CHKSUM),
154 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
155 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
156 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
157 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
158 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
159 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
160 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
161 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
162 /* ICE_FLOW_FIELD_IDX_ARP_OP */
163 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
165 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
166 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
167 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
168 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
170 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
171 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
173 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
174 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
175 ICE_FLOW_FLD_SZ_GTP_TEID),
176 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
177 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
178 ICE_FLOW_FLD_SZ_GTP_TEID),
179 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
180 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
181 ICE_FLOW_FLD_SZ_GTP_TEID),
182 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
183 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
184 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
185 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
186 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
187 ICE_FLOW_FLD_SZ_GTP_TEID),
188 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
189 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
190 ICE_FLOW_FLD_SZ_GTP_TEID),
192 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
193 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
194 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
196 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
197 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
198 ICE_FLOW_FLD_SZ_PFCP_SEID),
200 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
201 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
202 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
204 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
205 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
206 ICE_FLOW_FLD_SZ_ESP_SPI),
208 /* ICE_FLOW_FIELD_IDX_AH_SPI */
209 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
210 ICE_FLOW_FLD_SZ_AH_SPI),
212 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
213 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
214 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
215 /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
216 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
217 ICE_FLOW_FLD_SZ_VXLAN_VNI),
219 /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
220 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
221 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
223 /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
224 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
225 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
232 static const u32 ice_ptypes_mac_ofos[] = {
233 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
234 0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
235 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
236 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
237 0x00000000, 0x00000000, 0x00000000, 0x00000000,
238 0x00000000, 0x00000000, 0x00000000, 0x00000000,
239 0x00000000, 0x00000000, 0x00000000, 0x00000000,
240 0x00000000, 0x00000000, 0x00000000, 0x00000000,
243 /* Packet types for packets with an Innermost/Last MAC VLAN header */
244 static const u32 ice_ptypes_macvlan_il[] = {
245 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
246 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
247 0x00000000, 0x00000000, 0x00000000, 0x00000000,
248 0x00000000, 0x00000000, 0x00000000, 0x00000000,
249 0x00000000, 0x00000000, 0x00000000, 0x00000000,
250 0x00000000, 0x00000000, 0x00000000, 0x00000000,
251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
 * does NOT include IPV4 other PTYPEs
 */
258 static const u32 ice_ptypes_ipv4_ofos[] = {
259 0x1D800000, 0x24000800, 0x00000000, 0x00000000,
260 0x00000000, 0x00000155, 0x00000000, 0x00000000,
261 0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
262 0x00001500, 0x00000000, 0x00000000, 0x00000000,
263 0x00000000, 0x00000000, 0x00000000, 0x00000000,
264 0x00000000, 0x00000000, 0x00000000, 0x00000000,
265 0x00000000, 0x00000000, 0x00000000, 0x00000000,
266 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
 * includes IPV4 other PTYPEs
 */
272 static const u32 ice_ptypes_ipv4_ofos_all[] = {
273 0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
274 0x00000000, 0x00000155, 0x00000000, 0x00000000,
275 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
276 0x03FFD000, 0x00000000, 0x02FBEFBC, 0x00000000,
277 0x00000000, 0x00000000, 0x00000000, 0x00000000,
278 0x00000000, 0x00000000, 0x00000000, 0x00000000,
279 0x00000000, 0x00000000, 0x00000000, 0x00000000,
280 0x00000000, 0x00000000, 0x00000000, 0x00000000,
283 /* Packet types for packets with an Innermost/Last IPv4 header */
284 static const u32 ice_ptypes_ipv4_il[] = {
285 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
286 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
287 0x00000000, 0x00000000, 0x001FF800, 0x00100000,
288 0xFC0FC000, 0x00000000, 0xBC0BC0BC, 0x00000BC0,
289 0x00000000, 0x00000000, 0x00000000, 0x00000000,
290 0x00000000, 0x00000000, 0x00000000, 0x00000000,
291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
 * does NOT include IPV6 other PTYPEs
 */
298 static const u32 ice_ptypes_ipv6_ofos[] = {
299 0x00000000, 0x00000000, 0x76000000, 0x10002000,
300 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
301 0x00000000, 0x03F00000, 0x00000540, 0x00000000,
302 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
303 0x00000000, 0x00000000, 0x00000000, 0x00000000,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
305 0x00000000, 0x00000000, 0x00000000, 0x00000000,
306 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
 * includes IPV6 other PTYPEs
 */
312 static const u32 ice_ptypes_ipv6_ofos_all[] = {
313 0x00000000, 0x00000000, 0x76000000, 0x1EFDE000,
314 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
315 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
316 0xFC002000, 0x0000003F, 0xBC000000, 0x0002FBEF,
317 0x00000000, 0x00000000, 0x00000000, 0x00000000,
318 0x00000000, 0x00000000, 0x00000000, 0x00000000,
319 0x00000000, 0x00000000, 0x00000000, 0x00000000,
320 0x00000000, 0x00000000, 0x00000000, 0x00000000,
323 /* Packet types for packets with an Innermost/Last IPv6 header */
324 static const u32 ice_ptypes_ipv6_il[] = {
325 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
326 0x00000770, 0x00000000, 0x00000000, 0x00000000,
327 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
328 0x03F00000, 0x0000003F, 0x02F02F00, 0x0002F02F,
329 0x00000000, 0x00000000, 0x00000000, 0x00000000,
330 0x00000000, 0x00000000, 0x00000000, 0x00000000,
331 0x00000000, 0x00000000, 0x00000000, 0x00000000,
332 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single
 * non-frag IPv4 header - no L4
 */
338 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
339 0x10800000, 0x04000800, 0x00000000, 0x00000000,
340 0x00000000, 0x00000000, 0x00000000, 0x00000000,
341 0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
342 0x00001500, 0x00000000, 0x00000000, 0x00000000,
343 0x00000000, 0x00000000, 0x00000000, 0x00000000,
344 0x00000000, 0x00000000, 0x00000000, 0x00000000,
345 0x00000000, 0x00000000, 0x00000000, 0x00000000,
346 0x00000000, 0x00000000, 0x00000000, 0x00000000,
349 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
350 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
351 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
352 0x00000008, 0x00000000, 0x00000000, 0x00000000,
353 0x00000000, 0x00000000, 0x00139800, 0x00000000,
354 0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
355 0x00000000, 0x00000000, 0x00000000, 0x00000000,
356 0x00000000, 0x00000000, 0x00000000, 0x00000000,
357 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single
 * non-frag IPv6 header - no L4
 */
364 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
365 0x00000000, 0x00000000, 0x42000000, 0x10002000,
366 0x00000000, 0x00000000, 0x00000000, 0x00000000,
367 0x00000000, 0x02300000, 0x00000540, 0x00000000,
368 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
369 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 0x00000000, 0x00000000, 0x00000000, 0x00000000,
371 0x00000000, 0x00000000, 0x00000000, 0x00000000,
372 0x00000000, 0x00000000, 0x00000000, 0x00000000,
375 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
376 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
377 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
378 0x00000430, 0x00000000, 0x00000000, 0x00000000,
379 0x00000000, 0x00000000, 0x4e600000, 0x00000000,
380 0x02300000, 0x00000023, 0x00000000, 0x00000000,
381 0x00000000, 0x00000000, 0x00000000, 0x00000000,
382 0x00000000, 0x00000000, 0x00000000, 0x00000000,
383 0x00000000, 0x00000000, 0x00000000, 0x00000000,
384 0x00000000, 0x00000000, 0x00000000, 0x00000000,
387 /* Packet types for packets with an Outermost/First ARP header */
388 static const u32 ice_ptypes_arp_of[] = {
389 0x00000800, 0x00000000, 0x00000000, 0x00000000,
390 0x00000000, 0x00000000, 0x00000000, 0x00000000,
391 0x00000000, 0x00000000, 0x00000000, 0x00000000,
392 0x00000000, 0x00000000, 0x00000000, 0x00000000,
393 0x00000000, 0x00000000, 0x00000000, 0x00000000,
394 0x00000000, 0x00000000, 0x00000000, 0x00000000,
395 0x00000000, 0x00000000, 0x00000000, 0x00000000,
396 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
402 static const u32 ice_ptypes_udp_il[] = {
403 0x81000000, 0x20204040, 0x04000010, 0x80810102,
404 0x00000040, 0x00000000, 0x00000000, 0x00000000,
405 0x00000000, 0x00410000, 0x908427E0, 0x00100007,
406 0x10410000, 0x00000004, 0x10410410, 0x00004104,
407 0x00000000, 0x00000000, 0x00000000, 0x00000000,
408 0x00000000, 0x00000000, 0x00000000, 0x00000000,
409 0x00000000, 0x00000000, 0x00000000, 0x00000000,
410 0x00000000, 0x00000000, 0x00000000, 0x00000000,
413 /* Packet types for packets with an Innermost/Last TCP header */
414 static const u32 ice_ptypes_tcp_il[] = {
415 0x04000000, 0x80810102, 0x10000040, 0x02040408,
416 0x00000102, 0x00000000, 0x00000000, 0x00000000,
417 0x00000000, 0x00820000, 0x21084000, 0x00000000,
418 0x20820000, 0x00000008, 0x20820820, 0x00008208,
419 0x00000000, 0x00000000, 0x00000000, 0x00000000,
420 0x00000000, 0x00000000, 0x00000000, 0x00000000,
421 0x00000000, 0x00000000, 0x00000000, 0x00000000,
422 0x00000000, 0x00000000, 0x00000000, 0x00000000,
425 /* Packet types for packets with an Innermost/Last SCTP header */
426 static const u32 ice_ptypes_sctp_il[] = {
427 0x08000000, 0x01020204, 0x20000081, 0x04080810,
428 0x00000204, 0x00000000, 0x00000000, 0x00000000,
429 0x00000000, 0x01040000, 0x00000000, 0x00000000,
430 0x41040000, 0x00000010, 0x00000000, 0x00000000,
431 0x00000000, 0x00000000, 0x00000000, 0x00000000,
432 0x00000000, 0x00000000, 0x00000000, 0x00000000,
433 0x00000000, 0x00000000, 0x00000000, 0x00000000,
434 0x00000000, 0x00000000, 0x00000000, 0x00000000,
437 /* Packet types for packets with an Outermost/First ICMP header */
438 static const u32 ice_ptypes_icmp_of[] = {
439 0x10000000, 0x00000000, 0x00000000, 0x00000000,
440 0x00000000, 0x00000000, 0x00000000, 0x00000000,
441 0x00000000, 0x00000000, 0x00000000, 0x00000000,
442 0x00000000, 0x00000000, 0x00000000, 0x00000000,
443 0x00000000, 0x00000000, 0x00000000, 0x00000000,
444 0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 0x00000000, 0x00000000, 0x00000000, 0x00000000,
446 0x00000000, 0x00000000, 0x00000000, 0x00000000,
449 /* Packet types for packets with an Innermost/Last ICMP header */
450 static const u32 ice_ptypes_icmp_il[] = {
451 0x00000000, 0x02040408, 0x40000102, 0x08101020,
452 0x00000408, 0x00000000, 0x00000000, 0x00000000,
453 0x00000000, 0x00000000, 0x42108000, 0x00000000,
454 0x82080000, 0x00000020, 0x00000000, 0x00000000,
455 0x00000000, 0x00000000, 0x00000000, 0x00000000,
456 0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 0x00000000, 0x00000000, 0x00000000, 0x00000000,
458 0x00000000, 0x00000000, 0x00000000, 0x00000000,
461 /* Packet types for packets with an Outermost/First GRE header */
462 static const u32 ice_ptypes_gre_of[] = {
463 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
464 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
466 0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF,
467 0x00000000, 0x00000000, 0x00000000, 0x00000000,
468 0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 0x00000000, 0x00000000, 0x00000000, 0x00000000,
470 0x00000000, 0x00000000, 0x00000000, 0x00000000,
473 /* Packet types for packets with an Innermost/Last MAC header */
474 static const u32 ice_ptypes_mac_il[] = {
475 0x00000000, 0x20000000, 0x00000000, 0x00000000,
476 0x00000000, 0x00000000, 0x00000000, 0x00000000,
477 0x00000000, 0x00000000, 0x00000000, 0x00000000,
478 0x00000000, 0x00000000, 0x00000000, 0x00000000,
479 0x00000000, 0x00000000, 0x00000000, 0x00000000,
480 0x00000000, 0x00000000, 0x00000000, 0x00000000,
481 0x00000000, 0x00000000, 0x00000000, 0x00000000,
482 0x00000000, 0x00000000, 0x00000000, 0x00000000,
485 /* Packet types for GTPC */
486 static const u32 ice_ptypes_gtpc[] = {
487 0x00000000, 0x00000000, 0x00000000, 0x00000000,
488 0x00000000, 0x00000000, 0x00000000, 0x00000000,
489 0x00000000, 0x00000000, 0x000001E0, 0x00000000,
490 0x00000000, 0x00000000, 0x00000000, 0x00000000,
491 0x00000000, 0x00000000, 0x00000000, 0x00000000,
492 0x00000000, 0x00000000, 0x00000000, 0x00000000,
493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
494 0x00000000, 0x00000000, 0x00000000, 0x00000000,
497 /* Packet types for VXLAN with VNI */
498 static const u32 ice_ptypes_vxlan_vni[] = {
499 0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
500 0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
501 0x00000000, 0x00000000, 0x00000000, 0x00000000,
502 0x00000000, 0x00000000, 0x00000000, 0x00000000,
503 0x00000000, 0x00000000, 0x00000000, 0x00000000,
504 0x00000000, 0x00000000, 0x00000000, 0x00000000,
505 0x00000000, 0x00000000, 0x00000000, 0x00000000,
506 0x00000000, 0x00000000, 0x00000000, 0x00000000,
509 /* Packet types for GTPC with TEID */
510 static const u32 ice_ptypes_gtpc_tid[] = {
511 0x00000000, 0x00000000, 0x00000000, 0x00000000,
512 0x00000000, 0x00000000, 0x00000000, 0x00000000,
513 0x00000000, 0x00000000, 0x00000060, 0x00000000,
514 0x00000000, 0x00000000, 0x00000000, 0x00000000,
515 0x00000000, 0x00000000, 0x00000000, 0x00000000,
516 0x00000000, 0x00000000, 0x00000000, 0x00000000,
517 0x00000000, 0x00000000, 0x00000000, 0x00000000,
518 0x00000000, 0x00000000, 0x00000000, 0x00000000,
521 /* Packet types for GTPU */
522 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
523 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
524 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
525 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
526 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
527 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
528 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
529 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
530 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
531 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
532 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
533 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
534 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
535 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
536 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
537 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
538 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
539 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
540 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
541 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
542 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
543 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
544 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
545 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
546 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
547 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
548 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
549 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
550 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
551 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
552 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
553 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
554 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
555 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
556 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
557 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
558 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
559 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
560 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
561 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
562 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
563 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
564 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
565 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
566 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
567 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
568 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
569 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
570 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
571 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
572 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
573 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
574 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
575 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
576 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
577 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
578 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
579 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
580 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
581 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
582 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
585 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
586 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
587 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
588 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
589 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
590 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
591 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
592 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
593 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
594 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
595 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
596 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
597 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
598 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
599 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
600 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
601 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
602 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
603 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
604 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
605 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
606 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
607 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
608 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
609 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
610 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
611 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
612 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
613 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
614 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
615 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
616 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
617 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
618 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
619 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
620 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
621 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
622 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
623 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
624 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
625 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
626 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
627 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
628 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
629 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
630 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
631 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
632 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
633 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
634 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
635 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
636 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
637 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
638 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
639 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
640 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
641 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
642 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
643 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
644 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
645 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
/* GTPU ptypes tagged with the ICE_PTYPE_ATTR_GTP_DOWNLINK attribute.
 * NOTE(review): this table may continue past the end of this chunk; its
 * terminating "};" is not visible here — confirm against the full file.
 */
static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
/* Packet types carrying the GTP-U uplink attribute; selected by
 * ice_flow_proc_seg_hdrs() when a segment sets ICE_FLOW_SEG_HDR_GTPU_UP.
 * Covers plain and tunneled (IPv4/IPv6 outer) GTP-U ptypes.
 */
711 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
712 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
713 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
714 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
715 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
716 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
717 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
718 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
719 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
720 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
721 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
722 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
723 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
724 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
725 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
726 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
727 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
728 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
729 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
730 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
731 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
732 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
733 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
734 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
735 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
736 { MAC_IPV4_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
737 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
738 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
739 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
740 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
741 { MAC_IPV4_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
742 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
743 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
744 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
745 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
746 { MAC_IPV4_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
747 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
748 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
749 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
750 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
751 { MAC_IPV4_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
752 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
753 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
754 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
755 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
756 { MAC_IPV6_TUN_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
757 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
758 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
759 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
760 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
761 { MAC_IPV6_TUN_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
762 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
763 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
764 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
765 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
766 { MAC_IPV6_TUN_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
767 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
768 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
769 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
770 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
771 { MAC_IPV6_TUN_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
/* Packet types for GTP-U. 1024-bit ptype bitmap (32 x u32, LSB-first);
 * ANDed into params->ptypes by ice_flow_proc_seg_hdrs() for the
 * GTPU_DWN/GTPU_UP/GTPU_EH/GTPU_IP header flags.
 */
774 static const u32 ice_ptypes_gtpu[] = {
775 0x00000000, 0x00000000, 0x00000000, 0x00000000,
776 0x00000000, 0x00000000, 0x00000000, 0x00000000,
777 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
778 0x00000000, 0x0000003F, 0xBEFBEFBC, 0x0002FBEF,
779 0x00000000, 0x00000000, 0x00000000, 0x00000000,
780 0x00000000, 0x00000000, 0x00000000, 0x00000000,
781 0x00000000, 0x00000000, 0x00000000, 0x00000000,
782 0x00000000, 0x00000000, 0x00000000, 0x00000000,
785 /* Packet types for pppoe */
/* 1024-bit ptype bitmap; ANDed in (or ANDNOTed out when PPPoE is not
 * requested) by ice_flow_proc_seg_hdrs().
 */
786 static const u32 ice_ptypes_pppoe[] = {
787 0x00000000, 0x00000000, 0x00000000, 0x00000000,
788 0x00000000, 0x00000000, 0x00000000, 0x00000000,
789 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
790 0x00000000, 0x00000000, 0x00000000, 0x00000000,
791 0x00000000, 0x00000000, 0x00000000, 0x00000000,
792 0x00000000, 0x00000000, 0x00000000, 0x00000000,
793 0x00000000, 0x00000000, 0x00000000, 0x00000000,
794 0x00000000, 0x00000000, 0x00000000, 0x00000000,
797 /* Packet types for packets with PFCP NODE header */
/* 1024-bit ptype bitmap; selected when both PFCP and PFCP_NODE header
 * flags are set in ice_flow_proc_seg_hdrs().
 */
798 static const u32 ice_ptypes_pfcp_node[] = {
799 0x00000000, 0x00000000, 0x00000000, 0x00000000,
800 0x00000000, 0x00000000, 0x00000000, 0x00000000,
801 0x00000000, 0x00000000, 0x80000000, 0x00000002,
802 0x00000000, 0x00000000, 0x00000000, 0x00000000,
803 0x00000000, 0x00000000, 0x00000000, 0x00000000,
804 0x00000000, 0x00000000, 0x00000000, 0x00000000,
805 0x00000000, 0x00000000, 0x00000000, 0x00000000,
806 0x00000000, 0x00000000, 0x00000000, 0x00000000,
809 /* Packet types for packets with PFCP SESSION header */
/* 1024-bit ptype bitmap; selected when PFCP is set without PFCP_NODE in
 * ice_flow_proc_seg_hdrs().
 */
810 static const u32 ice_ptypes_pfcp_session[] = {
811 0x00000000, 0x00000000, 0x00000000, 0x00000000,
812 0x00000000, 0x00000000, 0x00000000, 0x00000000,
813 0x00000000, 0x00000000, 0x00000000, 0x00000005,
814 0x00000000, 0x00000000, 0x00000000, 0x00000000,
815 0x00000000, 0x00000000, 0x00000000, 0x00000000,
816 0x00000000, 0x00000000, 0x00000000, 0x00000000,
817 0x00000000, 0x00000000, 0x00000000, 0x00000000,
818 0x00000000, 0x00000000, 0x00000000, 0x00000000,
821 /* Packet types for l2tpv3 */
/* 1024-bit ptype bitmap for the ICE_FLOW_SEG_HDR_L2TPV3 flag. */
822 static const u32 ice_ptypes_l2tpv3[] = {
823 0x00000000, 0x00000000, 0x00000000, 0x00000000,
824 0x00000000, 0x00000000, 0x00000000, 0x00000000,
825 0x00000000, 0x00000000, 0x00000000, 0x00000300,
826 0x00000000, 0x00000000, 0x00000000, 0x00000000,
827 0x00000000, 0x00000000, 0x00000000, 0x00000000,
828 0x00000000, 0x00000000, 0x00000000, 0x00000000,
829 0x00000000, 0x00000000, 0x00000000, 0x00000000,
830 0x00000000, 0x00000000, 0x00000000, 0x00000000,
833 /* Packet types for esp */
/* 1024-bit ptype bitmap for the ICE_FLOW_SEG_HDR_ESP flag. */
834 static const u32 ice_ptypes_esp[] = {
835 0x00000000, 0x00000000, 0x00000000, 0x00000000,
836 0x00000000, 0x00000003, 0x00000000, 0x00000000,
837 0x00000000, 0x00000000, 0x00000000, 0x00000000,
838 0x00000000, 0x00000000, 0x00000000, 0x00000000,
839 0x00000000, 0x00000000, 0x00000000, 0x00000000,
840 0x00000000, 0x00000000, 0x00000000, 0x00000000,
841 0x00000000, 0x00000000, 0x00000000, 0x00000000,
842 0x00000000, 0x00000000, 0x00000000, 0x00000000,
845 /* Packet types for ah */
/* 1024-bit ptype bitmap for the ICE_FLOW_SEG_HDR_AH flag. */
846 static const u32 ice_ptypes_ah[] = {
847 0x00000000, 0x00000000, 0x00000000, 0x00000000,
848 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
849 0x00000000, 0x00000000, 0x00000000, 0x00000000,
850 0x00000000, 0x00000000, 0x00000000, 0x00000000,
851 0x00000000, 0x00000000, 0x00000000, 0x00000000,
852 0x00000000, 0x00000000, 0x00000000, 0x00000000,
853 0x00000000, 0x00000000, 0x00000000, 0x00000000,
854 0x00000000, 0x00000000, 0x00000000, 0x00000000,
857 /* Packet types for packets with NAT_T ESP header */
/* 1024-bit ptype bitmap for the ICE_FLOW_SEG_HDR_NAT_T_ESP flag
 * (ESP encapsulated in UDP).
 */
858 static const u32 ice_ptypes_nat_t_esp[] = {
859 0x00000000, 0x00000000, 0x00000000, 0x00000000,
860 0x00000000, 0x00000030, 0x00000000, 0x00000000,
861 0x00000000, 0x00000000, 0x00000000, 0x00000000,
862 0x00000000, 0x00000000, 0x00000000, 0x00000000,
863 0x00000000, 0x00000000, 0x00000000, 0x00000000,
864 0x00000000, 0x00000000, 0x00000000, 0x00000000,
865 0x00000000, 0x00000000, 0x00000000, 0x00000000,
866 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for non-IP L2 packets; 1024-bit ptype bitmap used for the
 * ICE_FLOW_SEG_HDR_ETH_NON_IP flag ("ofos" presumably outer/first segment
 * - TODO confirm naming convention against ice_ptypes_mac_ofos usage).
 */
869 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
870 0x00000846, 0x00000000, 0x00000000, 0x00000000,
871 0x00000000, 0x00000000, 0x00000000, 0x00000000,
872 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
873 0x00000000, 0x00000000, 0x00000000, 0x00000000,
874 0x00000000, 0x00000000, 0x00000000, 0x00000000,
875 0x00000000, 0x00000000, 0x00000000, 0x00000000,
876 0x00000000, 0x00000000, 0x00000000, 0x00000000,
877 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for GTP-U with no inner IP payload; 1024-bit ptype bitmap
 * used for the ICE_FLOW_SEG_HDR_GTPU_NON_IP flag.
 */
880 static const u32 ice_ptypes_gtpu_no_ip[] = {
881 0x00000000, 0x00000000, 0x00000000, 0x00000000,
882 0x00000000, 0x00000000, 0x00000000, 0x00000000,
883 0x00000000, 0x00000000, 0x00000600, 0x00000000,
884 0x00000000, 0x00000000, 0x00000000, 0x00000000,
885 0x00000000, 0x00000000, 0x00000000, 0x00000000,
886 0x00000000, 0x00000000, 0x00000000, 0x00000000,
887 0x00000000, 0x00000000, 0x00000000, 0x00000000,
888 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for eCPRI over Ethernet; 1024-bit ptype bitmap used for the
 * ICE_FLOW_SEG_HDR_ECPRI_TP0 flag.
 */
891 static const u32 ice_ptypes_ecpri_tp0[] = {
892 0x00000000, 0x00000000, 0x00000000, 0x00000000,
893 0x00000000, 0x00000000, 0x00000000, 0x00000000,
894 0x00000000, 0x00000000, 0x00000000, 0x00000400,
895 0x00000000, 0x00000000, 0x00000000, 0x00000000,
896 0x00000000, 0x00000000, 0x00000000, 0x00000000,
897 0x00000000, 0x00000000, 0x00000000, 0x00000000,
898 0x00000000, 0x00000000, 0x00000000, 0x00000000,
899 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for eCPRI over UDP; 1024-bit ptype bitmap used for the
 * ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 flag.
 */
902 static const u32 ice_ptypes_udp_ecpri_tp0[] = {
903 0x00000000, 0x00000000, 0x00000000, 0x00000000,
904 0x00000000, 0x00000000, 0x00000000, 0x00000000,
905 0x00000000, 0x00000000, 0x00000000, 0x00100000,
906 0x00000000, 0x00000000, 0x00000000, 0x00000000,
907 0x00000000, 0x00000000, 0x00000000, 0x00000000,
908 0x00000000, 0x00000000, 0x00000000, 0x00000000,
909 0x00000000, 0x00000000, 0x00000000, 0x00000000,
910 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for L2TPv2; 1024-bit ptype bitmap used for the
 * ICE_FLOW_SEG_HDR_L2TPV2 flag.
 */
913 static const u32 ice_ptypes_l2tpv2[] = {
914 0x00000000, 0x00000000, 0x00000000, 0x00000000,
915 0x00000000, 0x00000000, 0x00000000, 0x00000000,
916 0x00000000, 0x00000000, 0x00000000, 0x00000000,
917 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
918 0x00000000, 0x00000000, 0x00000000, 0x00000000,
919 0x00000000, 0x00000000, 0x00000000, 0x00000000,
920 0x00000000, 0x00000000, 0x00000000, 0x00000000,
921 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for PPP; 1024-bit ptype bitmap used for the
 * ICE_FLOW_SEG_HDR_PPP flag (a subset of the L2TPv2 ptypes above).
 */
924 static const u32 ice_ptypes_ppp[] = {
925 0x00000000, 0x00000000, 0x00000000, 0x00000000,
926 0x00000000, 0x00000000, 0x00000000, 0x00000000,
927 0x00000000, 0x00000000, 0x00000000, 0x00000000,
928 0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
929 0x00000000, 0x00000000, 0x00000000, 0x00000000,
930 0x00000000, 0x00000000, 0x00000000, 0x00000000,
931 0x00000000, 0x00000000, 0x00000000, 0x00000000,
932 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for IPv4 fragments; 1024-bit ptype bitmap used when a
 * segment sets IPV4 together with ICE_FLOW_SEG_HDR_IPV_FRAG.
 */
935 static const u32 ice_ptypes_ipv4_frag[] = {
936 0x00400000, 0x00000000, 0x00000000, 0x00000000,
937 0x00000000, 0x00000000, 0x00000000, 0x00000000,
938 0x00000000, 0x00000000, 0x00000000, 0x00000000,
939 0x00000000, 0x00000000, 0x00000000, 0x00000000,
940 0x00000000, 0x00000000, 0x00000000, 0x00000000,
941 0x00000000, 0x00000000, 0x00000000, 0x00000000,
942 0x00000000, 0x00000000, 0x00000000, 0x00000000,
943 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for IPv6 fragments; 1024-bit ptype bitmap used when a
 * segment sets IPV6 together with ICE_FLOW_SEG_HDR_IPV_FRAG.
 */
946 static const u32 ice_ptypes_ipv6_frag[] = {
947 0x00000000, 0x00000000, 0x01000000, 0x00000000,
948 0x00000000, 0x00000000, 0x00000000, 0x00000000,
949 0x00000000, 0x00000000, 0x00000000, 0x00000000,
950 0x00000000, 0x00000000, 0x00000000, 0x00000000,
951 0x00000000, 0x00000000, 0x00000000, 0x00000000,
952 0x00000000, 0x00000000, 0x00000000, 0x00000000,
953 0x00000000, 0x00000000, 0x00000000, 0x00000000,
954 0x00000000, 0x00000000, 0x00000000, 0x00000000,
957 /* Manage parameters and info. used during the creation of a flow profile */
958 struct ice_flow_prof_params {
960 	u16 entry_length; /* # of bytes formatted entry will require */
/* Flow profile under construction; presumably owned by the caller - TODO
 * confirm ownership against ice_flow_add_prof paths.
 */
962 	struct ice_flow_prof *prof;
964 	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
965 	 * This will give us the direction flags.
/* Extraction sequence entries built up by ice_flow_xtract_* helpers. */
967 	struct ice_fv_word es[ICE_MAX_FV_WORDS];
968 	/* attributes can be used to add attributes to a particular PTYPE */
969 	const struct ice_ptype_attributes *attr;
/* Per-entry 16-bit masks, parallel to es[]. */
972 	u16 mask[ICE_MAX_FV_WORDS];
/* Set of ptypes matched by the profile's segments; computed by
 * ice_flow_proc_seg_hdrs() (starts all-ones, narrowed by AND).
 */
973 	ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Header flags considered "inner" for RSS purposes - presumably stripped or
 * remapped when building inner-header RSS configs; TODO confirm at use site.
 */
976 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
977 (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
978 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
979 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
980 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
981 ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
982 ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
983 ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP | ICE_FLOW_SEG_HDR_GRE)
/* All L2 header flags a segment may carry. */
985 #define ICE_FLOW_SEG_HDRS_L2_MASK \
986 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* All L3 header flags; at most one may be set per segment (see
 * ice_flow_val_hdrs).
 */
987 #define ICE_FLOW_SEG_HDRS_L3_MASK \
988 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
989 ICE_FLOW_SEG_HDR_ARP)
/* All L4 header flags; at most one may be set per segment. */
990 #define ICE_FLOW_SEG_HDRS_L4_MASK \
991 (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
992 ICE_FLOW_SEG_HDR_SCTP)
993 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
994 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
995 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
998 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
999 * @segs: array of one or more packet segments that describe the flow
1000 * @segs_cnt: number of packet segments provided
1002 static enum ice_status
1003 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
1007 	for (i = 0; i < segs_cnt; i++) {
1008 		/* Multiple L3 headers */
/* ice_is_pow2() is true iff exactly one bit is set, so a non-zero,
 * non-power-of-two L3 subset means more than one L3 header was requested.
 */
1009 		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
1010 		    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
1011 			return ICE_ERR_PARAM;
1013 		/* Multiple L4 headers */
/* Same single-bit rule for the L4 header flags. */
1014 		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
1015 		    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
1016 			return ICE_ERR_PARAM;
1022 /* Sizes of fixed known protocol headers without header options */
1023 #define ICE_FLOW_PROT_HDR_SZ_MAC 14
/* NOTE(review): adds only 2 bytes for VLAN on top of MAC - presumably
 * counting the TCI with the TPID folded into the MAC size; confirm against
 * how ice_flow_calc_seg_sz consumers use the value.
 */
1024 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
1025 #define ICE_FLOW_PROT_HDR_SZ_IPV4 20
1026 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40
1027 #define ICE_FLOW_PROT_HDR_SZ_ARP 28
1028 #define ICE_FLOW_PROT_HDR_SZ_ICMP 8
1029 #define ICE_FLOW_PROT_HDR_SZ_TCP 20
1030 #define ICE_FLOW_PROT_HDR_SZ_UDP 8
1031 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12
1034 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
1035 * @params: information about the flow to be processed
1036 * @seg: index of packet segment whose header size is to be determined
/* Returns the cumulative byte size of the segment's fixed (option-less)
 * protocol headers, summed L2 + L3 + L4 from the ICE_FLOW_PROT_HDR_SZ_*
 * constants above.
 */
1038 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
/* L2: MAC, plus 2 bytes when a VLAN tag is present. */
1043 	sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
1044 	     ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
/* L3: exactly one of IPv4/IPv6/ARP per segment (enforced by
 * ice_flow_val_hdrs).
 */
1047 	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
1048 		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
1049 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
1050 		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
1051 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
1052 		sz += ICE_FLOW_PROT_HDR_SZ_ARP;
1053 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
1054 		/* A L3 header is required if L4 is specified */
/* L4: at most one of ICMP/TCP/UDP/SCTP. */
1058 	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
1059 		sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
1060 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
1061 		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
1062 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
1063 		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
1064 	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
1065 		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
1071 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
1072 * @params: information about the flow to be processed
1074 * This function identifies the packet types associated with the protocol
1075 * headers being present in packet segments of the specified flow profile.
1077 static enum ice_status
1078 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
1080 	struct ice_flow_prof *prof;
/* Start with every ptype set; each matched header flag below ANDs its
 * ptype bitmap in, progressively narrowing params->ptypes to the
 * intersection of all requested headers.
 */
1083 	ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
1086 	prof = params->prof;
1088 	for (i = 0; i < params->prof->segs_cnt; i++) {
1089 		const ice_bitmap_t *src;
1092 		hdrs = prof->segs[i].hdrs;
/* For ETH/IP the bitmap choice depends on segment index: i == 0 uses the
 * outer/first ("ofos") tables, inner segments use the "il" tables.
 */
1094 		if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
1095 			src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
1096 				(const ice_bitmap_t *)ice_ptypes_mac_il;
1097 			ice_and_bitmap(params->ptypes, params->ptypes, src,
1098 				       ICE_FLOW_PTYPE_MAX);
1101 		if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
1102 			src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
1103 			ice_and_bitmap(params->ptypes, params->ptypes, src,
1104 				       ICE_FLOW_PTYPE_MAX);
1107 		if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
1108 			ice_and_bitmap(params->ptypes, params->ptypes,
1109 				       (const ice_bitmap_t *)ice_ptypes_arp_of,
1110 				       ICE_FLOW_PTYPE_MAX);
1113 		if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
1114 			src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
1115 			ice_and_bitmap(params->ptypes, params->ptypes, src,
1116 				       ICE_FLOW_PTYPE_MAX);
/* L3 selection: IPV_OTHER/IPV_FRAG modifiers and the presence of a
 * TCP/UDP/SCTP flag pick between the "_all", "_frag", "_no_l4" and plain
 * ptype tables for IPv4/IPv6.
 */
1118 		if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1119 		    (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1121 			      (const ice_bitmap_t *)ice_ptypes_ipv4_il :
1122 			      (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
1123 			ice_and_bitmap(params->ptypes, params->ptypes, src,
1124 				       ICE_FLOW_PTYPE_MAX);
1125 		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1126 			   (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
1128 			      (const ice_bitmap_t *)ice_ptypes_ipv6_il :
1129 			      (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
1130 			ice_and_bitmap(params->ptypes, params->ptypes, src,
1131 				       ICE_FLOW_PTYPE_MAX);
1132 		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1133 			   (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1134 			src = (const ice_bitmap_t *)ice_ptypes_ipv4_frag;
1135 			ice_and_bitmap(params->ptypes, params->ptypes, src,
1136 				       ICE_FLOW_PTYPE_MAX);
1137 		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1138 			   (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
1139 			src = (const ice_bitmap_t *)ice_ptypes_ipv6_frag;
1140 			ice_and_bitmap(params->ptypes, params->ptypes, src,
1141 				       ICE_FLOW_PTYPE_MAX);
1142 		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
1143 			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1144 			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
1145 				(const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
1146 			ice_and_bitmap(params->ptypes, params->ptypes, src,
1147 				       ICE_FLOW_PTYPE_MAX);
1148 		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
1149 			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
1150 				(const ice_bitmap_t *)ice_ptypes_ipv4_il;
1151 			ice_and_bitmap(params->ptypes, params->ptypes, src,
1152 				       ICE_FLOW_PTYPE_MAX);
1153 		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
1154 			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
1155 			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
1156 				(const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
1157 			ice_and_bitmap(params->ptypes, params->ptypes, src,
1158 				       ICE_FLOW_PTYPE_MAX);
1159 		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
1160 			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
1161 				(const ice_bitmap_t *)ice_ptypes_ipv6_il;
1162 			ice_and_bitmap(params->ptypes, params->ptypes, src,
1163 				       ICE_FLOW_PTYPE_MAX);
1166 		if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
1167 			src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
1168 			ice_and_bitmap(params->ptypes, params->ptypes,
1169 				       src, ICE_FLOW_PTYPE_MAX);
1170 		} else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
1171 			src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1172 			ice_and_bitmap(params->ptypes, params->ptypes, src,
1173 				       ICE_FLOW_PTYPE_MAX);
/* Neither ETH_NON_IP nor PPPOE requested: explicitly remove the PPPoE
 * ptypes so they cannot match by default.
 */
1175 			src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1176 			ice_andnot_bitmap(params->ptypes, params->ptypes, src,
1177 					  ICE_FLOW_PTYPE_MAX);
1180 		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
1181 			src = (const ice_bitmap_t *)ice_ptypes_udp_il;
1182 			ice_and_bitmap(params->ptypes, params->ptypes, src,
1183 				       ICE_FLOW_PTYPE_MAX);
1184 		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
1185 			ice_and_bitmap(params->ptypes, params->ptypes,
1186 				       (const ice_bitmap_t *)ice_ptypes_tcp_il,
1187 				       ICE_FLOW_PTYPE_MAX);
1188 		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
1189 			src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
1190 			ice_and_bitmap(params->ptypes, params->ptypes, src,
1191 				       ICE_FLOW_PTYPE_MAX);
/* Tunnel / upper-protocol flags are mutually exclusive in this chain; the
 * GTP-U variants also record the ptype attribute table to apply later.
 */
1194 		if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
1195 			src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
1196 				(const ice_bitmap_t *)ice_ptypes_icmp_il;
1197 			ice_and_bitmap(params->ptypes, params->ptypes, src,
1198 				       ICE_FLOW_PTYPE_MAX);
1199 		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
1200 			src = (const ice_bitmap_t *)ice_ptypes_gre_of;
1201 			ice_and_bitmap(params->ptypes, params->ptypes, src,
1202 				       ICE_FLOW_PTYPE_MAX);
1203 		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
1204 			src = (const ice_bitmap_t *)ice_ptypes_gtpc;
1205 			ice_and_bitmap(params->ptypes, params->ptypes,
1206 				       src, ICE_FLOW_PTYPE_MAX);
1207 		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
1208 			src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
1209 			ice_and_bitmap(params->ptypes, params->ptypes,
1210 				       src, ICE_FLOW_PTYPE_MAX);
1211 		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
1212 			src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
1213 			ice_and_bitmap(params->ptypes, params->ptypes,
1214 				       src, ICE_FLOW_PTYPE_MAX);
1215 		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
1216 			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1217 			ice_and_bitmap(params->ptypes, params->ptypes,
1218 				       src, ICE_FLOW_PTYPE_MAX);
1220 			/* Attributes for GTP packet with downlink */
1221 			params->attr = ice_attr_gtpu_down;
1222 			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1223 		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
1224 			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1225 			ice_and_bitmap(params->ptypes, params->ptypes,
1226 				       src, ICE_FLOW_PTYPE_MAX);
1228 			/* Attributes for GTP packet with uplink */
1229 			params->attr = ice_attr_gtpu_up;
1230 			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1231 		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
1232 			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1233 			ice_and_bitmap(params->ptypes, params->ptypes,
1234 				       src, ICE_FLOW_PTYPE_MAX);
1236 			/* Attributes for GTP packet with Extension Header */
1237 			params->attr = ice_attr_gtpu_eh;
1238 			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1239 		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1240 			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1241 			ice_and_bitmap(params->ptypes, params->ptypes,
1242 				       src, ICE_FLOW_PTYPE_MAX);
1244 			/* Attributes for GTP packet without Extension Header */
1245 			params->attr = ice_attr_gtpu_session;
1246 			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1247 		} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
1248 			src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
1249 			ice_and_bitmap(params->ptypes, params->ptypes,
1250 				       src, ICE_FLOW_PTYPE_MAX);
1251 		} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1252 			src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1253 			ice_and_bitmap(params->ptypes, params->ptypes,
1254 				       src, ICE_FLOW_PTYPE_MAX);
1255 		} else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1256 			src = (const ice_bitmap_t *)ice_ptypes_esp;
1257 			ice_and_bitmap(params->ptypes, params->ptypes,
1258 				       src, ICE_FLOW_PTYPE_MAX);
1259 		} else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1260 			src = (const ice_bitmap_t *)ice_ptypes_ah;
1261 			ice_and_bitmap(params->ptypes, params->ptypes,
1262 				       src, ICE_FLOW_PTYPE_MAX);
1263 		} else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1264 			src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1265 			ice_and_bitmap(params->ptypes, params->ptypes,
1266 				       src, ICE_FLOW_PTYPE_MAX);
1267 		} else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1268 			src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1269 			ice_and_bitmap(params->ptypes, params->ptypes,
1270 				       src, ICE_FLOW_PTYPE_MAX);
1271 		} else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1272 			src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1273 			ice_and_bitmap(params->ptypes, params->ptypes,
1274 				       src, ICE_FLOW_PTYPE_MAX);
1277 		if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
1278 			src = (const ice_bitmap_t *)ice_ptypes_ppp;
1279 			ice_and_bitmap(params->ptypes, params->ptypes,
1280 				       src, ICE_FLOW_PTYPE_MAX);
1283 		if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1284 			if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1286 				      (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1289 				      (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1291 			ice_and_bitmap(params->ptypes, params->ptypes,
1292 				       src, ICE_FLOW_PTYPE_MAX);
/* PFCP not requested: strip both PFCP node and session ptypes so they
 * never match by default.
 */
1294 			src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1295 			ice_andnot_bitmap(params->ptypes, params->ptypes,
1296 					  src, ICE_FLOW_PTYPE_MAX);
1298 			src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1299 			ice_andnot_bitmap(params->ptypes, params->ptypes,
1300 					  src, ICE_FLOW_PTYPE_MAX);
1308 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
1309 * @hw: pointer to the HW struct
1310 * @params: information about the flow to be processed
1311 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1313 * This function will allocate an extraction sequence entries for a DWORD size
1314 * chunk of the packet flags.
1316 static enum ice_status
1317 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1318 			  struct ice_flow_prof_params *params,
1319 			  enum ice_flex_mdid_pkt_flags flags)
1321 	u8 fv_words = hw->blk[params->blk].es.fvw;
1324 	/* Make sure the number of extraction sequence entries required does not
1325 	 * exceed the block's capacity.
1327 	if (params->es_cnt >= fv_words)
1328 		return ICE_ERR_MAX_LIMIT;
1330 	/* some blocks require a reversed field vector layout */
/* Reversed layout: fill entries from the top of the field vector down;
 * otherwise append in order.
 */
1331 	if (hw->blk[params->blk].es.reverse)
1332 		idx = fv_words - params->es_cnt - 1;
1334 		idx = params->es_cnt;
/* Metadata protocol ID with the flag selector stored as the offset. */
1336 	params->es[idx].prot_id = ICE_PROT_META_ID;
1337 	params->es[idx].off = flags;
1344 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1345 * @hw: pointer to the HW struct
1346 * @params: information about the flow to be processed
1347 * @seg: packet segment index of the field to be extracted
1348 * @fld: ID of field to be extracted
1349 * @match: bitfield of all fields
1351 * This function determines the protocol ID, offset, and size of the given
1352 * field. It then allocates one or more extraction sequence entries for the
1353 * given field, and fill the entries with protocol ID and offset information.
1355 static enum ice_status
1356 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1357 u8 seg, enum ice_flow_field fld, u64 match)
1359 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1360 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1361 u8 fv_words = hw->blk[params->blk].es.fvw;
1362 struct ice_flow_fld_info *flds;
1363 u16 cnt, ese_bits, i;
1368 flds = params->prof->segs[seg].fields;
1371 case ICE_FLOW_FIELD_IDX_ETH_DA:
1372 case ICE_FLOW_FIELD_IDX_ETH_SA:
1373 case ICE_FLOW_FIELD_IDX_S_VLAN:
1374 case ICE_FLOW_FIELD_IDX_C_VLAN:
1375 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1377 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1378 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1380 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1381 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1383 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1384 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1386 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1387 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1388 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1390 /* TTL and PROT share the same extraction seq. entry.
1391 * Each is considered a sibling to the other in terms of sharing
1392 * the same extraction sequence entry.
1394 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1395 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1397 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1399 /* If the sibling field is also included, that field's
1400 * mask needs to be included.
1402 if (match & BIT(sib))
1403 sib_mask = ice_flds_info[sib].mask;
1405 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1406 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1407 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1409 /* TTL and PROT share the same extraction seq. entry.
1410 * Each is considered a sibling to the other in terms of sharing
1411 * the same extraction sequence entry.
1413 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1414 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1416 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1418 /* If the sibling field is also included, that field's
1419 * mask needs to be included.
1421 if (match & BIT(sib))
1422 sib_mask = ice_flds_info[sib].mask;
1424 case ICE_FLOW_FIELD_IDX_IPV4_SA:
1425 case ICE_FLOW_FIELD_IDX_IPV4_DA:
1426 case ICE_FLOW_FIELD_IDX_IPV4_CHKSUM:
1427 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1428 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1429 params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1431 prot_id = ICE_PROT_IPV4_IL_IL;
1433 case ICE_FLOW_FIELD_IDX_IPV4_ID:
1434 prot_id = ICE_PROT_IPV4_OF_OR_S;
1436 case ICE_FLOW_FIELD_IDX_IPV6_SA:
1437 case ICE_FLOW_FIELD_IDX_IPV6_DA:
1438 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1439 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1440 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1441 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1442 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1443 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1444 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1445 if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
1446 params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
1448 prot_id = ICE_PROT_IPV6_IL_IL;
1450 case ICE_FLOW_FIELD_IDX_IPV6_ID:
1451 prot_id = ICE_PROT_IPV6_FRAG;
1453 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1454 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1455 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1456 case ICE_FLOW_FIELD_IDX_TCP_CHKSUM:
1457 prot_id = ICE_PROT_TCP_IL;
1459 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1460 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1461 case ICE_FLOW_FIELD_IDX_UDP_CHKSUM:
1462 prot_id = ICE_PROT_UDP_IL_OR_S;
1464 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1465 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1466 case ICE_FLOW_FIELD_IDX_SCTP_CHKSUM:
1467 prot_id = ICE_PROT_SCTP_IL;
1469 case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1470 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1471 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1472 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1473 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1474 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1475 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1476 /* GTP is accessed through UDP OF protocol */
1477 prot_id = ICE_PROT_UDP_OF;
1479 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1480 prot_id = ICE_PROT_PPPOE;
1482 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1483 prot_id = ICE_PROT_UDP_IL_OR_S;
1485 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1486 prot_id = ICE_PROT_L2TPV3;
1488 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1489 prot_id = ICE_PROT_ESP_F;
1491 case ICE_FLOW_FIELD_IDX_AH_SPI:
1492 prot_id = ICE_PROT_ESP_2;
1494 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1495 prot_id = ICE_PROT_UDP_IL_OR_S;
1497 case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1498 prot_id = ICE_PROT_ECPRI;
1500 case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1501 prot_id = ICE_PROT_UDP_IL_OR_S;
1503 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1504 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1505 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1506 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1507 case ICE_FLOW_FIELD_IDX_ARP_OP:
1508 prot_id = ICE_PROT_ARP_OF;
1510 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1511 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1512 /* ICMP type and code share the same extraction seq. entry */
1513 prot_id = (params->prof->segs[seg].hdrs &
1514 ICE_FLOW_SEG_HDR_IPV4) ?
1515 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1516 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1517 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1518 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1520 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1521 prot_id = ICE_PROT_GRE_OF;
1524 return ICE_ERR_NOT_IMPL;
1527 /* Each extraction sequence entry is a word in size, and extracts a
1528 * word-aligned offset from a protocol header.
1530 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1532 flds[fld].xtrct.prot_id = prot_id;
1533 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1534 ICE_FLOW_FV_EXTRACT_SZ;
1535 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1536 flds[fld].xtrct.idx = params->es_cnt;
1537 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1539 /* Adjust the next field-entry index after accommodating the number of
1540 * entries this field consumes
1542 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1543 ice_flds_info[fld].size, ese_bits);
1545 /* Fill in the extraction sequence entries needed for this field */
1546 off = flds[fld].xtrct.off;
1547 mask = flds[fld].xtrct.mask;
1548 for (i = 0; i < cnt; i++) {
1549 /* Only consume an extraction sequence entry if there is no
1550 * sibling field associated with this field or the sibling entry
1551 * already extracts the word shared with this field.
1553 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1554 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1555 flds[sib].xtrct.off != off) {
1558 /* Make sure the number of extraction sequence required
1559 * does not exceed the block's capability
1561 if (params->es_cnt >= fv_words)
1562 return ICE_ERR_MAX_LIMIT;
1564 /* some blocks require a reversed field vector layout */
1565 if (hw->blk[params->blk].es.reverse)
1566 idx = fv_words - params->es_cnt - 1;
1568 idx = params->es_cnt;
1570 params->es[idx].prot_id = prot_id;
1571 params->es[idx].off = off;
1572 params->mask[idx] = mask | sib_mask;
1576 off += ICE_FLOW_FV_EXTRACT_SZ;
1583 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1584 * @hw: pointer to the HW struct
1585 * @params: information about the flow to be processed
1586 * @seg: index of packet segment whose raw fields are to be extracted
1588 static enum ice_status
1589 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1596 if (!params->prof->segs[seg].raws_cnt)
1599 if (params->prof->segs[seg].raws_cnt >
1600 ARRAY_SIZE(params->prof->segs[seg].raws))
1601 return ICE_ERR_MAX_LIMIT;
1603 /* Offsets within the segment headers are not supported */
1604 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1606 return ICE_ERR_PARAM;
1608 fv_words = hw->blk[params->blk].es.fvw;
1610 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1611 struct ice_flow_seg_fld_raw *raw;
1614 raw = ¶ms->prof->segs[seg].raws[i];
1616 /* Storing extraction information */
1617 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1618 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1619 ICE_FLOW_FV_EXTRACT_SZ;
1620 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1622 raw->info.xtrct.idx = params->es_cnt;
1624 /* Determine the number of field vector entries this raw field
1627 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1628 (raw->info.src.last * BITS_PER_BYTE),
1629 (ICE_FLOW_FV_EXTRACT_SZ *
1631 off = raw->info.xtrct.off;
1632 for (j = 0; j < cnt; j++) {
1635 /* Make sure the number of extraction sequence required
1636 * does not exceed the block's capability
1638 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1639 params->es_cnt >= ICE_MAX_FV_WORDS)
1640 return ICE_ERR_MAX_LIMIT;
1642 /* some blocks require a reversed field vector layout */
1643 if (hw->blk[params->blk].es.reverse)
1644 idx = fv_words - params->es_cnt - 1;
1646 idx = params->es_cnt;
1648 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1649 params->es[idx].off = off;
1651 off += ICE_FLOW_FV_EXTRACT_SZ;
/* NOTE(review): lossy extraction — interior lines missing, upstream line
 * numbers fused into content; verify vs upstream ice_flow.c.
 *
 * Walks every packet segment of the profile and, for each matched-field
 * bit, creates the field's extraction entries; raw byte matches follow.
 * For the ACL block it first extracts the Rx/Tx packet-flags metadata.
 */
1659 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1660 * @hw: pointer to the HW struct
1661 * @params: information about the flow to be processed
1663 * This function iterates through all matched fields in the given segments, and
1664 * creates an extraction sequence for the fields.
1666 static enum ice_status
1667 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1668 struct ice_flow_prof_params *params)
1670 enum ice_status status = ICE_SUCCESS;
1673 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1676 if (params->blk == ICE_BLK_ACL) {
1677 status = ice_flow_xtract_pkt_flags(hw, params,
1678 ICE_RX_MDID_PKT_FLAGS_15_0);
1683 for (i = 0; i < params->prof->segs_cnt; i++) {
1684 u64 match = params->prof->segs[i].match;
1685 enum ice_flow_field j;
/* Iterate the local copy of the match bitmap, clearing each processed bit. */
1687 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1688 ICE_FLOW_FIELD_IDX_MAX) {
1689 status = ice_flow_xtract_fld(hw, params, i, j, match);
1692 ice_clear_bit(j, (ice_bitmap_t *)&match);
1695 /* Process raw matching bytes */
1696 status = ice_flow_xtract_raws(hw, params, i);
/* NOTE(review): lossy extraction — interior lines missing; verify vs
 * upstream ice_flow.c.
 *
 * Best-fit search: among all ACL scenarios wide enough for the computed
 * entry_length, pick the one with the smallest effective width and store
 * it in the profile's configuration.
 */
1705 * ice_flow_sel_acl_scen - returns the specific scenario
1706 * @hw: pointer to the hardware structure
1707 * @params: information about the flow to be processed
1709 * This function will return the specific scenario based on the
1710 * params passed to it
1712 static enum ice_status
1713 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1715 /* Find the best-fit scenario for the provided match width */
1716 struct ice_acl_scen *cand_scen = NULL, *scen;
1719 return ICE_ERR_DOES_NOT_EXIST;
1721 /* Loop through each scenario and match against the scenario width
1722 * to select the specific scenario
1724 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1725 if (scen->eff_width >= params->entry_length &&
1726 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1729 return ICE_ERR_DOES_NOT_EXIST;
1731 params->prof->cfg.scen = cand_scen;
/* NOTE(review): lossy extraction — interior lines missing, "¶ms" below is
 * mojibake for "&params"; verify vs upstream ice_flow.c.
 *
 * Computes the byte layout of ACL flow entries for this profile: range-type
 * fields consume range-checker slots (range_idx), all other fields and raw
 * matches consume byte-selection indices starting at
 * ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX. Rejects layouts exceeding the
 * byte-selection capacity or the number of range checkers, then records
 * the resulting entry length in params->entry_length.
 */
1737 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1738 * @params: information about the flow to be processed
1740 static enum ice_status
1741 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1743 u16 index, i, range_idx = 0;
1745 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1747 for (i = 0; i < params->prof->segs_cnt; i++) {
1748 struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
1751 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1752 ICE_FLOW_FIELD_IDX_MAX) {
1753 struct ice_flow_fld_info *fld = &seg->fields[j];
1755 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1757 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1758 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1760 /* Range checking only supported for single
1763 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1765 BITS_PER_BYTE * 2) > 1)
1766 return ICE_ERR_PARAM;
1768 /* Ranges must define low and high values */
1769 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1770 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1771 return ICE_ERR_PARAM;
/* Range fields take a range-checker slot instead of byte-selection bytes. */
1773 fld->entry.val = range_idx++;
1775 /* Store adjusted byte-length of field for later
1776 * use, taking into account potential
1777 * non-byte-aligned displacement
1779 fld->entry.last = DIVIDE_AND_ROUND_UP
1780 (ice_flds_info[j].size +
1781 (fld->xtrct.disp % BITS_PER_BYTE),
1783 fld->entry.val = index;
1784 index += fld->entry.last;
1788 for (j = 0; j < seg->raws_cnt; j++) {
1789 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1791 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1792 raw->info.entry.val = index;
1793 raw->info.entry.last = raw->info.src.last;
1794 index += raw->info.entry.last;
1798 /* Currently only support using the byte selection base, which only
1799 * allows for an effective entry size of 30 bytes. Reject anything
1802 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1803 return ICE_ERR_PARAM;
1805 /* Only 8 range checkers per profile, reject anything trying to use
1808 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1809 return ICE_ERR_PARAM;
1811 /* Store # bytes required for entry for later use */
1812 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
/* NOTE(review): lossy extraction — interior lines (switch cases, breaks,
 * closing braces) missing; verify vs upstream ice_flow.c.
 *
 * Top-level segment processing: resolves segment headers, builds the
 * extraction sequence, then performs per-block post-processing (ACL gets
 * an entry-format pass followed by scenario selection; unknown blocks
 * return ICE_ERR_NOT_IMPL).
 */
1818 * ice_flow_proc_segs - process all packet segments associated with a profile
1819 * @hw: pointer to the HW struct
1820 * @params: information about the flow to be processed
1822 static enum ice_status
1823 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1825 enum ice_status status;
1827 status = ice_flow_proc_seg_hdrs(params);
1831 status = ice_flow_create_xtrct_seq(hw, params);
1835 switch (params->blk) {
1838 status = ICE_SUCCESS;
1841 status = ice_flow_acl_def_entry_frmt(params);
1844 status = ice_flow_sel_acl_scen(hw, params);
1849 return ICE_ERR_NOT_IMPL;
/* Condition flags for ice_flow_find_prof_conds() below:
 * CHK_FLDS also compares matched fields, CHK_VSI requires the VSI to be
 * associated, NOT_CHK_DIR skips the direction comparison.
 */
1855 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1856 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1857 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
/* NOTE(review): lossy extraction — interior lines missing; verify vs
 * upstream ice_flow.c.
 *
 * Scans the block's profile list (under fl_profs_locks[blk]) for a profile
 * whose direction, segment count, per-segment headers — and, per @conds,
 * matched fields and VSI association — all match.
 */
1860 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1861 * @hw: pointer to the HW struct
1862 * @blk: classification stage
1863 * @dir: flow direction
1864 * @segs: array of one or more packet segments that describe the flow
1865 * @segs_cnt: number of packet segments provided
1866 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1867 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1869 static struct ice_flow_prof *
1870 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1871 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1872 u8 segs_cnt, u16 vsi_handle, u32 conds)
1874 struct ice_flow_prof *p, *prof = NULL;
1876 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1877 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1878 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1879 segs_cnt && segs_cnt == p->segs_cnt) {
1882 /* Check for profile-VSI association if specified */
1883 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1884 ice_is_vsi_valid(hw, vsi_handle) &&
1885 !ice_is_bit_set(p->vsis, vsi_handle))
1888 /* Protocol headers must be checked. Matched fields are
1889 * checked if specified.
1891 for (i = 0; i < segs_cnt; i++)
1892 if (segs[i].hdrs != p->segs[i].hdrs ||
1893 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1894 segs[i].match != p->segs[i].match))
1897 /* A match is found if all segments are matched */
1898 if (i == segs_cnt) {
1903 ice_release_lock(&hw->fl_profs_locks[blk]);
/* NOTE(review): lossy extraction — the return type and closing brace of
 * this wrapper are among the missing lines; verify vs upstream ice_flow.c.
 *
 * Convenience wrapper: looks up a profile by headers + matched fields
 * (CHK_FLDS) and returns its ID, or ICE_FLOW_PROF_ID_INVAL when absent.
 */
1909 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1910 * @hw: pointer to the HW struct
1911 * @blk: classification stage
1912 * @dir: flow direction
1913 * @segs: array of one or more packet segments that describe the flow
1914 * @segs_cnt: number of packet segments provided
1917 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1918 struct ice_flow_seg_info *segs, u8 segs_cnt)
1920 struct ice_flow_prof *p;
1922 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1923 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1925 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
/* NOTE(review): lossy extraction — the "return p;" / "return NULL;" tail
 * of this lookup is missing; verify vs upstream ice_flow.c.
 *
 * Linear search of the block's profile list for the entry whose ID equals
 * @prof_id. Caller is expected to hold the profile-list lock.
 */
1929 * ice_flow_find_prof_id - Look up a profile with given profile ID
1930 * @hw: pointer to the HW struct
1931 * @blk: classification stage
1932 * @prof_id: unique ID to identify this flow profile
1934 static struct ice_flow_prof *
1935 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1937 struct ice_flow_prof *p;
1939 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1940 if (p->id == prof_id)
/* NOTE(review): lossy extraction — guard conditions around the frees are
 * among the missing lines; verify vs upstream ice_flow.c.
 *
 * Releases all memory owned by a flow entry: its formatted-entry buffer,
 * range-checker buffer, actions array, and finally the entry itself.
 */
1947 * ice_dealloc_flow_entry - Deallocate flow entry memory
1948 * @hw: pointer to the HW struct
1949 * @entry: flow entry to be removed
1952 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1958 ice_free(hw, entry->entry);
1960 if (entry->range_buf) {
1961 ice_free(hw, entry->range_buf);
1962 entry->range_buf = NULL;
1966 ice_free(hw, entry->acts);
1968 entry->acts_cnt = 0;
1971 ice_free(hw, entry);
/* NOTE(review): lossy extraction — interior lines missing; verify vs
 * upstream ice_flow.c.
 *
 * Translates a software profile ID handle into the HW profile ID by
 * searching the block's profile map under prof_map_lock. Returns
 * ICE_ERR_DOES_NOT_EXIST when no mapping is found.
 */
1975 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1976 * @hw: pointer to the HW struct
1977 * @blk: classification stage
1978 * @prof_id: the profile ID handle
1979 * @hw_prof_id: pointer to variable to receive the HW profile ID
1982 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1985 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1986 struct ice_prof_map *map;
1988 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1989 map = ice_search_prof_id(hw, blk, prof_id);
1991 *hw_prof_id = map->prof_id;
1992 status = ICE_SUCCESS;
1994 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
/* Sentinel scenario number meaning "no scenario assigned" for a PF slot. */
1998 #define ICE_ACL_INVALID_SCEN 0x3f
/* NOTE(review): lossy extraction — interior lines (early returns, the
 * ICE_SUCCESS returns after each all-equal check) are missing; verify vs
 * upstream ice_flow.c.
 *
 * Queries the ACL profile and inspects the eight per-PF scenario slots:
 * all-zero or all-ICE_ACL_INVALID_SCEN means no PF uses the profile;
 * any other pattern yields ICE_ERR_IN_USE.
 */
2001 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
2002 * @hw: pointer to the hardware structure
2003 * @prof: pointer to flow profile
2004 * @buf: destination buffer function writes partial extraction sequence to
2006 * returns ICE_SUCCESS if no PF is associated to the given profile
2007 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
2008 * returns other error code for real error
2010 static enum ice_status
2011 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
2012 struct ice_aqc_acl_prof_generic_frmt *buf)
2014 enum ice_status status;
2017 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2021 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
2025 /* If all PF's associated scenarios are all 0 or all
2026 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
2027 * not been configured yet.
2029 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
2030 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
2031 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
2032 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
2035 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
2036 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
2037 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
2038 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
2039 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
2040 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
2041 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
2042 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
2045 return ICE_ERR_IN_USE;
/* NOTE(review): lossy extraction — the lvalue halves of the two
 * LE16_TO_CPU assignments (presumably cntrs.first_cntr / cntrs.last_cntr —
 * TODO confirm upstream) and the error/return tail are missing; verify vs
 * upstream ice_flow.c.
 *
 * For every counter-type action in @acts, reconstructs an ice_acl_cntrs
 * descriptor from the action's stored counter value and deallocates the
 * counter(s) via the AQ (dual counters for PKT_BYTES, single otherwise).
 */
2054 static enum ice_status
2055 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
2060 for (i = 0; i < acts_cnt; i++) {
2061 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2062 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2063 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2064 struct ice_acl_cntrs cntrs = { 0 };
2065 enum ice_status status;
2067 /* amount is unused in the dealloc path but the common
2068 * parameter check routine wants a value set, as zero
2069 * is invalid for the check. Just set it.
2072 cntrs.bank = 0; /* Only bank0 for the moment */
2074 LE16_TO_CPU(acts[i].data.acl_act.value);
2076 LE16_TO_CPU(acts[i].data.acl_act.value);
2078 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2079 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2081 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2083 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
/* NOTE(review): lossy extraction — the error-check branches after each call
 * and the final return are missing; verify vs upstream ice_flow.c.
 *
 * Read-modify-write of the ACL profile: queries the current per-PF
 * scenario table, marks this PF's slot ICE_ACL_INVALID_SCEN, and programs
 * the profile back.
 */
2092 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
2093 * @hw: pointer to the hardware structure
2094 * @prof: pointer to flow profile
2096 * Disassociate the scenario from the profile for the PF of the VSI.
2098 static enum ice_status
2099 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
2101 struct ice_aqc_acl_prof_generic_frmt buf;
2102 enum ice_status status = ICE_SUCCESS;
2105 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2107 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2111 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
2115 /* Clear scenario for this PF */
2116 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
2117 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
/* NOTE(review): lossy extraction — the guard conditions preceding the two
 * ICE_ERR_BAD_PTR returns and the final return are missing; verify vs
 * upstream ice_flow.c.
 *
 * Removes a single flow entry: for ACL, removes the scenario entry and
 * frees any counters held by the entry's actions, then unlinks the entry
 * from its profile list and deallocates it.
 */
2123 * ice_flow_rem_entry_sync - Remove a flow entry
2124 * @hw: pointer to the HW struct
2125 * @blk: classification stage
2126 * @entry: flow entry to be removed
2128 static enum ice_status
2129 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
2130 struct ice_flow_entry *entry)
2133 return ICE_ERR_BAD_PTR;
2135 if (blk == ICE_BLK_ACL) {
2136 enum ice_status status;
2139 return ICE_ERR_BAD_PTR;
2141 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
2142 entry->scen_entry_idx);
2146 /* Checks if we need to release an ACL counter. */
2147 if (entry->acts_cnt && entry->acts)
2148 ice_flow_acl_free_act_cntr(hw, entry->acts,
2152 LIST_DEL(&entry->l_entry);
2154 ice_dealloc_flow_entry(hw, entry);
/* NOTE(review): lossy extraction — goto labels, error-path jumps and
 * several closing braces are missing, and "¶ms" below is mojibake for
 * "&params"; verify vs upstream ice_flow.c before editing.
 *
 * Allocates a params scratch structure and the persistent profile object,
 * initializes the extraction sequence to invalid, copies segments and
 * actions into the profile, processes the segments, adds the HW profile,
 * and initializes the profile's entry list/lock. Error paths free the
 * actions copy, the profile, and the params allocation.
 */
2160 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
2161 * @hw: pointer to the HW struct
2162 * @blk: classification stage
2163 * @dir: flow direction
2164 * @prof_id: unique ID to identify this flow profile
2165 * @segs: array of one or more packet segments that describe the flow
2166 * @segs_cnt: number of packet segments provided
2167 * @acts: array of default actions
2168 * @acts_cnt: number of default actions
2169 * @prof: stores the returned flow profile added
2171 * Assumption: the caller has acquired the lock to the profile list
2173 static enum ice_status
2174 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
2175 enum ice_flow_dir dir, u64 prof_id,
2176 struct ice_flow_seg_info *segs, u8 segs_cnt,
2177 struct ice_flow_action *acts, u8 acts_cnt,
2178 struct ice_flow_prof **prof)
2180 struct ice_flow_prof_params *params;
2181 enum ice_status status;
2184 if (!prof || (acts_cnt && !acts))
2185 return ICE_ERR_BAD_PTR;
2187 params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2189 return ICE_ERR_NO_MEMORY;
2191 params->prof = (struct ice_flow_prof *)
2192 ice_malloc(hw, sizeof(*params->prof));
2193 if (!params->prof) {
2194 status = ICE_ERR_NO_MEMORY;
2198 /* initialize extraction sequence to all invalid (0xff) */
2199 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2200 params->es[i].prot_id = ICE_PROT_INVALID;
2201 params->es[i].off = ICE_FV_OFFSET_INVAL;
2205 params->prof->id = prof_id;
2206 params->prof->dir = dir;
2207 params->prof->segs_cnt = segs_cnt;
2209 /* Make a copy of the segments that need to be persistent in the flow
2212 for (i = 0; i < segs_cnt; i++)
2213 ice_memcpy(¶ms->prof->segs[i], &segs[i], sizeof(*segs),
2214 ICE_NONDMA_TO_NONDMA);
2216 /* Make a copy of the actions that need to be persistent in the flow
2220 params->prof->acts = (struct ice_flow_action *)
2221 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2222 ICE_NONDMA_TO_NONDMA);
2224 if (!params->prof->acts) {
2225 status = ICE_ERR_NO_MEMORY;
2230 status = ice_flow_proc_segs(hw, params);
2232 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2236 /* Add a HW profile for this flow profile */
2237 status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2238 params->attr, params->attr_cnt, params->es,
2241 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
2245 INIT_LIST_HEAD(¶ms->prof->entries);
2246 ice_init_lock(¶ms->prof->entries_lock);
2247 *prof = params->prof;
2251 if (params->prof->acts)
2252 ice_free(hw, params->prof->acts);
2253 ice_free(hw, params->prof);
2256 ice_free(hw, params);
/* NOTE(review): lossy extraction — loop bodies, error branches and the
 * final return are partially missing; verify vs upstream ice_flow.c.
 *
 * Tears down a flow profile: removes all remaining entries (under the
 * entries lock), for ACL disassociates the PF scenario and clears the
 * range checkers once no PF uses the profile, removes the HW profiles,
 * then unlinks the profile and frees its actions.
 */
2262 * ice_flow_rem_prof_sync - remove a flow profile
2263 * @hw: pointer to the hardware structure
2264 * @blk: classification stage
2265 * @prof: pointer to flow profile to remove
2267 * Assumption: the caller has acquired the lock to the profile list
2269 static enum ice_status
2270 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2271 struct ice_flow_prof *prof)
2273 enum ice_status status;
2275 /* Remove all remaining flow entries before removing the flow profile */
2276 if (!LIST_EMPTY(&prof->entries)) {
2277 struct ice_flow_entry *e, *t;
2279 ice_acquire_lock(&prof->entries_lock);
2281 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2283 status = ice_flow_rem_entry_sync(hw, blk, e);
2288 ice_release_lock(&prof->entries_lock);
2291 if (blk == ICE_BLK_ACL) {
2292 struct ice_aqc_acl_profile_ranges query_rng_buf;
2293 struct ice_aqc_acl_prof_generic_frmt buf;
2296 /* Disassociate the scenario from the profile for the PF */
2297 status = ice_flow_acl_disassoc_scen(hw, prof);
2301 /* Clear the range-checker if the profile ID is no longer
2304 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2305 if (status && status != ICE_ERR_IN_USE) {
2307 } else if (!status) {
2308 /* Clear the range-checker value for profile ID */
2309 ice_memset(&query_rng_buf, 0,
2310 sizeof(struct ice_aqc_acl_profile_ranges),
2313 status = ice_flow_get_hw_prof(hw, blk, prof->id,
2318 status = ice_prog_acl_prof_ranges(hw, prof_id,
2319 &query_rng_buf, NULL);
2325 /* Remove all hardware profiles associated with this flow profile */
2326 status = ice_rem_prof(hw, blk, prof->id);
2328 LIST_DEL(&prof->l_entry);
2329 ice_destroy_lock(&prof->entries_lock);
2331 ice_free(hw, prof->acts);
/* NOTE(review): lossy extraction — the function's return type line and
 * closing brace are missing; verify vs upstream ice_flow.c.
 *
 * Populates byte_selection[] for one field: source byte index is derived
 * from the extraction-sequence word index plus byte displacement; the
 * "src++ ^ 1" swaps each byte within its 16-bit word to convert the
 * little-endian field-vector layout back to network (big-endian) order.
 */
2339 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2340 * @buf: Destination buffer function writes partial xtrct sequence to
2341 * @info: Info about field
2344 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2345 struct ice_flow_fld_info *info)
2350 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2351 info->xtrct.disp / BITS_PER_BYTE;
2352 dst = info->entry.val;
2353 for (i = 0; i < info->entry.last; i++)
2354 /* HW stores field vector words in LE, convert words back to BE
2355 * so constructed entries will end up in network order
2357 buf->byte_selection[dst++] = src++ ^ 1;
/* NOTE(review): lossy extraction — the ICE_ERR_IN_USE early-exit path, the
 * word_selection RHS, and an ice_memset argument are among the missing
 * lines; verify vs upstream ice_flow.c.
 *
 * Programs the ACL extraction sequence for a profile: if the profile is
 * not yet in use by any PF, fills word/byte selections from every
 * segment's matched fields and raw matches and marks all PF scenario
 * slots invalid; in all cases it then assigns this PF's scenario ID and
 * writes the profile via ice_prgm_acl_prof_xtrct().
 */
2361 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2362 * @hw: pointer to the hardware structure
2363 * @prof: pointer to flow profile
2365 static enum ice_status
2366 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2368 struct ice_aqc_acl_prof_generic_frmt buf;
2369 struct ice_flow_fld_info *info;
2370 enum ice_status status;
2374 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2376 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2380 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2381 if (status && status != ICE_ERR_IN_USE)
2385 /* Program the profile dependent configuration. This is done
2386 * only once regardless of the number of PFs using that profile
2388 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2390 for (i = 0; i < prof->segs_cnt; i++) {
2391 struct ice_flow_seg_info *seg = &prof->segs[i];
2394 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2395 ICE_FLOW_FIELD_IDX_MAX) {
2396 info = &seg->fields[j];
2398 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2399 buf.word_selection[info->entry.val] =
2402 ice_flow_acl_set_xtrct_seq_fld(&buf,
2406 for (j = 0; j < seg->raws_cnt; j++) {
2407 info = &seg->raws[j].info;
2408 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2412 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2413 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2417 /* Update the current PF */
2418 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2419 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
/* NOTE(review): lossy extraction — the return type line, the @vsig argument
 * of ice_add_vsi_flow() and the final return are missing; verify vs
 * upstream ice_flow.c.
 *
 * Validates the VSI handle and block, then (under the profile-list lock)
 * associates the hardware VSI number with the target VSIG.
 */
2425 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2426 * @hw: pointer to the hardware structure
2427 * @blk: classification stage
2428 * @vsi_handle: software VSI handle
2429 * @vsig: target VSI group
2431 * Assumption: the caller has already verified that the VSI to
2432 * be added has the same characteristics as the VSIG and will
2433 * thereby have access to all resources added to that VSIG.
2436 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2439 enum ice_status status;
2441 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2442 return ICE_ERR_PARAM;
2444 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2445 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2447 ice_release_lock(&hw->fl_profs_locks[blk]);
/* NOTE(review): lossy extraction — the return type line, continuation
 * arguments of ice_add_prof_id_flow(), success/failure branch structure
 * and final return are missing; verify vs upstream ice_flow.c.
 *
 * If the VSI is not yet associated with the profile: for ACL first
 * programs the extraction sequence, then adds the HW profile flow for the
 * VSI; on success records the association in prof->vsis, on failure logs
 * via ice_debug().
 */
2453 * ice_flow_assoc_prof - associate a VSI with a flow profile
2454 * @hw: pointer to the hardware structure
2455 * @blk: classification stage
2456 * @prof: pointer to flow profile
2457 * @vsi_handle: software VSI handle
2459 * Assumption: the caller has acquired the lock to the profile list
2460 * and the software VSI handle has been validated
2463 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2464 struct ice_flow_prof *prof, u16 vsi_handle)
2466 enum ice_status status = ICE_SUCCESS;
2468 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2469 if (blk == ICE_BLK_ACL) {
2470 status = ice_flow_acl_set_xtrct_seq(hw, prof);
2474 status = ice_add_prof_id_flow(hw, blk,
2475 ice_get_hw_vsi_num(hw,
2479 ice_set_bit(vsi_handle, prof->vsis);
2481 ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
/* NOTE(review): lossy extraction — continuation arguments of
 * ice_rem_prof_id_flow(), branch structure and final return are missing;
 * verify vs upstream ice_flow.c.
 *
 * Mirror of ice_flow_assoc_prof(): if the VSI is associated, removes the
 * HW profile flow for it; on success clears the bit in prof->vsis, on
 * failure logs via ice_debug().
 */
2489 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2490 * @hw: pointer to the hardware structure
2491 * @blk: classification stage
2492 * @prof: pointer to flow profile
2493 * @vsi_handle: software VSI handle
2495 * Assumption: the caller has acquired the lock to the profile list
2496 * and the software VSI handle has been validated
2498 static enum ice_status
2499 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2500 struct ice_flow_prof *prof, u16 vsi_handle)
2502 enum ice_status status = ICE_SUCCESS;
2504 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2505 status = ice_rem_prof_id_flow(hw, blk,
2506 ice_get_hw_vsi_num(hw,
2510 ice_clear_bit(vsi_handle, prof->vsis);
2512 ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
/* NOTE(review): lossy extraction — the return type line, the validation
 * conditions preceding the ICE_ERR_PARAM / ICE_ERR_BAD_PTR returns, and
 * the final return are missing; verify vs upstream ice_flow.c.
 *
 * Public entry point: validates inputs and segment headers, then (under
 * the profile-list lock) creates the profile via ice_flow_add_prof_sync()
 * and, on success, links it into hw->fl_profs[blk].
 */
2520 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2521 * @hw: pointer to the HW struct
2522 * @blk: classification stage
2523 * @dir: flow direction
2524 * @prof_id: unique ID to identify this flow profile
2525 * @segs: array of one or more packet segments that describe the flow
2526 * @segs_cnt: number of packet segments provided
2527 * @acts: array of default actions
2528 * @acts_cnt: number of default actions
2529 * @prof: stores the returned flow profile added
2532 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2533 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2534 struct ice_flow_action *acts, u8 acts_cnt,
2535 struct ice_flow_prof **prof)
2537 enum ice_status status;
2539 if (segs_cnt > ICE_FLOW_SEG_MAX)
2540 return ICE_ERR_MAX_LIMIT;
2543 return ICE_ERR_PARAM;
2546 return ICE_ERR_BAD_PTR;
2548 status = ice_flow_val_hdrs(segs, segs_cnt);
2552 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2554 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2555 acts, acts_cnt, prof);
2557 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2559 ice_release_lock(&hw->fl_profs_locks[blk]);
/* NOTE(review): lossy extraction — the return type line, the goto-out
 * label and the final return are missing; verify vs upstream ice_flow.c.
 *
 * Public removal entry point: under the profile-list lock, looks up the
 * profile by ID and removes it (and all entries) via
 * ice_flow_rem_prof_sync(); the profile pointer is invalid afterwards.
 */
2565 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2566 * @hw: pointer to the HW struct
2567 * @blk: the block for which the flow profile is to be removed
2568 * @prof_id: unique ID of the flow profile to be removed
2571 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2573 struct ice_flow_prof *prof;
2574 enum ice_status status;
2576 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2578 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2580 status = ICE_ERR_DOES_NOT_EXIST;
2584 /* prof becomes invalid after the call */
2585 status = ice_flow_rem_prof_sync(hw, blk, prof);
2588 ice_release_lock(&hw->fl_profs_locks[blk]);
/* NOTE(review): lossy extraction — the "found = e; break;" style interior
 * of the match branch and the outer-loop early exit are missing; verify vs
 * upstream ice_flow.c.
 *
 * Searches every profile of the block (each profile's entry list under
 * its own lock) for the entry with @entry_id; returns its handle or
 * ICE_FLOW_ENTRY_HANDLE_INVAL.
 */
2594 * ice_flow_find_entry - look for a flow entry using its unique ID
2595 * @hw: pointer to the HW struct
2596 * @blk: classification stage
2597 * @entry_id: unique ID to identify this flow entry
2599 * This function looks for the flow entry with the specified unique ID in all
2600 * flow profiles of the specified classification stage. If the entry is found,
2601 * and it returns the handle to the flow entry. Otherwise, it returns
2602 * ICE_FLOW_ENTRY_ID_INVAL.
2604 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2606 struct ice_flow_entry *found = NULL;
2607 struct ice_flow_prof *p;
2609 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2611 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2612 struct ice_flow_entry *e;
2614 ice_acquire_lock(&p->entries_lock);
2615 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2616 if (e->id == entry_id) {
2620 ice_release_lock(&p->entries_lock);
2626 ice_release_lock(&hw->fl_profs_locks[blk]);
2628 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
/* NOTE(review): lossy extraction — the return after the disallowed-type
 * check, the cntrs.amount assignment, error handling after
 * ice_aq_alloc_acl_cntrs(), the *cnt_alloc updates and the final return
 * are missing; verify vs upstream ice_flow.c.
 *
 * Validates the action list for an ACL rule: caps the count, restricts
 * action types, rejects duplicate action types via a bitmap, and for
 * counter actions allocates the HW counter(s) and stores the first
 * counter index back into the action's value (LE16).
 */
2632 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2633 * @hw: pointer to the hardware structure
2634 * @acts: array of actions to be performed on a match
2635 * @acts_cnt: number of actions
2636 * @cnt_alloc: indicates if an ACL counter has been allocated.
2638 static enum ice_status
2639 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2640 u8 acts_cnt, bool *cnt_alloc)
2642 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2645 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2648 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2649 return ICE_ERR_OUT_OF_RANGE;
2651 for (i = 0; i < acts_cnt; i++) {
2652 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2653 acts[i].type != ICE_FLOW_ACT_DROP &&
2654 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2655 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2658 /* If the caller want to add two actions of the same type, then
2659 * it is considered invalid configuration.
2661 if (ice_test_and_set_bit(acts[i].type, dup_check))
2662 return ICE_ERR_PARAM;
2665 /* Checks if ACL counters are needed. */
2666 for (i = 0; i < acts_cnt; i++) {
2667 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2668 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2669 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2670 struct ice_acl_cntrs cntrs = { 0 };
2671 enum ice_status status;
2674 cntrs.bank = 0; /* Only bank0 for the moment */
2676 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2677 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2679 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2681 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2684 /* Counter index within the bank */
2685 acts[i].data.acl_act.value =
2686 CPU_TO_LE16(cntrs.first_cntr);
/* NOTE(review): lossy extraction — the return type line, the mask==0
 * early-exit branch and the lvalues of the new_high/new_low assignments
 * are missing; verify vs upstream ice_flow.c.
 *
 * Configures one ACL range checker: the mask defaults to all field bits
 * when the caller supplied none; mask/low/high are read from the entry's
 * data buffer, shifted by the extraction displacement, written big-endian
 * into the checker config, and the checker's bit is set in @range.
 */
2695 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2696 * @fld: number of the given field
2697 * @info: info about field
2698 * @range_buf: range checker configuration buffer
2699 * @data: pointer to a data buffer containing flow entry's match values/masks
2700 * @range: Input/output param indicating which range checkers are being used
2703 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2704 struct ice_aqc_acl_profile_ranges *range_buf,
2705 u8 *data, u8 *range)
2709 /* If not specified, default mask is all bits in field */
2710 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2711 BIT(ice_flds_info[fld].size) - 1 :
2712 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2714 /* If the mask is 0, then we don't need to worry about this input
2715 * range checker value.
2719 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2721 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2722 u8 range_idx = info->entry.val;
2724 range_buf->checker_cfg[range_idx].low_boundary =
2725 CPU_TO_BE16(new_low);
2726 range_buf->checker_cfg[range_idx].high_boundary =
2727 CPU_TO_BE16(new_high);
2728 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2730 /* Indicate which range checker is being used */
2731 *range |= BIT(range_idx);
2736 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2737 * @fld: number of the given field
2738 * @info: info about the field
2739 * @buf: buffer containing the entry
2740 * @dontcare: buffer containing don't care mask for entry
2741 * @data: pointer to a data buffer containing flow entry's match values/masks
2744 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2745 u8 *dontcare, u8 *data)
2747 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2748 bool use_mask = false;
2751 src = info->src.val;
2752 mask = info->src.mask;
/* Destination is relative to the ACL byte-selection window. */
2753 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
/* Bit displacement of the field within its first byte. */
2754 disp = info->xtrct.disp % BITS_PER_BYTE;
2756 if (mask != ICE_FLOW_FLD_OFF_INVAL)
/* Copy the field byte-by-byte, carrying shifted overflow bits from one
 * byte into the next via the 16-bit temporaries tmp_s/tmp_m.
 */
2759 for (k = 0; k < info->entry.last; k++, dst++) {
2760 /* Add overflow bits from previous byte */
2761 buf[dst] = (tmp_s & 0xff00) >> 8;
2763 /* If mask is not valid, tmp_m is always zero, so just setting
2764 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2765 * overflow bits of mask from prev byte
2767 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2769 /* If there is displacement, last byte will only contain
2770 * displaced data, but there is no more data to read from user
2771 * buffer, so skip so as not to potentially read beyond end of
2774 if (!disp || k < info->entry.last - 1) {
2775 /* Store shifted data to use in next byte */
2776 tmp_s = data[src++] << disp;
2778 /* Add current (shifted) byte */
2779 buf[dst] |= tmp_s & 0xff;
2781 /* Handle mask if valid */
/* Don't-care mask is the inverse of the user mask. */
2783 tmp_m = (~data[mask++] & 0xff) << disp;
2784 dontcare[dst] |= tmp_m & 0xff;
2789 /* Fill in don't care bits at beginning of field */
2791 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2792 for (k = 0; k < disp; k++)
2793 dontcare[dst] |= BIT(k);
/* Trailing displacement: bits past the end of the field in the last
 * byte are also don't-care.
 */
2796 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2798 /* Fill in don't care bits at end of field */
2800 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2801 info->entry.last - 1;
2802 for (k = end_disp; k < BITS_PER_BYTE; k++)
2803 dontcare[dst] |= BIT(k);
2808 * ice_flow_acl_frmt_entry - Format ACL entry
2809 * @hw: pointer to the hardware structure
2810 * @prof: pointer to flow profile
2811 * @e: pointer to the flow entry
2812 * @data: pointer to a data buffer containing flow entry's match values/masks
2813 * @acts: array of actions to be performed on a match
2814 * @acts_cnt: number of actions
2816 * Formats the key (and key_inverse) to be matched from the data passed in,
2817 * along with data from the flow profile. This key/key_inverse pair makes up
2818 * the 'entry' for an ACL flow entry.
2820 static enum ice_status
2821 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2822 struct ice_flow_entry *e, u8 *data,
2823 struct ice_flow_action *acts, u8 acts_cnt)
2825 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2826 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2827 enum ice_status status;
2832 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2836 /* Format the result action */
2838 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2842 status = ICE_ERR_NO_MEMORY;
/* Keep a private copy of the actions on the entry. */
2844 e->acts = (struct ice_flow_action *)
2845 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2846 ICE_NONDMA_TO_NONDMA);
2850 e->acts_cnt = acts_cnt;
2852 /* Format the matching data */
2853 buf_sz = prof->cfg.scen->width;
2854 buf = (u8 *)ice_malloc(hw, buf_sz);
2858 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2862 /* 'key' buffer will store both key and key_inverse, so must be twice
2865 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2869 range_buf = (struct ice_aqc_acl_profile_ranges *)
2870 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2874 /* Set don't care mask to all 1's to start, will zero out used bytes */
2875 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
/* Walk every matched field in every segment; range-type fields program
 * a range checker, regular fields are packed into buf/dontcare.
 */
2877 for (i = 0; i < prof->segs_cnt; i++) {
2878 struct ice_flow_seg_info *seg = &prof->segs[i];
2881 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2882 ICE_FLOW_FIELD_IDX_MAX) {
2883 struct ice_flow_fld_info *info = &seg->fields[j];
2885 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2886 ice_flow_acl_frmt_entry_range(j, info,
2890 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* Raw (offset/length) matches are copied byte-for-byte. */
2894 for (j = 0; j < seg->raws_cnt; j++) {
2895 struct ice_flow_fld_info *info = &seg->raws[j].info;
2896 u16 dst, src, mask, k;
2897 bool use_mask = false;
2899 src = info->src.val;
2900 dst = info->entry.val -
2901 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2902 mask = info->src.mask;
2904 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2907 for (k = 0; k < info->entry.last; k++, dst++) {
2908 buf[dst] = data[src++];
2910 dontcare[dst] = ~data[mask++];
/* Profile ID byte must match exactly (dontcare cleared). */
2917 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2918 dontcare[prof->cfg.scen->pid_idx] = 0;
2920 /* Format the buffer for direction flags */
2921 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2923 if (prof->dir == ICE_FLOW_RX)
2924 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
/* Used range checkers must match; unused ones are don't-care. */
2927 buf[prof->cfg.scen->rng_chk_idx] = range;
2928 /* Mark any unused range checkers as don't care */
2929 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2930 e->range_buf = range_buf;
2932 ice_free(hw, range_buf);
/* Produce key + key_inverse from buf/dontcare. */
2935 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2941 e->entry_sz = buf_sz * 2;
2948 ice_free(hw, dontcare);
/* Error unwind: release everything acquired above. */
2953 if (status && range_buf) {
2954 ice_free(hw, range_buf);
2955 e->range_buf = NULL;
2958 if (status && e->acts) {
2959 ice_free(hw, e->acts);
2964 if (status && cnt_alloc)
2965 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2971 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2972 * the compared data.
2973 * @prof: pointer to flow profile
2974 * @e: pointer to the comparing flow entry
2975 * @do_chg_action: decide if we want to change the ACL action
2976 * @do_add_entry: decide if we want to add the new ACL entry
2977 * @do_rem_entry: decide if we want to remove the current ACL entry
2979 * Find an ACL scenario entry that matches the compared data. In the same time,
2980 * this function also figure out:
2981 * a/ If we want to change the ACL action
2982 * b/ If we want to add the new ACL entry
2983 * c/ If we want to remove the current ACL entry
2985 static struct ice_flow_entry *
2986 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2987 struct ice_flow_entry *e, bool *do_chg_action,
2988 bool *do_add_entry, bool *do_rem_entry)
2990 struct ice_flow_entry *p, *return_entry = NULL;
2994 * a/ There exists an entry with same matching data, but different
2995 * priority, then we remove this existing ACL entry. Then, we
2996 * will add the new entry to the ACL scenario.
2997 * b/ There exists an entry with same matching data, priority, and
2998 * result action, then we do nothing
2999 * c/ There exists an entry with same matching data, priority, but
3000 * different, action, then do only change the action's entry.
3001 * d/ Else, we add this new entry to the ACL scenario.
/* Defaults correspond to case d/ (brand-new entry). */
3003 *do_chg_action = false;
3004 *do_add_entry = true;
3005 *do_rem_entry = false;
3006 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
3007 if (memcmp(p->entry, e->entry, p->entry_sz))
3010 /* From this point, we have the same matching_data. */
3011 *do_add_entry = false;
3014 if (p->priority != e->priority) {
3015 /* matching data && !priority */
3016 *do_add_entry = true;
3017 *do_rem_entry = true;
3021 /* From this point, we will have matching_data && priority */
3022 if (p->acts_cnt != e->acts_cnt)
3023 *do_chg_action = true;
/* Action lists are compared as unordered sets: every existing action
 * must appear somewhere in the new list, else the action changed.
 */
3024 for (i = 0; i < p->acts_cnt; i++) {
3025 bool found_not_match = false;
3027 for (j = 0; j < e->acts_cnt; j++)
3028 if (memcmp(&p->acts[i], &e->acts[j],
3029 sizeof(struct ice_flow_action))) {
3030 found_not_match = true;
3034 if (found_not_match) {
3035 *do_chg_action = true;
3040 /* (do_chg_action = true) means :
3041 * matching_data && priority && !result_action
3042 * (do_chg_action = false) means :
3043 * matching_data && priority && result_action
3048 return return_entry;
3052 * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
/* Maps the generic flow priority enum onto the ACL module's priority
 * enum; unknown values fall back to NORMAL.
 */
3055 static enum ice_acl_entry_prio
3056 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
3058 enum ice_acl_entry_prio acl_prio;
3061 case ICE_FLOW_PRIO_LOW:
3062 acl_prio = ICE_ACL_PRIO_LOW;
3064 case ICE_FLOW_PRIO_NORMAL:
3065 acl_prio = ICE_ACL_PRIO_NORMAL;
3067 case ICE_FLOW_PRIO_HIGH:
3068 acl_prio = ICE_ACL_PRIO_HIGH;
/* default: treat unrecognized priorities as NORMAL */
3071 acl_prio = ICE_ACL_PRIO_NORMAL;
3079 * ice_flow_acl_union_rng_chk - Perform union operation between two
3080 * range-range checker buffers
3081 * @dst_buf: pointer to destination range checker buffer
3082 * @src_buf: pointer to source range checker buffer
3084 * For this function, we do the union between dst_buf and src_buf
3085 * range checker buffer, and we will save the result back to dst_buf
3087 static enum ice_status
3088 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
3089 struct ice_aqc_acl_profile_ranges *src_buf)
3093 if (!dst_buf || !src_buf)
3094 return ICE_ERR_BAD_PTR;
/* For each source checker, find a destination slot that is either free
 * (mask == 0) or already holds an identical configuration; fail when no
 * slot is available.
 */
3096 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
3097 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
3098 bool will_populate = false;
3100 in_data = &src_buf->checker_cfg[i];
3105 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
3106 cfg_data = &dst_buf->checker_cfg[j];
3108 if (!cfg_data->mask ||
3109 !memcmp(cfg_data, in_data,
3110 sizeof(struct ice_acl_rng_data))) {
3111 will_populate = true;
3116 if (will_populate) {
3117 ice_memcpy(cfg_data, in_data,
3118 sizeof(struct ice_acl_rng_data),
3119 ICE_NONDMA_TO_NONDMA);
3121 /* No available slot left to program range checker */
3122 return ICE_ERR_MAX_LIMIT;
3130 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
3131 * @hw: pointer to the hardware structure
3132 * @prof: pointer to flow profile
3133 * @entry: double pointer to the flow entry
3135 * For this function, we will look at the current added entries in the
3136 * corresponding ACL scenario. Then, we will perform matching logic to
3137 * see if we want to add/modify/do nothing with this new entry.
3139 static enum ice_status
3140 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
3141 struct ice_flow_entry **entry)
3143 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
3144 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
3145 struct ice_acl_act_entry *acts = NULL;
3146 struct ice_flow_entry *exist;
3147 enum ice_status status = ICE_SUCCESS;
3148 struct ice_flow_entry *e;
3151 if (!entry || !(*entry) || !prof)
3152 return ICE_ERR_BAD_PTR;
3156 do_chg_rng_chk = false;
3160 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
3165 /* Query the current range-checker value in FW */
3166 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
3170 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
3171 sizeof(struct ice_aqc_acl_profile_ranges),
3172 ICE_NONDMA_TO_NONDMA);
3174 /* Generate the new range-checker value */
3175 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
3179 /* Reconfigure the range check if the buffer is changed. */
3180 do_chg_rng_chk = false;
3181 if (memcmp(&query_rng_buf, &cfg_rng_buf,
3182 sizeof(struct ice_aqc_acl_profile_ranges))) {
3183 status = ice_prog_acl_prof_ranges(hw, prof_id,
3184 &cfg_rng_buf, NULL);
3188 do_chg_rng_chk = true;
3192 /* Figure out if we want to (change the ACL action) and/or
3193 * (Add the new ACL entry) and/or (Remove the current ACL entry)
3195 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
3196 &do_add_entry, &do_rem_entry);
3198 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
3203 /* Prepare the result action buffer */
/* BUGFIX: the HW action buffer must hold one ice_acl_act_entry per
 * action (e->acts_cnt) - it was previously sized by e->entry_sz (the
 * key width), which is unrelated to the number of actions copied by
 * the loop below and consumed by ice_acl_add_entry()/ice_acl_prog_act().
 */
3204 acts = (struct ice_acl_act_entry *)
3205 ice_calloc(hw, e->acts_cnt, sizeof(struct ice_acl_act_entry));
3207 return ICE_ERR_NO_MEMORY;
3209 for (i = 0; i < e->acts_cnt; i++)
3210 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
3211 sizeof(struct ice_acl_act_entry),
3212 ICE_NONDMA_TO_NONDMA);
3215 enum ice_acl_entry_prio prio;
/* Entry buffer holds key followed by key_inverse (each entry_sz/2). */
3219 keys = (u8 *)e->entry;
3220 inverts = keys + (e->entry_sz / 2);
3221 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
3223 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
3224 inverts, acts, e->acts_cnt,
3229 e->scen_entry_idx = entry_idx;
3230 LIST_ADD(&e->l_entry, &prof->entries);
3232 if (do_chg_action) {
3233 /* For the action memory info, update the SW's copy of
3234 * exist entry with e's action memory info
3236 ice_free(hw, exist->acts);
3237 exist->acts_cnt = e->acts_cnt;
3238 exist->acts = (struct ice_flow_action *)
3239 ice_calloc(hw, exist->acts_cnt,
3240 sizeof(struct ice_flow_action));
3242 status = ICE_ERR_NO_MEMORY;
3246 ice_memcpy(exist->acts, e->acts,
3247 sizeof(struct ice_flow_action) * e->acts_cnt,
3248 ICE_NONDMA_TO_NONDMA);
3250 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3252 exist->scen_entry_idx);
3257 if (do_chg_rng_chk) {
3258 /* In this case, we want to update the range checker
3259 * information of the exist entry
3261 status = ice_flow_acl_union_rng_chk(exist->range_buf,
3267 /* As we don't add the new entry to our SW DB, deallocate its
3268 * memories, and return the exist entry to the caller
3270 ice_dealloc_flow_entry(hw, e);
3280 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3281 * @hw: pointer to the hardware structure
3282 * @prof: pointer to flow profile
3283 * @e: double pointer to the flow entry
/* Locked wrapper: serializes scenario-entry insertion against other
 * entry add/remove operations on the same profile.
 */
3285 static enum ice_status
3286 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3287 struct ice_flow_entry **e)
3289 enum ice_status status;
3291 ice_acquire_lock(&prof->entries_lock);
3292 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3293 ice_release_lock(&prof->entries_lock);
3299 * ice_flow_add_entry - Add a flow entry
3300 * @hw: pointer to the HW struct
3301 * @blk: classification stage
3302 * @prof_id: ID of the profile to add a new flow entry to
3303 * @entry_id: unique ID to identify this flow entry
3304 * @vsi_handle: software VSI handle for the flow entry
3305 * @prio: priority of the flow entry
3306 * @data: pointer to a data buffer containing flow entry's match values/masks
3307 * @acts: arrays of actions to be performed on a match
3308 * @acts_cnt: number of actions
3309 * @entry_h: pointer to buffer that receives the new flow entry's handle
3312 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3313 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3314 void *data, struct ice_flow_action *acts, u8 acts_cnt,
3317 struct ice_flow_entry *e = NULL;
3318 struct ice_flow_prof *prof;
3319 enum ice_status status = ICE_SUCCESS;
3321 /* ACL entries must indicate an action */
3322 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3323 return ICE_ERR_PARAM;
3325 /* No flow entry data is expected for RSS */
3326 if (!entry_h || (!data && blk != ICE_BLK_RSS))
3327 return ICE_ERR_BAD_PTR;
3329 if (!ice_is_vsi_valid(hw, vsi_handle))
3330 return ICE_ERR_PARAM;
3332 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3334 prof = ice_flow_find_prof_id(hw, blk, prof_id);
3336 status = ICE_ERR_DOES_NOT_EXIST;
3338 /* Allocate memory for the entry being added and associate
3339 * the VSI to the found flow profile
3341 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3343 status = ICE_ERR_NO_MEMORY;
3345 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3348 ice_release_lock(&hw->fl_profs_locks[blk]);
3353 e->vsi_handle = vsi_handle;
/* ACL-specific path: format the key and insert into the scenario. */
3362 /* ACL will handle the entry management */
3363 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3368 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3374 status = ICE_ERR_NOT_IMPL;
3378 if (blk != ICE_BLK_ACL) {
3379 /* ACL will handle the entry management */
3380 ice_acquire_lock(&prof->entries_lock);
3381 LIST_ADD(&e->l_entry, &prof->entries);
3382 ice_release_lock(&prof->entries_lock);
/* Hand the opaque entry handle back to the caller. */
3385 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
/* Error unwind: free partially-built entry storage. */
3390 ice_free(hw, e->entry);
3398 * ice_flow_rem_entry - Remove a flow entry
3399 * @hw: pointer to the HW struct
3400 * @blk: classification stage
3401 * @entry_h: handle to the flow entry to be removed
3403 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3406 struct ice_flow_entry *entry;
3407 struct ice_flow_prof *prof;
3408 enum ice_status status = ICE_SUCCESS;
3410 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3411 return ICE_ERR_PARAM;
/* Handle is an encoded pointer; decode it back to the entry. */
3413 entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
3415 /* Retain the pointer to the flow profile as the entry will be freed */
3419 ice_acquire_lock(&prof->entries_lock);
3420 status = ice_flow_rem_entry_sync(hw, blk, entry);
3421 ice_release_lock(&prof->entries_lock);
3428 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3429 * @seg: packet segment the field being set belongs to
3430 * @fld: field to be set
3431 * @field_type: type of the field
3432 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3433 * entry's input buffer
3434 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3436 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3437 * entry's input buffer
3439 * This helper function stores information of a field being matched, including
3440 * the type of the field and the locations of the value to match, the mask, and
3441 * the upper-bound value in the start of the input buffer for a flow entry.
3442 * This function should only be used for fixed-size data structures.
3444 * This function also opportunistically determines the protocol headers to be
3445 * present based on the fields being set. Some fields cannot be used alone to
3446 * determine the protocol headers present. Sometimes, fields for particular
3447 * protocol headers are not matched. In those cases, the protocol headers
3448 * must be explicitly set.
3451 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3452 enum ice_flow_fld_match_type field_type, u16 val_loc,
3453 u16 mask_loc, u16 last_loc)
3455 u64 bit = BIT_ULL(fld);
3458 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
/* Record the match type and the byte offsets of value/mask/last
 * within the caller's input buffer.
 */
3461 seg->fields[fld].type = field_type;
3462 seg->fields[fld].src.val = val_loc;
3463 seg->fields[fld].src.mask = mask_loc;
3464 seg->fields[fld].src.last = last_loc;
/* Pull in the protocol header(s) this field implies. */
3466 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3470 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3471 * @seg: packet segment the field being set belongs to
3472 * @fld: field to be set
3473 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3474 * entry's input buffer
3475 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3477 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3478 * entry's input buffer
3479 * @range: indicate if field being matched is to be in a range
3481 * This function specifies the locations, in the form of byte offsets from the
3482 * start of the input buffer for a flow entry, from where the value to match,
3483 * the mask value, and upper value can be extracted. These locations are then
3484 * stored in the flow profile. When adding a flow entry associated with the
3485 * flow profile, these locations will be used to quickly extract the values and
3486 * create the content of a match entry. This function should only be used for
3487 * fixed-size data structures.
3490 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3491 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Thin convenience wrapper: map the bool onto the match-type enum. */
3493 enum ice_flow_fld_match_type t = range ?
3494 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3496 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3500 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3501 * @seg: packet segment the field being set belongs to
3502 * @fld: field to be set
3503 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3504 * entry's input buffer
3505 * @pref_loc: location of prefix value from entry's input buffer
3506 * @pref_sz: size of the location holding the prefix value
3508 * This function specifies the locations, in the form of byte offsets from the
3509 * start of the input buffer for a flow entry, from where the value to match
3510 * and the IPv4 prefix value can be extracted. These locations are then stored
3511 * in the flow profile. When adding flow entries to the associated flow profile,
3512 * these locations can be used to quickly extract the values to create the
3513 * content of a match entry. This function should only be used for fixed-size
3517 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3518 u16 val_loc, u16 pref_loc, u8 pref_sz)
3520 /* For this type of field, the "mask" location is for the prefix value's
3521 * location and the "last" location is for the size of the location of
/* Reuses the mask/last slots with PREFIX-specific meaning. */
3524 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3525 pref_loc, (u16)pref_sz);
3529 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3530 * @seg: packet segment the field being set belongs to
3531 * @off: offset of the raw field from the beginning of the segment in bytes
3532 * @len: length of the raw pattern to be matched
3533 * @val_loc: location of the value to match from entry's input buffer
3534 * @mask_loc: location of mask value from entry's input buffer
3536 * This function specifies the offset of the raw field to be match from the
3537 * beginning of the specified packet segment, and the locations, in the form of
3538 * byte offsets from the start of the input buffer for a flow entry, from where
3539 * the value to match and the mask value to be extracted. These locations are
3540 * then stored in the flow profile. When adding flow entries to the associated
3541 * flow profile, these locations can be used to quickly extract the values to
3542 * create the content of a match entry. This function should only be used for
3543 * fixed-size data structures.
3546 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3547 u16 val_loc, u16 mask_loc)
/* Only record the raw field while a slot is free; overflow is not an
 * error here (see note below).
 */
3549 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3550 seg->raws[seg->raws_cnt].off = off;
3551 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3552 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3553 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3554 /* The "last" field is used to store the length of the field */
3555 seg->raws[seg->raws_cnt].info.src.last = len;
3558 /* Overflows of "raws" will be handled as an error condition later in
3559 * the flow when this information is processed.
3565 * ice_flow_rem_vsi_prof - remove vsi from flow profile
3566 * @hw: pointer to the hardware structure
3567 * @blk: classification stage
3568 * @vsi_handle: software VSI handle
3569 * @prof_id: unique ID to identify this flow profile
3571 * This function removes the flow entries associated to the input
3572 * vsi handle and disassociates the vsi from the flow profile.
3574 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3577 struct ice_flow_prof *prof = NULL;
3578 enum ice_status status = ICE_SUCCESS;
3580 if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3581 return ICE_ERR_PARAM;
3583 /* find flow profile pointer with input package block and profile id */
/* BUGFIX: look up the profile in the caller-supplied block, not a
 * hardcoded ICE_BLK_FD - the rest of this function (entry removal and
 * profile disassociation) already operates on 'blk', so a hardcoded
 * block would mix profiles across classification stages.
 */
3584 prof = ice_flow_find_prof_id(hw, blk, prof_id);
3586 ice_debug(hw, ICE_DBG_PKG,
3587 "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3588 return ICE_ERR_DOES_NOT_EXIST;
3591 /* Remove all remaining flow entries before removing the flow profile */
3592 if (!LIST_EMPTY(&prof->entries)) {
3593 struct ice_flow_entry *e, *t;
3595 ice_acquire_lock(&prof->entries_lock);
3596 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
/* Only entries belonging to this VSI are removed. */
3598 if (e->vsi_handle != vsi_handle)
3601 status = ice_flow_rem_entry_sync(hw, blk, e);
3605 ice_release_lock(&prof->entries_lock);
3610 /* disassociate the flow profile from sw vsi handle */
3611 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3613 ice_debug(hw, ICE_DBG_PKG,
3614 "ice_flow_disassoc_prof() failed with status=%d\n",
/* Header-bit groups used to validate RSS segment configuration:
 * at most one L3 and one L4 header may be selected per segment.
 */
3619 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3620 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_ETH_NON_IP | ICE_FLOW_SEG_HDR_VLAN)
3622 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3623 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3625 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3626 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3628 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3629 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3630 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3631 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3634 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3635 * @segs: pointer to the flow field segment(s)
3636 * @seg_cnt: segment count
3637 * @cfg: configure parameters
3639 * Helper function to extract fields from hash bitmap and use flow
3640 * header value to set flow field segment for further use in flow
3641 * profile entry or removal.
3643 static enum ice_status
3644 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3645 const struct ice_rss_hash_cfg *cfg)
3647 struct ice_flow_seg_info *seg;
3651 /* set inner most segment */
3652 seg = &segs[seg_cnt - 1];
/* Every field selected in the hash bitmap is matched with no explicit
 * value/mask/last location (RSS hashes, it does not compare values).
 */
3654 ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3655 ICE_FLOW_FIELD_IDX_MAX)
3656 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3657 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3658 ICE_FLOW_FLD_OFF_INVAL, false);
3660 ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3662 /* set outer most header */
3663 if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3664 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3665 ICE_FLOW_SEG_HDR_IPV_FRAG |
3666 ICE_FLOW_SEG_HDR_IPV_OTHER;
3667 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3668 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3669 ICE_FLOW_SEG_HDR_IPV_FRAG |
3670 ICE_FLOW_SEG_HDR_IPV_OTHER;
3671 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE)
3672 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3673 ICE_FLOW_SEG_HDR_GRE |
3674 ICE_FLOW_SEG_HDR_IPV_OTHER;
3675 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE)
3676 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3677 ICE_FLOW_SEG_HDR_GRE |
3678 ICE_FLOW_SEG_HDR_IPV_OTHER;
/* Reject header combinations RSS cannot express. */
3680 if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3681 ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
3682 ~ICE_FLOW_SEG_HDR_IPV_FRAG)
3683 return ICE_ERR_PARAM;
/* At most one L3 and at most one L4 protocol per segment. */
3685 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3686 if (val && !ice_is_pow2(val))
3689 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3690 if (val && !ice_is_pow2(val))
3697 * ice_rem_vsi_rss_list - remove VSI from RSS list
3698 * @hw: pointer to the hardware structure
3699 * @vsi_handle: software VSI handle
3701 * Remove the VSI from all RSS configurations in the list.
3703 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3705 struct ice_rss_cfg *r, *tmp;
3707 if (LIST_EMPTY(&hw->rss_list_head))
3710 ice_acquire_lock(&hw->rss_locks);
/* Drop the VSI from every config; free configs left with no VSIs. */
3711 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3712 ice_rss_cfg, l_entry)
3713 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3714 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3715 LIST_DEL(&r->l_entry);
3718 ice_release_lock(&hw->rss_locks);
3722 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3723 * @hw: pointer to the hardware structure
3724 * @vsi_handle: software VSI handle
3726 * This function will iterate through all flow profiles and disassociate
3727 * the VSI from that profile. If the flow profile has no VSIs it will
3730 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3732 const enum ice_block blk = ICE_BLK_RSS;
3733 struct ice_flow_prof *p, *t;
3734 enum ice_status status = ICE_SUCCESS;
3736 if (!ice_is_vsi_valid(hw, vsi_handle))
3737 return ICE_ERR_PARAM;
3739 if (LIST_EMPTY(&hw->fl_profs[blk]))
3742 ice_acquire_lock(&hw->rss_locks);
3743 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3745 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3746 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
/* Remove the profile entirely once no VSI references it. */
3750 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3751 status = ice_flow_rem_prof(hw, blk, p->id);
3756 ice_release_lock(&hw->rss_locks);
3762 * ice_get_rss_hdr_type - get a RSS profile's header type
3763 * @prof: RSS flow profile
/* Derives the configured header type (outer-only, inner, or inner with
 * a specific outer L3) from the profile's segment count and the outer
 * segment's header bits.
 */
3765 static enum ice_rss_cfg_hdr_type
3766 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3768 enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3770 if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3771 hdr_type = ICE_RSS_OUTER_HEADERS;
3772 } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3773 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3774 hdr_type = ICE_RSS_INNER_HEADERS;
3775 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3776 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3777 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3778 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3785 * ice_rem_rss_list - remove RSS configuration from list
3786 * @hw: pointer to the hardware structure
3787 * @vsi_handle: software VSI handle
3788 * @prof: pointer to flow profile
3790 * Assumption: lock has already been acquired for RSS list
3793 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3795 enum ice_rss_cfg_hdr_type hdr_type;
3796 struct ice_rss_cfg *r, *tmp;
3798 /* Search for RSS hash fields associated to the VSI that match the
3799 * hash configurations associated to the flow profile. If found
3800 * remove from the RSS entry list of the VSI context and delete entry.
3802 hdr_type = ice_get_rss_hdr_type(prof);
/* A list entry matches when hash fields, additional headers, and the
 * derived header type all agree with the profile's innermost segment.
 */
3803 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3804 ice_rss_cfg, l_entry)
3805 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3806 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3807 r->hash.hdr_type == hdr_type) {
3808 ice_clear_bit(vsi_handle, r->vsis);
3809 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3810 LIST_DEL(&r->l_entry);
3818 * ice_add_rss_list - add RSS configuration to list
3819 * @hw: pointer to the hardware structure
3820 * @vsi_handle: software VSI handle
3821 * @prof: pointer to flow profile
3823 * Assumption: lock has already been acquired for RSS list
3825 static enum ice_status
3826 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3828 enum ice_rss_cfg_hdr_type hdr_type;
3829 struct ice_rss_cfg *r, *rss_cfg;
3831 hdr_type = ice_get_rss_hdr_type(prof);
/* If an identical configuration already exists, just tag this VSI
 * onto it instead of creating a duplicate list entry.
 */
3832 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3833 ice_rss_cfg, l_entry)
3834 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3835 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3836 r->hash.hdr_type == hdr_type) {
3837 ice_set_bit(vsi_handle, r->vsis);
/* Otherwise allocate and populate a fresh config entry. */
3841 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3843 return ICE_ERR_NO_MEMORY;
3845 rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3846 rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3847 rss_cfg->hash.hdr_type = hdr_type;
3848 rss_cfg->hash.symm = prof->cfg.symm;
3849 ice_set_bit(vsi_handle, rss_cfg->vsis);
3851 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Bit layout of the 64-bit RSS flow profile ID (see ICE_FLOW_GEN_PROFID) */
#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	62
#define ICE_FLOW_PROF_ENCAP_M	(0x3ULL << ICE_FLOW_PROF_ENCAP_S)

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:61] - Protocol header
 * [62:63] - Encapsulation flag:
 *	     0 if non-tunneled
 *	     1 if tunneled
 *	     2 for tunneled with outer ipv4
 *	     3 for tunneled with outer ipv6
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
	((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	       (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	       (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
3878 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3880 u32 s = ((src % 4) << 3); /* byte shift */
3881 u32 v = dst | 0x80; /* value to program */
3882 u8 i = src / 4; /* register index */
3885 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3886 reg = (reg & ~(0xff << s)) | (v << s);
3887 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3891 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3894 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3897 for (i = 0; i < len; i++) {
3898 ice_rss_config_xor_word(hw, prof_id,
3899 /* Yes, field vector in GLQF_HSYMM and
3900 * GLQF_HINSET is inversed!
3902 fv_last_word - (src + i),
3903 fv_last_word - (dst + i));
3904 ice_rss_config_xor_word(hw, prof_id,
3905 fv_last_word - (dst + i),
3906 fv_last_word - (src + i));
/**
 * ice_rss_update_symm - apply a profile's symmetric-hash setting to HW
 * @hw: pointer to the hardware structure
 * @prof: flow profile whose symmetric configuration is programmed
 *
 * Clears all GLQF_HSYMM registers of the hardware profile backing @prof,
 * then, when symmetric hashing is requested, pairs the extracted
 * source/destination fields (IPv4/IPv6 addresses, TCP/UDP/SCTP ports) so
 * the hardware XORs them.
 */
static void
ice_rss_update_symm(struct ice_hw *hw,
		    struct ice_flow_prof *prof)
{
	struct ice_prof_map *map;
	u8 prof_id, m;

	/* Resolve the HW profile ID under the profile-map lock */
	ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
	map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
	if (map)
		prof_id = map->prof_id;
	ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
	if (!map)
		return;

	/* clear to default */
	/* NOTE(review): 6 = number of GLQF_HSYMM registers per profile;
	 * confirm against the E810 register map.
	 */
	for (m = 0; m < 6; m++)
		wr32(hw, GLQF_HSYMM(prof_id, m), 0);
	if (prof->cfg.symm) {
		/* Symmetric pairing uses the innermost packet segment */
		struct ice_flow_seg_info *seg =
			&prof->segs[prof->segs_cnt - 1];

		struct ice_flow_seg_xtrct *ipv4_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
		struct ice_flow_seg_xtrct *ipv4_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
		struct ice_flow_seg_xtrct *ipv6_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
		struct ice_flow_seg_xtrct *ipv6_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;

		struct ice_flow_seg_xtrct *tcp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *tcp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;

		struct ice_flow_seg_xtrct *udp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *udp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;

		struct ice_flow_seg_xtrct *sctp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *sctp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;

		/* xor IPv4: 2 FV words = 4 address bytes each way */
		if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   ipv4_src->idx, ipv4_dst->idx, 2);

		/* xor IPv6: 8 FV words = 16 address bytes each way */
		if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   ipv6_src->idx, ipv6_dst->idx, 8);

		/* xor TCP ports */
		if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   tcp_src->idx, tcp_dst->idx, 1);

		/* xor UDP ports */
		if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   udp_src->idx, udp_dst->idx, 1);

		/* xor SCTP ports */
		if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   sctp_src->idx, sctp_dst->idx, 1);
	}
}
/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @cfg: configure parameters
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
		     const struct ice_rss_hash_cfg *cfg)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;
	u8 segs_cnt;

	/* Outer-only configs use a single segment; tunneled configs need
	 * both outer and inner segments.
	 */
	segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
			ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;

	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
						      sizeof(*segs));
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
	if (status)
		goto exit;

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		/* Same profile already bound; only the symmetric setting may
		 * differ, in which case re-program the HSYMM registers.
		 */
		if (prof->cfg.symm == cfg->symm)
			goto exit;
		prof->cfg.symm = cfg->symm;
		goto update_symm;
	}

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		if (prof->cfg.symm == cfg->symm) {
			status = ice_flow_assoc_prof(hw, blk, prof,
						     vsi_handle);
			if (!status)
				status = ice_add_rss_list(hw, vsi_handle,
							  prof);
		} else {
			/* if a profile exist but with different symmetric
			 * requirement, just return error.
			 */
			status = ICE_ERR_NOT_SUPPORTED;
		}
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(cfg->hash_flds,
						       segs[segs_cnt - 1].hdrs,
						       cfg->hdr_type),
				   segs, segs_cnt, NULL, 0, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

	prof->cfg.symm = cfg->symm;
update_symm:
	ice_rss_update_symm(hw, prof);

exit:
	ice_free(hw, segs);
	return status;
}
4104 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
4105 * @hw: pointer to the hardware structure
4106 * @vsi_handle: software VSI handle
4107 * @cfg: configure parameters
4109 * This function will generate a flow profile based on fields associated with
4110 * the input fields to hash on, the flow type and use the VSI number to add
4111 * a flow entry to the profile.
4114 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4115 const struct ice_rss_hash_cfg *cfg)
4117 struct ice_rss_hash_cfg local_cfg;
4118 enum ice_status status;
4120 if (!ice_is_vsi_valid(hw, vsi_handle) ||
4121 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4122 cfg->hash_flds == ICE_HASH_INVALID)
4123 return ICE_ERR_PARAM;
4126 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4127 ice_acquire_lock(&hw->rss_locks);
4128 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4129 ice_release_lock(&hw->rss_locks);
4131 ice_acquire_lock(&hw->rss_locks);
4132 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4133 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4135 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4136 status = ice_add_rss_cfg_sync(hw, vsi_handle,
4139 ice_release_lock(&hw->rss_locks);
/**
 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @cfg: configure parameters
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
		     const struct ice_rss_hash_cfg *cfg)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_seg_info *segs;
	struct ice_flow_prof *prof;
	enum ice_status status;
	u8 segs_cnt;

	/* Outer-only configs use one segment; tunneled configs use both */
	segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
			ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
						      sizeof(*segs));
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
	if (status)
		goto out;

	/* Look up the profile by headers and hash fields */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto out;
	}

	status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
	if (status)
		goto out;

	/* Remove RSS configuration from VSI context before deleting
	 * the flow profile.
	 */
	ice_rem_rss_list(hw, vsi_handle, prof);

	/* Delete the profile once no VSI references it anymore */
	if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
		status = ice_flow_rem_prof(hw, blk, prof->id);

out:
	ice_free(hw, segs);
	return status;
}
4201 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
4202 * @hw: pointer to the hardware structure
4203 * @vsi_handle: software VSI handle
4204 * @cfg: configure parameters
4206 * This function will lookup the flow profile based on the input
4207 * hash field bitmap, iterate through the profile entry list of
4208 * that profile and find entry associated with input VSI to be
4209 * removed. Calls are made to underlying flow apis which will in
4210 * turn build or update buffers for RSS XLT1 section.
4213 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4214 const struct ice_rss_hash_cfg *cfg)
4216 struct ice_rss_hash_cfg local_cfg;
4217 enum ice_status status;
4219 if (!ice_is_vsi_valid(hw, vsi_handle) ||
4220 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4221 cfg->hash_flds == ICE_HASH_INVALID)
4222 return ICE_ERR_PARAM;
4224 ice_acquire_lock(&hw->rss_locks);
4226 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4227 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4229 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4230 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4233 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4234 status = ice_rem_rss_cfg_sync(hw, vsi_handle,
4238 ice_release_lock(&hw->rss_locks);
4244 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
4245 * @hw: pointer to the hardware structure
4246 * @vsi_handle: software VSI handle
4248 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
4250 enum ice_status status = ICE_SUCCESS;
4251 struct ice_rss_cfg *r;
4253 if (!ice_is_vsi_valid(hw, vsi_handle))
4254 return ICE_ERR_PARAM;
4256 ice_acquire_lock(&hw->rss_locks);
4257 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4258 ice_rss_cfg, l_entry) {
4259 if (ice_is_bit_set(r->vsis, vsi_handle)) {
4260 status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
4265 ice_release_lock(&hw->rss_locks);
4271 * ice_get_rss_cfg - returns hashed fields for the given header types
4272 * @hw: pointer to the hardware structure
4273 * @vsi_handle: software VSI handle
4274 * @hdrs: protocol header type
4276 * This function will return the match fields of the first instance of flow
4277 * profile having the given header types and containing input VSI
4279 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4281 u64 rss_hash = ICE_HASH_INVALID;
4282 struct ice_rss_cfg *r;
4284 /* verify if the protocol header is non zero and VSI is valid */
4285 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4286 return ICE_HASH_INVALID;
4288 ice_acquire_lock(&hw->rss_locks);
4289 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4290 ice_rss_cfg, l_entry)
4291 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4292 r->hash.addl_hdrs == hdrs) {
4293 rss_hash = r->hash.hash_flds;
4296 ice_release_lock(&hw->rss_locks);