/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2021 Intel Corporation
 */
5 #include "ice_common.h"
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE 2
10 #define ICE_FLOW_FLD_SZ_VLAN 2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IPV4_ID 2
17 #define ICE_FLOW_FLD_SZ_IPV6_ID 4
18 #define ICE_FLOW_FLD_SZ_IP_DSCP 1
19 #define ICE_FLOW_FLD_SZ_IP_TTL 1
20 #define ICE_FLOW_FLD_SZ_IP_PROT 1
21 #define ICE_FLOW_FLD_SZ_PORT 2
22 #define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
23 #define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
24 #define ICE_FLOW_FLD_SZ_ICMP_CODE 1
25 #define ICE_FLOW_FLD_SZ_ARP_OPER 2
26 #define ICE_FLOW_FLD_SZ_GRE_KEYID 4
27 #define ICE_FLOW_FLD_SZ_GTP_TEID 4
28 #define ICE_FLOW_FLD_SZ_GTP_QFI 2
29 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2
30 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
31 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID 4
32 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
33 #define ICE_FLOW_FLD_SZ_AH_SPI 4
34 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4
35 #define ICE_FLOW_FLD_SZ_VXLAN_VNI 4
36 #define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
38 /* Describe properties of a protocol header field */
39 struct ice_flow_field_info {
40 enum ice_flow_seg_hdr hdr;
41 s16 off; /* Offset from start of a protocol header, in bits */
42 u16 size; /* Size of fields in bits */
43 u16 mask; /* 16-bit mask for field */
/* Describe a header field with no per-field match mask (mask left zero) */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
}
/* Describe a header field together with an explicit 16-bit match mask */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
60 /* Table containing properties of supported protocol header fields */
62 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
64 /* ICE_FLOW_FIELD_IDX_ETH_DA */
65 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
66 /* ICE_FLOW_FIELD_IDX_ETH_SA */
67 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
68 /* ICE_FLOW_FIELD_IDX_S_VLAN */
69 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
70 /* ICE_FLOW_FIELD_IDX_C_VLAN */
71 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
72 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
73 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
75 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
76 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
78 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
79 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
81 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
82 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
83 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
84 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
85 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
86 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
87 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
88 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
89 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
90 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
91 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
92 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
93 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
94 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
95 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
97 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
99 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
100 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
101 /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
102 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
103 ICE_FLOW_FLD_SZ_IPV4_ID),
104 /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
105 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
106 ICE_FLOW_FLD_SZ_IPV6_ID),
107 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
108 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
109 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
110 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
111 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
112 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
113 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
114 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
115 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
116 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
118 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
119 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
120 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
121 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
122 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
123 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
124 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
126 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
128 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
129 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
130 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
131 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
132 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
133 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
134 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
135 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
136 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
137 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
138 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
139 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
141 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
142 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
143 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
144 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
145 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
146 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
147 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
148 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
149 /* ICE_FLOW_FIELD_IDX_ARP_OP */
150 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
152 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
153 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
154 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
155 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
157 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
158 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
160 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
161 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
162 ICE_FLOW_FLD_SZ_GTP_TEID),
163 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
164 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
165 ICE_FLOW_FLD_SZ_GTP_TEID),
166 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
167 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
168 ICE_FLOW_FLD_SZ_GTP_TEID),
169 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
170 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
171 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
172 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
173 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
174 ICE_FLOW_FLD_SZ_GTP_TEID),
175 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
176 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
177 ICE_FLOW_FLD_SZ_GTP_TEID),
179 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
180 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
181 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
183 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
184 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
185 ICE_FLOW_FLD_SZ_PFCP_SEID),
187 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
188 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
189 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
191 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
192 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
193 ICE_FLOW_FLD_SZ_ESP_SPI),
195 /* ICE_FLOW_FIELD_IDX_AH_SPI */
196 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
197 ICE_FLOW_FLD_SZ_AH_SPI),
199 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
200 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
201 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
202 /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
203 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
204 ICE_FLOW_FLD_SZ_VXLAN_VNI),
206 /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
207 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
208 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
210 /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
211 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
212 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
219 static const u32 ice_ptypes_mac_ofos[] = {
220 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
221 0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
222 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
223 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
224 0x00000000, 0x00000000, 0x00000000, 0x00000000,
225 0x00000000, 0x00000000, 0x00000000, 0x00000000,
226 0x00000000, 0x00000000, 0x00000000, 0x00000000,
227 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 /* Packet types for packets with an Innermost/Last MAC VLAN header */
231 static const u32 ice_ptypes_macvlan_il[] = {
232 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
233 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
234 0x00000000, 0x00000000, 0x00000000, 0x00000000,
235 0x00000000, 0x00000000, 0x00000000, 0x00000000,
236 0x00000000, 0x00000000, 0x00000000, 0x00000000,
237 0x00000000, 0x00000000, 0x00000000, 0x00000000,
238 0x00000000, 0x00000000, 0x00000000, 0x00000000,
239 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
 * does NOT include IPV4 other PTYPEs
 */
245 static const u32 ice_ptypes_ipv4_ofos[] = {
246 0x1D800000, 0x24000800, 0x00000000, 0x00000000,
247 0x00000000, 0x00000155, 0x00000000, 0x00000000,
248 0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
249 0x00001500, 0x00000000, 0x00000000, 0x00000000,
250 0x00000000, 0x00000000, 0x00000000, 0x00000000,
251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
 * includes IPV4 other PTYPEs
 */
259 static const u32 ice_ptypes_ipv4_ofos_all[] = {
260 0x1D800000, 0x24000800, 0x00000000, 0x00000000,
261 0x00000000, 0x00000155, 0x00000000, 0x00000000,
262 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
263 0x03FFD500, 0x00000000, 0x00000000, 0x00000000,
264 0x00000000, 0x00000000, 0x00000000, 0x00000000,
265 0x00000000, 0x00000000, 0x00000000, 0x00000000,
266 0x00000000, 0x00000000, 0x00000000, 0x00000000,
267 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 /* Packet types for packets with an Innermost/Last IPv4 header */
271 static const u32 ice_ptypes_ipv4_il[] = {
272 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
273 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
274 0x00000000, 0x00000000, 0x001FF800, 0x00100000,
275 0xFC0FC000, 0x00000000, 0x00000000, 0x00000000,
276 0x00000000, 0x00000000, 0x00000000, 0x00000000,
277 0x00000000, 0x00000000, 0x00000000, 0x00000000,
278 0x00000000, 0x00000000, 0x00000000, 0x00000000,
279 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
 * does NOT include IPv6 other PTYPEs
 */
285 static const u32 ice_ptypes_ipv6_ofos[] = {
286 0x00000000, 0x00000000, 0x76000000, 0x10002000,
287 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
288 0x00000000, 0x03F00000, 0x00000540, 0x00000000,
289 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
290 0x00000000, 0x00000000, 0x00000000, 0x00000000,
291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
293 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
 * includes IPV6 other PTYPEs
 */
299 static const u32 ice_ptypes_ipv6_ofos_all[] = {
300 0x00000000, 0x00000000, 0x76000000, 0x10002000,
301 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
302 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
303 0xFC002A00, 0x0000003F, 0x00000000, 0x00000000,
304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
305 0x00000000, 0x00000000, 0x00000000, 0x00000000,
306 0x00000000, 0x00000000, 0x00000000, 0x00000000,
307 0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 /* Packet types for packets with an Innermost/Last IPv6 header */
311 static const u32 ice_ptypes_ipv6_il[] = {
312 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
313 0x00000770, 0x00000000, 0x00000000, 0x00000000,
314 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
315 0x03F00000, 0x0000003F, 0x00000000, 0x00000000,
316 0x00000000, 0x00000000, 0x00000000, 0x00000000,
317 0x00000000, 0x00000000, 0x00000000, 0x00000000,
318 0x00000000, 0x00000000, 0x00000000, 0x00000000,
319 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single
 * non-frag IPv4 header - no L4
 */
325 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
326 0x10800000, 0x04000800, 0x00000000, 0x00000000,
327 0x00000000, 0x00000000, 0x00000000, 0x00000000,
328 0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
329 0x00001500, 0x00000000, 0x00000000, 0x00000000,
330 0x00000000, 0x00000000, 0x00000000, 0x00000000,
331 0x00000000, 0x00000000, 0x00000000, 0x00000000,
332 0x00000000, 0x00000000, 0x00000000, 0x00000000,
333 0x00000000, 0x00000000, 0x00000000, 0x00000000,
336 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
337 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
338 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
339 0x00000008, 0x00000000, 0x00000000, 0x00000000,
340 0x00000000, 0x00000000, 0x00139800, 0x00000000,
341 0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
342 0x00000000, 0x00000000, 0x00000000, 0x00000000,
343 0x00000000, 0x00000000, 0x00000000, 0x00000000,
344 0x00000000, 0x00000000, 0x00000000, 0x00000000,
345 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* Packet types for packets with an Outer/First/Single
 * non-frag IPv6 header - no L4
 */
351 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
352 0x00000000, 0x00000000, 0x42000000, 0x10002000,
353 0x00000000, 0x00000000, 0x00000000, 0x00000000,
354 0x00000000, 0x02300000, 0x00000540, 0x00000000,
355 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
356 0x00000000, 0x00000000, 0x00000000, 0x00000000,
357 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 0x00000000, 0x00000000, 0x00000000, 0x00000000,
362 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
363 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
364 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
365 0x00000430, 0x00000000, 0x00000000, 0x00000000,
366 0x00000000, 0x00000000, 0x4e600000, 0x00000000,
367 0x02300000, 0x00000023, 0x00000000, 0x00000000,
368 0x00000000, 0x00000000, 0x00000000, 0x00000000,
369 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 0x00000000, 0x00000000, 0x00000000, 0x00000000,
371 0x00000000, 0x00000000, 0x00000000, 0x00000000,
374 /* Packet types for packets with an Outermost/First ARP header */
375 static const u32 ice_ptypes_arp_of[] = {
376 0x00000800, 0x00000000, 0x00000000, 0x00000000,
377 0x00000000, 0x00000000, 0x00000000, 0x00000000,
378 0x00000000, 0x00000000, 0x00000000, 0x00000000,
379 0x00000000, 0x00000000, 0x00000000, 0x00000000,
380 0x00000000, 0x00000000, 0x00000000, 0x00000000,
381 0x00000000, 0x00000000, 0x00000000, 0x00000000,
382 0x00000000, 0x00000000, 0x00000000, 0x00000000,
383 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
389 static const u32 ice_ptypes_udp_il[] = {
390 0x81000000, 0x20204040, 0x04000010, 0x80810102,
391 0x00000040, 0x00000000, 0x00000000, 0x00000000,
392 0x00000000, 0x00410000, 0x908427E0, 0x00100007,
393 0x10410000, 0x00000004, 0x00000000, 0x00000000,
394 0x00000000, 0x00000000, 0x00000000, 0x00000000,
395 0x00000000, 0x00000000, 0x00000000, 0x00000000,
396 0x00000000, 0x00000000, 0x00000000, 0x00000000,
397 0x00000000, 0x00000000, 0x00000000, 0x00000000,
400 /* Packet types for packets with an Innermost/Last TCP header */
401 static const u32 ice_ptypes_tcp_il[] = {
402 0x04000000, 0x80810102, 0x10000040, 0x02040408,
403 0x00000102, 0x00000000, 0x00000000, 0x00000000,
404 0x00000000, 0x00820000, 0x21084000, 0x00000000,
405 0x20820000, 0x00000008, 0x00000000, 0x00000000,
406 0x00000000, 0x00000000, 0x00000000, 0x00000000,
407 0x00000000, 0x00000000, 0x00000000, 0x00000000,
408 0x00000000, 0x00000000, 0x00000000, 0x00000000,
409 0x00000000, 0x00000000, 0x00000000, 0x00000000,
412 /* Packet types for packets with an Innermost/Last SCTP header */
413 static const u32 ice_ptypes_sctp_il[] = {
414 0x08000000, 0x01020204, 0x20000081, 0x04080810,
415 0x00000204, 0x00000000, 0x00000000, 0x00000000,
416 0x00000000, 0x01040000, 0x00000000, 0x00000000,
417 0x41040000, 0x00000010, 0x00000000, 0x00000000,
418 0x00000000, 0x00000000, 0x00000000, 0x00000000,
419 0x00000000, 0x00000000, 0x00000000, 0x00000000,
420 0x00000000, 0x00000000, 0x00000000, 0x00000000,
421 0x00000000, 0x00000000, 0x00000000, 0x00000000,
424 /* Packet types for packets with an Outermost/First ICMP header */
425 static const u32 ice_ptypes_icmp_of[] = {
426 0x10000000, 0x00000000, 0x00000000, 0x00000000,
427 0x00000000, 0x00000000, 0x00000000, 0x00000000,
428 0x00000000, 0x00000000, 0x00000000, 0x00000000,
429 0x00000000, 0x00000000, 0x00000000, 0x00000000,
430 0x00000000, 0x00000000, 0x00000000, 0x00000000,
431 0x00000000, 0x00000000, 0x00000000, 0x00000000,
432 0x00000000, 0x00000000, 0x00000000, 0x00000000,
433 0x00000000, 0x00000000, 0x00000000, 0x00000000,
436 /* Packet types for packets with an Innermost/Last ICMP header */
437 static const u32 ice_ptypes_icmp_il[] = {
438 0x00000000, 0x02040408, 0x40000102, 0x08101020,
439 0x00000408, 0x00000000, 0x00000000, 0x00000000,
440 0x00000000, 0x00000000, 0x42108000, 0x00000000,
441 0x82080000, 0x00000020, 0x00000000, 0x00000000,
442 0x00000000, 0x00000000, 0x00000000, 0x00000000,
443 0x00000000, 0x00000000, 0x00000000, 0x00000000,
444 0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 0x00000000, 0x00000000, 0x00000000, 0x00000000,
448 /* Packet types for packets with an Outermost/First GRE header */
449 static const u32 ice_ptypes_gre_of[] = {
450 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
451 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
452 0x00000000, 0x00000000, 0x00000000, 0x00000000,
453 0x00000000, 0x00000000, 0x00000000, 0x00000000,
454 0x00000000, 0x00000000, 0x00000000, 0x00000000,
455 0x00000000, 0x00000000, 0x00000000, 0x00000000,
456 0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 0x00000000, 0x00000000, 0x00000000, 0x00000000,
460 /* Packet types for packets with an Innermost/Last MAC header */
461 static const u32 ice_ptypes_mac_il[] = {
462 0x00000000, 0x20000000, 0x00000000, 0x00000000,
463 0x00000000, 0x00000000, 0x00000000, 0x00000000,
464 0x00000000, 0x00000000, 0x00000000, 0x00000000,
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
466 0x00000000, 0x00000000, 0x00000000, 0x00000000,
467 0x00000000, 0x00000000, 0x00000000, 0x00000000,
468 0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 0x00000000, 0x00000000, 0x00000000, 0x00000000,
472 /* Packet types for GTPC */
473 static const u32 ice_ptypes_gtpc[] = {
474 0x00000000, 0x00000000, 0x00000000, 0x00000000,
475 0x00000000, 0x00000000, 0x00000000, 0x00000000,
476 0x00000000, 0x00000000, 0x000001E0, 0x00000000,
477 0x00000000, 0x00000000, 0x00000000, 0x00000000,
478 0x00000000, 0x00000000, 0x00000000, 0x00000000,
479 0x00000000, 0x00000000, 0x00000000, 0x00000000,
480 0x00000000, 0x00000000, 0x00000000, 0x00000000,
481 0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 /* Packet types for VXLAN with VNI */
485 static const u32 ice_ptypes_vxlan_vni[] = {
486 0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
487 0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
488 0x00000000, 0x00000000, 0x00000000, 0x00000000,
489 0x00000000, 0x00000000, 0x00000000, 0x00000000,
490 0x00000000, 0x00000000, 0x00000000, 0x00000000,
491 0x00000000, 0x00000000, 0x00000000, 0x00000000,
492 0x00000000, 0x00000000, 0x00000000, 0x00000000,
493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
496 /* Packet types for GTPC with TEID */
497 static const u32 ice_ptypes_gtpc_tid[] = {
498 0x00000000, 0x00000000, 0x00000000, 0x00000000,
499 0x00000000, 0x00000000, 0x00000000, 0x00000000,
500 0x00000000, 0x00000000, 0x00000060, 0x00000000,
501 0x00000000, 0x00000000, 0x00000000, 0x00000000,
502 0x00000000, 0x00000000, 0x00000000, 0x00000000,
503 0x00000000, 0x00000000, 0x00000000, 0x00000000,
504 0x00000000, 0x00000000, 0x00000000, 0x00000000,
505 0x00000000, 0x00000000, 0x00000000, 0x00000000,
508 /* Packet types for GTPU */
509 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
510 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
511 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
512 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
513 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
514 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
515 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
516 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
517 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
518 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
519 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
520 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
521 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
522 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
523 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
524 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
525 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
526 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
527 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
528 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
529 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
532 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
533 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
534 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
535 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
536 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
537 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
538 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
539 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
540 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
541 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
542 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
543 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
544 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
545 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
546 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
547 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
548 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
549 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
550 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
551 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
552 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
555 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
556 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
557 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
558 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
559 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
560 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
561 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
562 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
563 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
564 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
565 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
566 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
567 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
568 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
569 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
570 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
571 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
572 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
573 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
574 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
575 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
578 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
579 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
580 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
581 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
582 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
583 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
584 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
585 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
586 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
587 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
588 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
589 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
590 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
591 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
592 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
593 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
594 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
595 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
596 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
597 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
598 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
601 static const u32 ice_ptypes_gtpu[] = {
602 0x00000000, 0x00000000, 0x00000000, 0x00000000,
603 0x00000000, 0x00000000, 0x00000000, 0x00000000,
604 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
605 0x00000000, 0x00000000, 0x00000000, 0x00000000,
606 0x00000000, 0x00000000, 0x00000000, 0x00000000,
607 0x00000000, 0x00000000, 0x00000000, 0x00000000,
608 0x00000000, 0x00000000, 0x00000000, 0x00000000,
609 0x00000000, 0x00000000, 0x00000000, 0x00000000,
612 /* Packet types for pppoe */
613 static const u32 ice_ptypes_pppoe[] = {
614 0x00000000, 0x00000000, 0x00000000, 0x00000000,
615 0x00000000, 0x00000000, 0x00000000, 0x00000000,
616 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
617 0x00000000, 0x00000000, 0x00000000, 0x00000000,
618 0x00000000, 0x00000000, 0x00000000, 0x00000000,
619 0x00000000, 0x00000000, 0x00000000, 0x00000000,
620 0x00000000, 0x00000000, 0x00000000, 0x00000000,
621 0x00000000, 0x00000000, 0x00000000, 0x00000000,
624 /* Packet types for packets with PFCP NODE header */
625 static const u32 ice_ptypes_pfcp_node[] = {
626 0x00000000, 0x00000000, 0x00000000, 0x00000000,
627 0x00000000, 0x00000000, 0x00000000, 0x00000000,
628 0x00000000, 0x00000000, 0x80000000, 0x00000002,
629 0x00000000, 0x00000000, 0x00000000, 0x00000000,
630 0x00000000, 0x00000000, 0x00000000, 0x00000000,
631 0x00000000, 0x00000000, 0x00000000, 0x00000000,
632 0x00000000, 0x00000000, 0x00000000, 0x00000000,
633 0x00000000, 0x00000000, 0x00000000, 0x00000000,
636 /* Packet types for packets with PFCP SESSION header */
637 static const u32 ice_ptypes_pfcp_session[] = {
638 0x00000000, 0x00000000, 0x00000000, 0x00000000,
639 0x00000000, 0x00000000, 0x00000000, 0x00000000,
640 0x00000000, 0x00000000, 0x00000000, 0x00000005,
641 0x00000000, 0x00000000, 0x00000000, 0x00000000,
642 0x00000000, 0x00000000, 0x00000000, 0x00000000,
643 0x00000000, 0x00000000, 0x00000000, 0x00000000,
644 0x00000000, 0x00000000, 0x00000000, 0x00000000,
645 0x00000000, 0x00000000, 0x00000000, 0x00000000,
648 /* Packet types for l2tpv3 */
649 static const u32 ice_ptypes_l2tpv3[] = {
650 0x00000000, 0x00000000, 0x00000000, 0x00000000,
651 0x00000000, 0x00000000, 0x00000000, 0x00000000,
652 0x00000000, 0x00000000, 0x00000000, 0x00000300,
653 0x00000000, 0x00000000, 0x00000000, 0x00000000,
654 0x00000000, 0x00000000, 0x00000000, 0x00000000,
655 0x00000000, 0x00000000, 0x00000000, 0x00000000,
656 0x00000000, 0x00000000, 0x00000000, 0x00000000,
657 0x00000000, 0x00000000, 0x00000000, 0x00000000,
660 /* Packet types for esp */
661 static const u32 ice_ptypes_esp[] = {
662 0x00000000, 0x00000000, 0x00000000, 0x00000000,
663 0x00000000, 0x00000003, 0x00000000, 0x00000000,
664 0x00000000, 0x00000000, 0x00000000, 0x00000000,
665 0x00000000, 0x00000000, 0x00000000, 0x00000000,
666 0x00000000, 0x00000000, 0x00000000, 0x00000000,
667 0x00000000, 0x00000000, 0x00000000, 0x00000000,
668 0x00000000, 0x00000000, 0x00000000, 0x00000000,
669 0x00000000, 0x00000000, 0x00000000, 0x00000000,
672 /* Packet types for ah */
673 static const u32 ice_ptypes_ah[] = {
674 0x00000000, 0x00000000, 0x00000000, 0x00000000,
675 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
676 0x00000000, 0x00000000, 0x00000000, 0x00000000,
677 0x00000000, 0x00000000, 0x00000000, 0x00000000,
678 0x00000000, 0x00000000, 0x00000000, 0x00000000,
679 0x00000000, 0x00000000, 0x00000000, 0x00000000,
680 0x00000000, 0x00000000, 0x00000000, 0x00000000,
681 0x00000000, 0x00000000, 0x00000000, 0x00000000,
684 /* Packet types for packets with NAT_T ESP header */
685 static const u32 ice_ptypes_nat_t_esp[] = {
686 0x00000000, 0x00000000, 0x00000000, 0x00000000,
687 0x00000000, 0x00000030, 0x00000000, 0x00000000,
688 0x00000000, 0x00000000, 0x00000000, 0x00000000,
689 0x00000000, 0x00000000, 0x00000000, 0x00000000,
690 0x00000000, 0x00000000, 0x00000000, 0x00000000,
691 0x00000000, 0x00000000, 0x00000000, 0x00000000,
692 0x00000000, 0x00000000, 0x00000000, 0x00000000,
693 0x00000000, 0x00000000, 0x00000000, 0x00000000,
696 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
697 0x00000846, 0x00000000, 0x00000000, 0x00000000,
698 0x00000000, 0x00000000, 0x00000000, 0x00000000,
699 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
700 0x00000000, 0x00000000, 0x00000000, 0x00000000,
701 0x00000000, 0x00000000, 0x00000000, 0x00000000,
702 0x00000000, 0x00000000, 0x00000000, 0x00000000,
703 0x00000000, 0x00000000, 0x00000000, 0x00000000,
704 0x00000000, 0x00000000, 0x00000000, 0x00000000,
707 static const u32 ice_ptypes_gtpu_no_ip[] = {
708 0x00000000, 0x00000000, 0x00000000, 0x00000000,
709 0x00000000, 0x00000000, 0x00000000, 0x00000000,
710 0x00000000, 0x00000000, 0x00000600, 0x00000000,
711 0x00000000, 0x00000000, 0x00000000, 0x00000000,
712 0x00000000, 0x00000000, 0x00000000, 0x00000000,
713 0x00000000, 0x00000000, 0x00000000, 0x00000000,
714 0x00000000, 0x00000000, 0x00000000, 0x00000000,
715 0x00000000, 0x00000000, 0x00000000, 0x00000000,
718 static const u32 ice_ptypes_ecpri_tp0[] = {
719 0x00000000, 0x00000000, 0x00000000, 0x00000000,
720 0x00000000, 0x00000000, 0x00000000, 0x00000000,
721 0x00000000, 0x00000000, 0x00000000, 0x00000400,
722 0x00000000, 0x00000000, 0x00000000, 0x00000000,
723 0x00000000, 0x00000000, 0x00000000, 0x00000000,
724 0x00000000, 0x00000000, 0x00000000, 0x00000000,
725 0x00000000, 0x00000000, 0x00000000, 0x00000000,
726 0x00000000, 0x00000000, 0x00000000, 0x00000000,
729 static const u32 ice_ptypes_udp_ecpri_tp0[] = {
730 0x00000000, 0x00000000, 0x00000000, 0x00000000,
731 0x00000000, 0x00000000, 0x00000000, 0x00000000,
732 0x00000000, 0x00000000, 0x00000000, 0x00100000,
733 0x00000000, 0x00000000, 0x00000000, 0x00000000,
734 0x00000000, 0x00000000, 0x00000000, 0x00000000,
735 0x00000000, 0x00000000, 0x00000000, 0x00000000,
736 0x00000000, 0x00000000, 0x00000000, 0x00000000,
737 0x00000000, 0x00000000, 0x00000000, 0x00000000,
740 static const u32 ice_ptypes_l2tpv2[] = {
741 0x00000000, 0x00000000, 0x00000000, 0x00000000,
742 0x00000000, 0x00000000, 0x00000000, 0x00000000,
743 0x00000000, 0x00000000, 0x00000000, 0x00000000,
744 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
745 0x00000000, 0x00000000, 0x00000000, 0x00000000,
746 0x00000000, 0x00000000, 0x00000000, 0x00000000,
747 0x00000000, 0x00000000, 0x00000000, 0x00000000,
748 0x00000000, 0x00000000, 0x00000000, 0x00000000,
751 static const u32 ice_ptypes_ppp[] = {
752 0x00000000, 0x00000000, 0x00000000, 0x00000000,
753 0x00000000, 0x00000000, 0x00000000, 0x00000000,
754 0x00000000, 0x00000000, 0x00000000, 0x00000000,
755 0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
756 0x00000000, 0x00000000, 0x00000000, 0x00000000,
757 0x00000000, 0x00000000, 0x00000000, 0x00000000,
758 0x00000000, 0x00000000, 0x00000000, 0x00000000,
759 0x00000000, 0x00000000, 0x00000000, 0x00000000,
762 static const u32 ice_ptypes_ipv4_frag[] = {
763 0x00400000, 0x00000000, 0x00000000, 0x00000000,
764 0x00000000, 0x00000000, 0x00000000, 0x00000000,
765 0x00000000, 0x00000000, 0x00000000, 0x00000000,
766 0x00000000, 0x00000000, 0x00000000, 0x00000000,
767 0x00000000, 0x00000000, 0x00000000, 0x00000000,
768 0x00000000, 0x00000000, 0x00000000, 0x00000000,
769 0x00000000, 0x00000000, 0x00000000, 0x00000000,
770 0x00000000, 0x00000000, 0x00000000, 0x00000000,
773 static const u32 ice_ptypes_ipv6_frag[] = {
774 0x00000000, 0x00000000, 0x01000000, 0x00000000,
775 0x00000000, 0x00000000, 0x00000000, 0x00000000,
776 0x00000000, 0x00000000, 0x00000000, 0x00000000,
777 0x00000000, 0x00000000, 0x00000000, 0x00000000,
778 0x00000000, 0x00000000, 0x00000000, 0x00000000,
779 0x00000000, 0x00000000, 0x00000000, 0x00000000,
780 0x00000000, 0x00000000, 0x00000000, 0x00000000,
781 0x00000000, 0x00000000, 0x00000000, 0x00000000,
784 /* Manage parameters and info. used during the creation of a flow profile */
785 struct ice_flow_prof_params {
787 u16 entry_length; /* # of bytes formatted entry will require */
789 struct ice_flow_prof *prof;
791 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
792 * This will give us the direction flags.
794 struct ice_fv_word es[ICE_MAX_FV_WORDS];
795 /* attributes can be used to add attributes to a particular PTYPE */
796 const struct ice_ptype_attributes *attr;
799 u16 mask[ICE_MAX_FV_WORDS];
800 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Header flags that select inner (tunneled) headers for RSS hashing */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	 ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
	 ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
	 ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP)

#define ICE_FLOW_SEG_HDRS_L2_MASK \
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
825 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
826 * @segs: array of one or more packet segments that describe the flow
827 * @segs_cnt: number of packet segments provided
829 static enum ice_status
830 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
834 for (i = 0; i < segs_cnt; i++) {
835 /* Multiple L3 headers */
836 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
837 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
838 return ICE_ERR_PARAM;
840 /* Multiple L4 headers */
841 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
842 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
843 return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
861 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
862 * @params: information about the flow to be processed
863 * @seg: index of packet segment whose header size is to be determined
865 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
870 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
871 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
874 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
875 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
876 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
877 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
878 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
879 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
880 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
881 /* A L3 header is required if L4 is specified */
885 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
886 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
887 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
888 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
889 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
890 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
891 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
892 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
898 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
899 * @params: information about the flow to be processed
901 * This function identifies the packet types associated with the protocol
902 * headers being present in packet segments of the specified flow profile.
904 static enum ice_status
905 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
907 struct ice_flow_prof *prof;
910 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
915 for (i = 0; i < params->prof->segs_cnt; i++) {
916 const ice_bitmap_t *src;
919 hdrs = prof->segs[i].hdrs;
921 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
922 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
923 (const ice_bitmap_t *)ice_ptypes_mac_il;
924 ice_and_bitmap(params->ptypes, params->ptypes, src,
928 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
929 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
930 ice_and_bitmap(params->ptypes, params->ptypes, src,
934 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
935 ice_and_bitmap(params->ptypes, params->ptypes,
936 (const ice_bitmap_t *)ice_ptypes_arp_of,
940 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
941 src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
942 ice_and_bitmap(params->ptypes, params->ptypes, src,
945 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
946 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
948 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
949 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
950 ice_and_bitmap(params->ptypes, params->ptypes, src,
952 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
953 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
955 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
956 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
957 ice_and_bitmap(params->ptypes, params->ptypes, src,
959 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
960 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
961 src = (const ice_bitmap_t *)ice_ptypes_ipv4_frag;
962 ice_and_bitmap(params->ptypes, params->ptypes, src,
964 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
965 (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
966 src = (const ice_bitmap_t *)ice_ptypes_ipv6_frag;
967 ice_and_bitmap(params->ptypes, params->ptypes, src,
969 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
970 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
971 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
972 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
973 ice_and_bitmap(params->ptypes, params->ptypes, src,
975 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
976 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
977 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
978 ice_and_bitmap(params->ptypes, params->ptypes, src,
980 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
981 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
982 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
983 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
984 ice_and_bitmap(params->ptypes, params->ptypes, src,
986 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
987 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
988 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
989 ice_and_bitmap(params->ptypes, params->ptypes, src,
993 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
994 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
995 ice_and_bitmap(params->ptypes, params->ptypes,
996 src, ICE_FLOW_PTYPE_MAX);
997 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
998 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
999 ice_and_bitmap(params->ptypes, params->ptypes, src,
1000 ICE_FLOW_PTYPE_MAX);
1002 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1003 ice_andnot_bitmap(params->ptypes, params->ptypes, src,
1004 ICE_FLOW_PTYPE_MAX);
1007 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
1008 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
1009 ice_and_bitmap(params->ptypes, params->ptypes, src,
1010 ICE_FLOW_PTYPE_MAX);
1011 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
1012 ice_and_bitmap(params->ptypes, params->ptypes,
1013 (const ice_bitmap_t *)ice_ptypes_tcp_il,
1014 ICE_FLOW_PTYPE_MAX);
1015 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
1016 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
1017 ice_and_bitmap(params->ptypes, params->ptypes, src,
1018 ICE_FLOW_PTYPE_MAX);
1021 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
1022 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
1023 (const ice_bitmap_t *)ice_ptypes_icmp_il;
1024 ice_and_bitmap(params->ptypes, params->ptypes, src,
1025 ICE_FLOW_PTYPE_MAX);
1026 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
1028 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
1029 ice_and_bitmap(params->ptypes, params->ptypes,
1030 src, ICE_FLOW_PTYPE_MAX);
1032 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
1033 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
1034 ice_and_bitmap(params->ptypes, params->ptypes,
1035 src, ICE_FLOW_PTYPE_MAX);
1036 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
1037 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
1038 ice_and_bitmap(params->ptypes, params->ptypes,
1039 src, ICE_FLOW_PTYPE_MAX);
1040 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
1041 src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
1042 ice_and_bitmap(params->ptypes, params->ptypes,
1043 src, ICE_FLOW_PTYPE_MAX);
1044 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
1045 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1046 ice_and_bitmap(params->ptypes, params->ptypes,
1047 src, ICE_FLOW_PTYPE_MAX);
1049 /* Attributes for GTP packet with downlink */
1050 params->attr = ice_attr_gtpu_down;
1051 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1052 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
1053 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1054 ice_and_bitmap(params->ptypes, params->ptypes,
1055 src, ICE_FLOW_PTYPE_MAX);
1057 /* Attributes for GTP packet with uplink */
1058 params->attr = ice_attr_gtpu_up;
1059 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1060 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
1061 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1062 ice_and_bitmap(params->ptypes, params->ptypes,
1063 src, ICE_FLOW_PTYPE_MAX);
1065 /* Attributes for GTP packet with Extension Header */
1066 params->attr = ice_attr_gtpu_eh;
1067 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1068 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1069 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1070 ice_and_bitmap(params->ptypes, params->ptypes,
1071 src, ICE_FLOW_PTYPE_MAX);
1073 /* Attributes for GTP packet without Extension Header */
1074 params->attr = ice_attr_gtpu_session;
1075 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1076 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
1077 src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
1078 ice_and_bitmap(params->ptypes, params->ptypes,
1079 src, ICE_FLOW_PTYPE_MAX);
1080 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1081 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1082 ice_and_bitmap(params->ptypes, params->ptypes,
1083 src, ICE_FLOW_PTYPE_MAX);
1084 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1085 src = (const ice_bitmap_t *)ice_ptypes_esp;
1086 ice_and_bitmap(params->ptypes, params->ptypes,
1087 src, ICE_FLOW_PTYPE_MAX);
1088 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1089 src = (const ice_bitmap_t *)ice_ptypes_ah;
1090 ice_and_bitmap(params->ptypes, params->ptypes,
1091 src, ICE_FLOW_PTYPE_MAX);
1092 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1093 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1094 ice_and_bitmap(params->ptypes, params->ptypes,
1095 src, ICE_FLOW_PTYPE_MAX);
1096 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1097 src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1098 ice_and_bitmap(params->ptypes, params->ptypes,
1099 src, ICE_FLOW_PTYPE_MAX);
1100 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1101 src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1102 ice_and_bitmap(params->ptypes, params->ptypes,
1103 src, ICE_FLOW_PTYPE_MAX);
1106 if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
1107 src = (const ice_bitmap_t *)ice_ptypes_ppp;
1108 ice_and_bitmap(params->ptypes, params->ptypes,
1109 src, ICE_FLOW_PTYPE_MAX);
1112 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1113 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1115 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1118 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1120 ice_and_bitmap(params->ptypes, params->ptypes,
1121 src, ICE_FLOW_PTYPE_MAX);
1123 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1124 ice_andnot_bitmap(params->ptypes, params->ptypes,
1125 src, ICE_FLOW_PTYPE_MAX);
1127 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1128 ice_andnot_bitmap(params->ptypes, params->ptypes,
1129 src, ICE_FLOW_PTYPE_MAX);
1137 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
1138 * @hw: pointer to the HW struct
1139 * @params: information about the flow to be processed
1140 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1142 * This function will allocate an extraction sequence entries for a DWORD size
1143 * chunk of the packet flags.
1145 static enum ice_status
1146 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1147 struct ice_flow_prof_params *params,
1148 enum ice_flex_mdid_pkt_flags flags)
1150 u8 fv_words = hw->blk[params->blk].es.fvw;
1153 /* Make sure the number of extraction sequence entries required does not
1154 * exceed the block's capacity.
1156 if (params->es_cnt >= fv_words)
1157 return ICE_ERR_MAX_LIMIT;
1159 /* some blocks require a reversed field vector layout */
1160 if (hw->blk[params->blk].es.reverse)
1161 idx = fv_words - params->es_cnt - 1;
1163 idx = params->es_cnt;
1165 params->es[idx].prot_id = ICE_PROT_META_ID;
1166 params->es[idx].off = flags;
1173 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1174 * @hw: pointer to the HW struct
1175 * @params: information about the flow to be processed
1176 * @seg: packet segment index of the field to be extracted
1177 * @fld: ID of field to be extracted
1178 * @match: bitfield of all fields
1180 * This function determines the protocol ID, offset, and size of the given
1181 * field. It then allocates one or more extraction sequence entries for the
1182 * given field, and fill the entries with protocol ID and offset information.
1184 static enum ice_status
1185 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1186 u8 seg, enum ice_flow_field fld, u64 match)
1188 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1189 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1190 u8 fv_words = hw->blk[params->blk].es.fvw;
1191 struct ice_flow_fld_info *flds;
1192 u16 cnt, ese_bits, i;
1197 flds = params->prof->segs[seg].fields;
1200 case ICE_FLOW_FIELD_IDX_ETH_DA:
1201 case ICE_FLOW_FIELD_IDX_ETH_SA:
1202 case ICE_FLOW_FIELD_IDX_S_VLAN:
1203 case ICE_FLOW_FIELD_IDX_C_VLAN:
1204 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1206 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1207 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1209 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1210 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1212 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1213 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1215 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1216 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1217 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1219 /* TTL and PROT share the same extraction seq. entry.
1220 * Each is considered a sibling to the other in terms of sharing
1221 * the same extraction sequence entry.
1223 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1224 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1226 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1228 /* If the sibling field is also included, that field's
1229 * mask needs to be included.
1231 if (match & BIT(sib))
1232 sib_mask = ice_flds_info[sib].mask;
1234 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1235 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1236 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1238 /* TTL and PROT share the same extraction seq. entry.
1239 * Each is considered a sibling to the other in terms of sharing
1240 * the same extraction sequence entry.
1242 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1243 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1245 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1247 /* If the sibling field is also included, that field's
1248 * mask needs to be included.
1250 if (match & BIT(sib))
1251 sib_mask = ice_flds_info[sib].mask;
1253 case ICE_FLOW_FIELD_IDX_IPV4_SA:
1254 case ICE_FLOW_FIELD_IDX_IPV4_DA:
1255 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1257 case ICE_FLOW_FIELD_IDX_IPV4_ID:
1258 prot_id = ICE_PROT_IPV4_OF_OR_S;
1260 case ICE_FLOW_FIELD_IDX_IPV6_SA:
1261 case ICE_FLOW_FIELD_IDX_IPV6_DA:
1262 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1263 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1264 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1265 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1266 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1267 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1268 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1270 case ICE_FLOW_FIELD_IDX_IPV6_ID:
1271 prot_id = ICE_PROT_IPV6_FRAG;
1273 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1274 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1275 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1276 prot_id = ICE_PROT_TCP_IL;
1278 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1279 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1280 prot_id = ICE_PROT_UDP_IL_OR_S;
1282 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1283 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1284 prot_id = ICE_PROT_SCTP_IL;
1286 case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1287 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1288 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1289 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1290 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1291 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1292 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1293 /* GTP is accessed through UDP OF protocol */
1294 prot_id = ICE_PROT_UDP_OF;
1296 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1297 prot_id = ICE_PROT_PPPOE;
1299 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1300 prot_id = ICE_PROT_UDP_IL_OR_S;
1302 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1303 prot_id = ICE_PROT_L2TPV3;
1305 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1306 prot_id = ICE_PROT_ESP_F;
1308 case ICE_FLOW_FIELD_IDX_AH_SPI:
1309 prot_id = ICE_PROT_ESP_2;
1311 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1312 prot_id = ICE_PROT_UDP_IL_OR_S;
1314 case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1315 prot_id = ICE_PROT_ECPRI;
1317 case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1318 prot_id = ICE_PROT_UDP_IL_OR_S;
1320 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1321 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1322 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1323 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1324 case ICE_FLOW_FIELD_IDX_ARP_OP:
1325 prot_id = ICE_PROT_ARP_OF;
1327 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1328 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1329 /* ICMP type and code share the same extraction seq. entry */
1330 prot_id = (params->prof->segs[seg].hdrs &
1331 ICE_FLOW_SEG_HDR_IPV4) ?
1332 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1333 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1334 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1335 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1337 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1338 prot_id = ICE_PROT_GRE_OF;
1341 return ICE_ERR_NOT_IMPL;
1344 /* Each extraction sequence entry is a word in size, and extracts a
1345 * word-aligned offset from a protocol header.
1347 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1349 flds[fld].xtrct.prot_id = prot_id;
1350 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1351 ICE_FLOW_FV_EXTRACT_SZ;
1352 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1353 flds[fld].xtrct.idx = params->es_cnt;
1354 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1356 /* Adjust the next field-entry index after accommodating the number of
1357 * entries this field consumes
1359 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1360 ice_flds_info[fld].size, ese_bits);
1362 /* Fill in the extraction sequence entries needed for this field */
1363 off = flds[fld].xtrct.off;
1364 mask = flds[fld].xtrct.mask;
1365 for (i = 0; i < cnt; i++) {
1366 /* Only consume an extraction sequence entry if there is no
1367 * sibling field associated with this field or the sibling entry
1368 * already extracts the word shared with this field.
1370 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1371 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1372 flds[sib].xtrct.off != off) {
1375 /* Make sure the number of extraction sequence required
1376 * does not exceed the block's capability
1378 if (params->es_cnt >= fv_words)
1379 return ICE_ERR_MAX_LIMIT;
1381 /* some blocks require a reversed field vector layout */
1382 if (hw->blk[params->blk].es.reverse)
1383 idx = fv_words - params->es_cnt - 1;
1385 idx = params->es_cnt;
1387 params->es[idx].prot_id = prot_id;
1388 params->es[idx].off = off;
1389 params->mask[idx] = mask | sib_mask;
1393 off += ICE_FLOW_FV_EXTRACT_SZ;
1400 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1401 * @hw: pointer to the HW struct
1402 * @params: information about the flow to be processed
1403 * @seg: index of packet segment whose raw fields are to be extracted
1405 static enum ice_status
1406 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1413 if (!params->prof->segs[seg].raws_cnt)
1416 if (params->prof->segs[seg].raws_cnt >
1417 ARRAY_SIZE(params->prof->segs[seg].raws))
1418 return ICE_ERR_MAX_LIMIT;
1420 /* Offsets within the segment headers are not supported */
1421 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1423 return ICE_ERR_PARAM;
1425 fv_words = hw->blk[params->blk].es.fvw;
1427 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1428 struct ice_flow_seg_fld_raw *raw;
1431 raw = ¶ms->prof->segs[seg].raws[i];
1433 /* Storing extraction information */
1434 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1435 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1436 ICE_FLOW_FV_EXTRACT_SZ;
1437 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1439 raw->info.xtrct.idx = params->es_cnt;
1441 /* Determine the number of field vector entries this raw field
1444 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1445 (raw->info.src.last * BITS_PER_BYTE),
1446 (ICE_FLOW_FV_EXTRACT_SZ *
1448 off = raw->info.xtrct.off;
1449 for (j = 0; j < cnt; j++) {
1452 /* Make sure the number of extraction sequence required
1453 * does not exceed the block's capability
1455 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1456 params->es_cnt >= ICE_MAX_FV_WORDS)
1457 return ICE_ERR_MAX_LIMIT;
1459 /* some blocks require a reversed field vector layout */
1460 if (hw->blk[params->blk].es.reverse)
1461 idx = fv_words - params->es_cnt - 1;
1463 idx = params->es_cnt;
1465 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1466 params->es[idx].off = off;
1468 off += ICE_FLOW_FV_EXTRACT_SZ;
1476 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1477 * @hw: pointer to the HW struct
1478 * @params: information about the flow to be processed
1480 * This function iterates through all matched fields in the given segments, and
1481 * creates an extraction sequence for the fields.
1483 static enum ice_status
1484 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1485 struct ice_flow_prof_params *params)
1487 enum ice_status status = ICE_SUCCESS;
1490 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1493 if (params->blk == ICE_BLK_ACL) {
1494 status = ice_flow_xtract_pkt_flags(hw, params,
1495 ICE_RX_MDID_PKT_FLAGS_15_0);
1500 for (i = 0; i < params->prof->segs_cnt; i++) {
1501 u64 match = params->prof->segs[i].match;
1502 enum ice_flow_field j;
1504 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1505 ICE_FLOW_FIELD_IDX_MAX) {
1506 status = ice_flow_xtract_fld(hw, params, i, j, match);
1509 ice_clear_bit(j, (ice_bitmap_t *)&match);
1512 /* Process raw matching bytes */
1513 status = ice_flow_xtract_raws(hw, params, i);
1522 * ice_flow_sel_acl_scen - returns the specific scenario
1523 * @hw: pointer to the hardware structure
1524 * @params: information about the flow to be processed
1526 * This function will return the specific scenario based on the
1527 * params passed to it
1529 static enum ice_status
1530 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1532 /* Find the best-fit scenario for the provided match width */
1533 struct ice_acl_scen *cand_scen = NULL, *scen;
1536 return ICE_ERR_DOES_NOT_EXIST;
1538 /* Loop through each scenario and match against the scenario width
1539 * to select the specific scenario
1541 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1542 if (scen->eff_width >= params->entry_length &&
1543 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1546 return ICE_ERR_DOES_NOT_EXIST;
1548 params->prof->cfg.scen = cand_scen;
1554 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1555 * @params: information about the flow to be processed
1557 static enum ice_status
1558 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1560 u16 index, i, range_idx = 0;
1562 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1564 for (i = 0; i < params->prof->segs_cnt; i++) {
1565 struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
1568 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1569 ICE_FLOW_FIELD_IDX_MAX) {
1570 struct ice_flow_fld_info *fld = &seg->fields[j];
1572 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1574 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1575 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1577 /* Range checking only supported for single
1580 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1582 BITS_PER_BYTE * 2) > 1)
1583 return ICE_ERR_PARAM;
1585 /* Ranges must define low and high values */
1586 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1587 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1588 return ICE_ERR_PARAM;
1590 fld->entry.val = range_idx++;
1592 /* Store adjusted byte-length of field for later
1593 * use, taking into account potential
1594 * non-byte-aligned displacement
1596 fld->entry.last = DIVIDE_AND_ROUND_UP
1597 (ice_flds_info[j].size +
1598 (fld->xtrct.disp % BITS_PER_BYTE),
1600 fld->entry.val = index;
1601 index += fld->entry.last;
1605 for (j = 0; j < seg->raws_cnt; j++) {
1606 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1608 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1609 raw->info.entry.val = index;
1610 raw->info.entry.last = raw->info.src.last;
1611 index += raw->info.entry.last;
1615 /* Currently only support using the byte selection base, which only
1616 * allows for an effective entry size of 30 bytes. Reject anything
1619 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1620 return ICE_ERR_PARAM;
1622 /* Only 8 range checkers per profile, reject anything trying to use
1625 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1626 return ICE_ERR_PARAM;
1628 /* Store # bytes required for entry for later use */
1629 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1635 * ice_flow_proc_segs - process all packet segments associated with a profile
1636 * @hw: pointer to the HW struct
1637 * @params: information about the flow to be processed
1639 static enum ice_status
1640 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1642 enum ice_status status;
1644 status = ice_flow_proc_seg_hdrs(params);
1648 status = ice_flow_create_xtrct_seq(hw, params);
1652 switch (params->blk) {
1655 status = ICE_SUCCESS;
1658 status = ice_flow_acl_def_entry_frmt(params);
1661 status = ice_flow_sel_acl_scen(hw, params);
1666 return ICE_ERR_NOT_IMPL;
/* Condition flags for ice_flow_find_prof_conds() */
#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004
1677 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1678 * @hw: pointer to the HW struct
1679 * @blk: classification stage
1680 * @dir: flow direction
1681 * @segs: array of one or more packet segments that describe the flow
1682 * @segs_cnt: number of packet segments provided
1683 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1684 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1686 static struct ice_flow_prof *
1687 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1688 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1689 u8 segs_cnt, u16 vsi_handle, u32 conds)
1691 struct ice_flow_prof *p, *prof = NULL;
1693 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1694 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1695 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1696 segs_cnt && segs_cnt == p->segs_cnt) {
1699 /* Check for profile-VSI association if specified */
1700 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1701 ice_is_vsi_valid(hw, vsi_handle) &&
1702 !ice_is_bit_set(p->vsis, vsi_handle))
1705 /* Protocol headers must be checked. Matched fields are
1706 * checked if specified.
1708 for (i = 0; i < segs_cnt; i++)
1709 if (segs[i].hdrs != p->segs[i].hdrs ||
1710 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1711 segs[i].match != p->segs[i].match))
1714 /* A match is found if all segments are matched */
1715 if (i == segs_cnt) {
1720 ice_release_lock(&hw->fl_profs_locks[blk]);
1726 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1727 * @hw: pointer to the HW struct
1728 * @blk: classification stage
1729 * @dir: flow direction
1730 * @segs: array of one or more packet segments that describe the flow
1731 * @segs_cnt: number of packet segments provided
1734 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1735 struct ice_flow_seg_info *segs, u8 segs_cnt)
1737 struct ice_flow_prof *p;
1739 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1740 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1742 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1746 * ice_flow_find_prof_id - Look up a profile with given profile ID
1747 * @hw: pointer to the HW struct
1748 * @blk: classification stage
1749 * @prof_id: unique ID to identify this flow profile
1751 static struct ice_flow_prof *
1752 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1754 struct ice_flow_prof *p;
1756 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1757 if (p->id == prof_id)
1764 * ice_dealloc_flow_entry - Deallocate flow entry memory
1765 * @hw: pointer to the HW struct
1766 * @entry: flow entry to be removed
/* Releases everything owned by a flow entry: the formatted key buffer
 * (entry->entry), the range-checker buffer, the copied actions, and
 * finally the entry structure itself. range_buf is NULLed after free.
 */
1769 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1775 ice_free(hw, entry->entry);
1777 if (entry->range_buf) {
1778 ice_free(hw, entry->range_buf);
1779 entry->range_buf = NULL;
1783 ice_free(hw, entry->acts);
/* acts freed above; count reset so no stale action bookkeeping remains */
1785 entry->acts_cnt = 0;
1788 ice_free(hw, entry);
1792 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1793 * @hw: pointer to the HW struct
1794 * @blk: classification stage
1795 * @prof_id: the profile ID handle
1796 * @hw_prof_id: pointer to variable to receive the HW profile ID
1799 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1802 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1803 struct ice_prof_map *map;
1805 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1806 map = ice_search_prof_id(hw, blk, prof_id);
1808 *hw_prof_id = map->prof_id;
1809 status = ICE_SUCCESS;
1811 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
/* Scenario number written into a PF slot that is not programmed */
1815 #define ICE_ACL_INVALID_SCEN 0x3f
1818 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1819 * @hw: pointer to the hardware structure
1820 * @prof: pointer to flow profile
1821 * @buf: destination buffer function writes partial extraction sequence to
1823 * returns ICE_SUCCESS if no PF is associated to the given profile
1824 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1825 * returns other error code for real error
1827 static enum ice_status
1828 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1829 struct ice_aqc_acl_prof_generic_frmt *buf)
1831 enum ice_status status;
/* Resolve the HW profile ID, then query its current generic format
 * (including the per-PF scenario map) from firmware into @buf.
 */
1834 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1838 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1842 /* If all PF's associated scenarios are all 0 or all
1843 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1844 * not been configured yet.
1846 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1847 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1848 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1849 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
/* all slots explicitly invalidated also means "not in use" */
1852 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1853 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1854 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1855 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1856 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1857 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1858 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1859 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
/* any other mix of slot values => some PF still uses this profile */
1862 return ICE_ERR_IN_USE;
1866 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1867 * @hw: pointer to the hardware structure
1868 * @acts: array of actions to be performed on a match
1869 * @acts_cnt: number of actions
/* Walks the action list and deallocates the firmware ACL counter behind
 * every counter-type action (packet, byte, or dual).
 */
1871 static enum ice_status
1872 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1877 for (i = 0; i < acts_cnt; i++) {
1878 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1879 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1880 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1881 struct ice_acl_cntrs cntrs = { 0 };
1882 enum ice_status status;
1884 /* amount is unused in the dealloc path but the common
1885 * parameter check routine wants a value set, as zero
1886 * is invalid for the check. Just set it.
1889 cntrs.bank = 0; /* Only bank0 for the moment */
/* both boundaries of the counter range come from the counter
 * index previously stored in the action's value field
 */
1891 LE16_TO_CPU(acts[i].data.acl_act.value);
1893 LE16_TO_CPU(acts[i].data.acl_act.value);
1895 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1896 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1898 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1900 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1909 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1910 * @hw: pointer to the hardware structure
1911 * @prof: pointer to flow profile
1913 * Disassociate the scenario from the profile for the PF of the VSI.
/* Reads the profile's generic format from firmware, marks this PF's
 * scenario slot invalid, and programs the format back.
 */
1915 static enum ice_status
1916 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1918 struct ice_aqc_acl_prof_generic_frmt buf;
1919 enum ice_status status = ICE_SUCCESS;
1922 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1924 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1928 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1932 /* Clear scenario for this PF */
1933 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1934 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
1940 * ice_flow_rem_entry_sync - Remove a flow entry
1941 * @hw: pointer to the HW struct
1942 * @blk: classification stage
1943 * @entry: flow entry to be removed
/* NOTE(review): callers appear to hold the owning profile's entries_lock
 * (see ice_flow_rem_prof_sync) - confirm before adding new call sites.
 */
1945 static enum ice_status
1946 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1947 struct ice_flow_entry *entry)
1950 return ICE_ERR_BAD_PTR;
/* ACL entries also live in a HW scenario and may own counters; undo
 * both before releasing the SW bookkeeping.
 */
1952 if (blk == ICE_BLK_ACL) {
1953 enum ice_status status;
1956 return ICE_ERR_BAD_PTR;
1958 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1959 entry->scen_entry_idx);
1963 /* Checks if we need to release an ACL counter. */
1964 if (entry->acts_cnt && entry->acts)
1965 ice_flow_acl_free_act_cntr(hw, entry->acts,
/* Unlink from the profile's entry list, then free all entry memory */
1969 LIST_DEL(&entry->l_entry);
1971 ice_dealloc_flow_entry(hw, entry);
1977 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1978 * @hw: pointer to the HW struct
1979 * @blk: classification stage
1980 * @dir: flow direction
1981 * @prof_id: unique ID to identify this flow profile
1982 * @segs: array of one or more packet segments that describe the flow
1983 * @segs_cnt: number of packet segments provided
1984 * @acts: array of default actions
1985 * @acts_cnt: number of default actions
1986 * @prof: stores the returned flow profile added
1988 * Assumption: the caller has acquired the lock to the profile list
1990 static enum ice_status
1991 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1992 enum ice_flow_dir dir, u64 prof_id,
1993 struct ice_flow_seg_info *segs, u8 segs_cnt,
1994 struct ice_flow_action *acts, u8 acts_cnt,
1995 struct ice_flow_prof **prof)
1997 struct ice_flow_prof_params *params;
1998 enum ice_status status;
2001 if (!prof || (acts_cnt && !acts))
2002 return ICE_ERR_BAD_PTR;
/* params is large, so it is heap-allocated rather than stack-allocated */
2004 params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2006 return ICE_ERR_NO_MEMORY;
2008 params->prof = (struct ice_flow_prof *)
2009 ice_malloc(hw, sizeof(*params->prof));
2010 if (!params->prof) {
2011 status = ICE_ERR_NO_MEMORY;
2015 /* initialize extraction sequence to all invalid (0xff) */
2016 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2017 params->es[i].prot_id = ICE_PROT_INVALID;
2018 params->es[i].off = ICE_FV_OFFSET_INVAL;
2022 params->prof->id = prof_id;
2023 params->prof->dir = dir;
2024 params->prof->segs_cnt = segs_cnt;
2026 /* Make a copy of the segments that need to be persistent in the flow
2029 for (i = 0; i < segs_cnt; i++)
/* NOTE(review): '¶ms' below is mojibake for '&params' (mis-decoded
 * '&para'); restore before compiling.
 */
2030 ice_memcpy(¶ms->prof->segs[i], &segs[i], sizeof(*segs),
2031 ICE_NONDMA_TO_NONDMA);
2033 /* Make a copy of the actions that need to be persistent in the flow
2037 params->prof->acts = (struct ice_flow_action *)
2038 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2039 ICE_NONDMA_TO_NONDMA);
2041 if (!params->prof->acts) {
2042 status = ICE_ERR_NO_MEMORY;
/* Derive ptypes/attributes/extraction sequence from the segments */
2047 status = ice_flow_proc_segs(hw, params);
2049 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2053 /* Add a HW profile for this flow profile */
2054 status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2055 params->attr, params->attr_cnt, params->es,
2058 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
/* Success: hand the initialized profile back to the caller.
 * NOTE(review): '¶ms' mojibake again on the next two lines.
 */
2062 INIT_LIST_HEAD(¶ms->prof->entries);
2063 ice_init_lock(¶ms->prof->entries_lock);
2064 *prof = params->prof;
/* Error path: release the partially-built profile and its actions */
2068 if (params->prof->acts)
2069 ice_free(hw, params->prof->acts);
2070 ice_free(hw, params->prof);
2073 ice_free(hw, params);
2079 * ice_flow_rem_prof_sync - remove a flow profile
2080 * @hw: pointer to the hardware structure
2081 * @blk: classification stage
2082 * @prof: pointer to flow profile to remove
2084 * Assumption: the caller has acquired the lock to the profile list
2086 static enum ice_status
2087 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2088 struct ice_flow_prof *prof)
2090 enum ice_status status;
2092 /* Remove all remaining flow entries before removing the flow profile */
2093 if (!LIST_EMPTY(&prof->entries)) {
2094 struct ice_flow_entry *e, *t;
2096 ice_acquire_lock(&prof->entries_lock);
/* SAFE iteration: rem_entry_sync unlinks and frees each entry */
2098 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2100 status = ice_flow_rem_entry_sync(hw, blk, e);
2105 ice_release_lock(&prof->entries_lock);
/* ACL profiles need extra teardown: drop this PF's scenario, and if
 * no PF uses the profile any more, clear its range checkers too.
 */
2108 if (blk == ICE_BLK_ACL) {
2109 struct ice_aqc_acl_profile_ranges query_rng_buf;
2110 struct ice_aqc_acl_prof_generic_frmt buf;
2113 /* Disassociate the scenario from the profile for the PF */
2114 status = ice_flow_acl_disassoc_scen(hw, prof);
2118 /* Clear the range-checker if the profile ID is no longer
2121 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2122 if (status && status != ICE_ERR_IN_USE) {
2124 } else if (!status) {
2125 /* Clear the range-checker value for profile ID */
2126 ice_memset(&query_rng_buf, 0,
2127 sizeof(struct ice_aqc_acl_profile_ranges),
2130 status = ice_flow_get_hw_prof(hw, blk, prof->id,
/* program an all-zero range buffer = no range checkers active */
2135 status = ice_prog_acl_prof_ranges(hw, prof_id,
2136 &query_rng_buf, NULL);
2142 /* Remove all hardware profiles associated with this flow profile */
2143 status = ice_rem_prof(hw, blk, prof->id);
/* Unlink from the SW list and release the profile's resources */
2145 LIST_DEL(&prof->l_entry);
2146 ice_destroy_lock(&prof->entries_lock);
2148 ice_free(hw, prof->acts);
2156 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2157 * @buf: Destination buffer function writes partial xtrct sequence to
2158 * @info: Info about field
2161 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2162 struct ice_flow_fld_info *info)
/* src: byte offset of this field inside the field vector, derived from
 * the FV word index plus the bit displacement converted to bytes.
 */
2167 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2168 info->xtrct.disp / BITS_PER_BYTE;
2169 dst = info->entry.val;
2170 for (i = 0; i < info->entry.last; i++)
2171 /* HW stores field vector words in LE, convert words back to BE
2172 * so constructed entries will end up in network order
/* XOR with 1 swaps the two bytes within each 16-bit FV word */
2174 buf->byte_selection[dst++] = src++ ^ 1;
2178 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2179 * @hw: pointer to the hardware structure
2180 * @prof: pointer to flow profile
/* Programs the profile's byte/word selection and per-PF scenario map into
 * firmware. The profile-wide portion is written only when no PF has
 * configured the profile yet (ice_flow_acl_is_prof_in_use == SUCCESS);
 * the current PF's scenario slot is always updated.
 */
2182 static enum ice_status
2183 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2185 struct ice_aqc_acl_prof_generic_frmt buf;
2186 struct ice_flow_fld_info *info;
2187 enum ice_status status;
2191 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2193 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2197 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2198 if (status && status != ICE_ERR_IN_USE)
2202 /* Program the profile dependent configuration. This is done
2203 * only once regardless of the number of PFs using that profile
2205 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2207 for (i = 0; i < prof->segs_cnt; i++) {
2208 struct ice_flow_seg_info *seg = &prof->segs[i];
/* Emit a selection entry for every matched field in this segment */
2211 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2212 ICE_FLOW_FIELD_IDX_MAX) {
2213 info = &seg->fields[j];
/* Range-checked fields use word selection; others byte selection */
2215 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2216 buf.word_selection[info->entry.val] =
2219 ice_flow_acl_set_xtrct_seq_fld(&buf,
/* Raw (user-defined) extractions are always byte selections */
2223 for (j = 0; j < seg->raws_cnt; j++) {
2224 info = &seg->raws[j].info;
2225 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
/* Start from an all-invalid PF scenario map ... */
2229 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2230 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2234 /* Update the current PF */
2235 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2236 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2242 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2243 * @hw: pointer to the hardware structure
2244 * @blk: classification stage
2245 * @vsi_handle: software VSI handle
2246 * @vsig: target VSI group
2248 * Assumption: the caller has already verified that the VSI to
2249 * be added has the same characteristics as the VSIG and will
2250 * thereby have access to all resources added to that VSIG.
2253 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2256 enum ice_status status;
2258 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2259 return ICE_ERR_PARAM;
2261 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2262 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2264 ice_release_lock(&hw->fl_profs_locks[blk]);
2270 * ice_flow_assoc_prof - associate a VSI with a flow profile
2271 * @hw: pointer to the hardware structure
2272 * @blk: classification stage
2273 * @prof: pointer to flow profile
2274 * @vsi_handle: software VSI handle
2276 * Assumption: the caller has acquired the lock to the profile list
2277 * and the software VSI handle has been validated
2280 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2281 struct ice_flow_prof *prof, u16 vsi_handle)
2283 enum ice_status status = ICE_SUCCESS;
/* No-op if the VSI is already associated with this profile */
2285 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
/* ACL profiles must have their extraction sequence programmed
 * before the first VSI association.
 */
2286 if (blk == ICE_BLK_ACL) {
2287 status = ice_flow_acl_set_xtrct_seq(hw, prof);
2291 status = ice_add_prof_id_flow(hw, blk,
2292 ice_get_hw_vsi_num(hw,
/* record the association in the profile's VSI bitmap on success */
2296 ice_set_bit(vsi_handle, prof->vsis);
2298 ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2306 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2307 * @hw: pointer to the hardware structure
2308 * @blk: classification stage
2309 * @prof: pointer to flow profile
2310 * @vsi_handle: software VSI handle
2312 * Assumption: the caller has acquired the lock to the profile list
2313 * and the software VSI handle has been validated
2315 static enum ice_status
2316 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2317 struct ice_flow_prof *prof, u16 vsi_handle)
2319 enum ice_status status = ICE_SUCCESS;
/* Only act when the VSI is actually associated with this profile */
2321 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2322 status = ice_rem_prof_id_flow(hw, blk,
2323 ice_get_hw_vsi_num(hw,
/* clear the SW association bit once HW removal succeeded */
2327 ice_clear_bit(vsi_handle, prof->vsis);
2329 ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2337 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2338 * @hw: pointer to the HW struct
2339 * @blk: classification stage
2340 * @dir: flow direction
2341 * @prof_id: unique ID to identify this flow profile
2342 * @segs: array of one or more packet segments that describe the flow
2343 * @segs_cnt: number of packet segments provided
2344 * @acts: array of default actions
2345 * @acts_cnt: number of default actions
2346 * @prof: stores the returned flow profile added
/* Public entry point: validates inputs, then builds the profile under the
 * block's profile-list lock and appends it to hw->fl_profs[blk].
 */
2349 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2350 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2351 struct ice_flow_action *acts, u8 acts_cnt,
2352 struct ice_flow_prof **prof)
2354 enum ice_status status;
2356 if (segs_cnt > ICE_FLOW_SEG_MAX)
2357 return ICE_ERR_MAX_LIMIT;
2360 return ICE_ERR_PARAM;
2363 return ICE_ERR_BAD_PTR;
/* verify the segment headers describe a supported protocol stack */
2365 status = ice_flow_val_hdrs(segs, segs_cnt);
2369 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2371 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2372 acts, acts_cnt, prof);
/* on success, publish the new profile on the block's list */
2374 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2376 ice_release_lock(&hw->fl_profs_locks[blk]);
2382 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2383 * @hw: pointer to the HW struct
2384 * @blk: the block for which the flow profile is to be removed
2385 * @prof_id: unique ID of the flow profile to be removed
2388 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2390 struct ice_flow_prof *prof;
2391 enum ice_status status;
/* Lookup and removal both happen under the profile-list lock */
2393 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2395 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2397 status = ICE_ERR_DOES_NOT_EXIST;
2401 /* prof becomes invalid after the call */
2402 status = ice_flow_rem_prof_sync(hw, blk, prof);
2405 ice_release_lock(&hw->fl_profs_locks[blk]);
2411 * ice_flow_find_entry - look for a flow entry using its unique ID
2412 * @hw: pointer to the HW struct
2413 * @blk: classification stage
2414 * @entry_id: unique ID to identify this flow entry
2416 * This function looks for the flow entry with the specified unique ID in all
2417 * flow profiles of the specified classification stage. If the entry is found,
2418 * and it returns the handle to the flow entry. Otherwise, it returns
2419 * ICE_FLOW_ENTRY_ID_INVAL.
2421 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2423 struct ice_flow_entry *found = NULL;
2424 struct ice_flow_prof *p;
2426 ice_acquire_lock(&hw->fl_profs_locks[blk]);
/* Search every profile's entry list; each list has its own lock */
2428 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2429 struct ice_flow_entry *e;
2431 ice_acquire_lock(&p->entries_lock);
2432 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2433 if (e->id == entry_id) {
2437 ice_release_lock(&p->entries_lock);
2443 ice_release_lock(&hw->fl_profs_locks[blk]);
/* Convert the entry pointer to the opaque handle callers use */
2445 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2449 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2450 * @hw: pointer to the hardware structure
2451 * @acts: array of actions to be performed on a match
2452 * @acts_cnt: number of actions
2453 * @cnt_alloc: indicates if an ACL counter has been allocated.
2455 static enum ice_status
2456 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2457 u8 acts_cnt, bool *cnt_alloc)
/* dup_check is indexed by action type to detect repeated action kinds */
2459 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2462 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2465 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2466 return ICE_ERR_OUT_OF_RANGE;
2468 for (i = 0; i < acts_cnt; i++) {
/* only NOP/DROP/packet-counter/forward-to-queue are valid here */
2469 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2470 acts[i].type != ICE_FLOW_ACT_DROP &&
2471 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2472 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2475 /* If the caller want to add two actions of the same type, then
2476 * it is considered invalid configuration.
2478 if (ice_test_and_set_bit(acts[i].type, dup_check))
2479 return ICE_ERR_PARAM;
2482 /* Checks if ACL counters are needed. */
2483 for (i = 0; i < acts_cnt; i++) {
2484 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2485 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2486 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2487 struct ice_acl_cntrs cntrs = { 0 };
2488 enum ice_status status;
2491 cntrs.bank = 0; /* Only bank0 for the moment */
/* dual counters track packets and bytes together */
2493 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2494 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2496 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2498 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2501 /* Counter index within the bank */
/* stash the allocated counter index in the action so the
 * dealloc path (ice_flow_acl_free_act_cntr) can find it
 */
2502 acts[i].data.acl_act.value =
2503 CPU_TO_LE16(cntrs.first_cntr);
2512 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2513 * @fld: number of the given field
2514 * @info: info about field
2515 * @range_buf: range checker configuration buffer
2516 * @data: pointer to a data buffer containing flow entry's match values/masks
2517 * @range: Input/output param indicating which range checkers are being used
2520 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2521 struct ice_aqc_acl_profile_ranges *range_buf,
2522 u8 *data, u8 *range)
2526 /* If not specified, default mask is all bits in field */
/* NOTE(review): ice_flds_info[].size is stored in bits (see the
 * ICE_FLOW_FLD_INFO macro), so BIT(size) - 1 assumes size < width of the
 * mask type here - confirm for 16-bit fields.
 */
2527 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2528 BIT(ice_flds_info[fld].size) - 1 :
2529 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2531 /* If the mask is 0, then we don't need to worry about this input
2532 * range checker value.
/* low/high boundaries come from the entry's val/last offsets, shifted
 * by the extraction displacement to align with the extracted word
 */
2536 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2538 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2539 u8 range_idx = info->entry.val;
2541 range_buf->checker_cfg[range_idx].low_boundary =
2542 CPU_TO_BE16(new_low);
2543 range_buf->checker_cfg[range_idx].high_boundary =
2544 CPU_TO_BE16(new_high);
2545 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2547 /* Indicate which range checker is being used */
2548 *range |= BIT(range_idx);
2553 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2554 * @fld: number of the given field
2555 * @info: info about the field
2556 * @buf: buffer containing the entry
2557 * @dontcare: buffer containing don't care mask for entry
2558 * @data: pointer to a data buffer containing flow entry's match values/masks
/* Copies the field's match value (and optional mask) from the caller's
 * data buffer into the ACL key byte positions selected for this field,
 * shifting by the sub-byte bit displacement and marking displaced
 * leading/trailing bits as don't-care.
 */
2561 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2562 u8 *dontcare, u8 *data)
2564 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2565 bool use_mask = false;
2568 src = info->src.val;
2569 mask = info->src.mask;
/* entry.val is an absolute byte-selection index; rebase it to the key */
2570 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2571 disp = info->xtrct.disp % BITS_PER_BYTE;
2573 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2576 for (k = 0; k < info->entry.last; k++, dst++) {
2577 /* Add overflow bits from previous byte */
2578 buf[dst] = (tmp_s & 0xff00) >> 8;
2580 /* If mask is not valid, tmp_m is always zero, so just setting
2581 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2582 * overflow bits of mask from prev byte
2584 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2586 /* If there is displacement, last byte will only contain
2587 * displaced data, but there is no more data to read from user
2588 * buffer, so skip so as not to potentially read beyond end of
2591 if (!disp || k < info->entry.last - 1) {
2592 /* Store shifted data to use in next byte */
2593 tmp_s = data[src++] << disp;
2595 /* Add current (shifted) byte */
2596 buf[dst] |= tmp_s & 0xff;
2598 /* Handle mask if valid */
/* dontcare uses inverted mask semantics: 1 = ignore this bit */
2600 tmp_m = (~data[mask++] & 0xff) << disp;
2601 dontcare[dst] |= tmp_m & 0xff;
2606 /* Fill in don't care bits at beginning of field */
2608 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2609 for (k = 0; k < disp; k++)
2610 dontcare[dst] |= BIT(k);
/* end_disp: bit position where the field ends inside its last byte */
2613 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2615 /* Fill in don't care bits at end of field */
2617 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2618 info->entry.last - 1;
2619 for (k = end_disp; k < BITS_PER_BYTE; k++)
2620 dontcare[dst] |= BIT(k);
2625 * ice_flow_acl_frmt_entry - Format ACL entry
2626 * @hw: pointer to the hardware structure
2627 * @prof: pointer to flow profile
2628 * @e: pointer to the flow entry
2629 * @data: pointer to a data buffer containing flow entry's match values/masks
2630 * @acts: array of actions to be performed on a match
2631 * @acts_cnt: number of actions
2633 * Formats the key (and key_inverse) to be matched from the data passed in,
2634 * along with data from the flow profile. This key/key_inverse pair makes up
2635 * the 'entry' for an ACL flow entry.
2637 static enum ice_status
2638 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2639 struct ice_flow_entry *e, u8 *data,
2640 struct ice_flow_action *acts, u8 acts_cnt)
2642 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2643 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2644 enum ice_status status;
2649 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2653 /* Format the result action */
/* validates the actions and allocates HW counters where needed;
 * cnt_alloc tells the error path whether counters must be freed
 */
2655 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2659 status = ICE_ERR_NO_MEMORY;
/* keep a private copy of the actions on the entry */
2661 e->acts = (struct ice_flow_action *)
2662 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2663 ICE_NONDMA_TO_NONDMA);
2667 e->acts_cnt = acts_cnt;
2669 /* Format the matching data */
2670 buf_sz = prof->cfg.scen->width;
2671 buf = (u8 *)ice_malloc(hw, buf_sz);
2675 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2679 /* 'key' buffer will store both key and key_inverse, so must be twice
2682 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2686 range_buf = (struct ice_aqc_acl_profile_ranges *)
2687 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2691 /* Set don't care mask to all 1's to start, will zero out used bytes */
2692 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2694 for (i = 0; i < prof->segs_cnt; i++) {
2695 struct ice_flow_seg_info *seg = &prof->segs[i];
/* format every matched field of the segment into buf/dontcare */
2698 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2699 ICE_FLOW_FIELD_IDX_MAX) {
2700 struct ice_flow_fld_info *info = &seg->fields[j];
2702 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2703 ice_flow_acl_frmt_entry_range(j, info,
2707 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* raw extractions copy bytes directly, no bit displacement */
2711 for (j = 0; j < seg->raws_cnt; j++) {
2712 struct ice_flow_fld_info *info = &seg->raws[j].info;
2713 u16 dst, src, mask, k;
2714 bool use_mask = false;
2716 src = info->src.val;
2717 dst = info->entry.val -
2718 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2719 mask = info->src.mask;
2721 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2724 for (k = 0; k < info->entry.last; k++, dst++) {
2725 buf[dst] = data[src++];
2727 dontcare[dst] = ~data[mask++];
/* embed the HW profile ID in the key so the scenario can tell
 * entries of different profiles apart
 */
2734 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2735 dontcare[prof->cfg.scen->pid_idx] = 0;
2737 /* Format the buffer for direction flags */
2738 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2740 if (prof->dir == ICE_FLOW_RX)
2741 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
/* record which range checkers this entry uses; unused ones are
 * don't-care so other entries may use them
 */
2744 buf[prof->cfg.scen->rng_chk_idx] = range;
2745 /* Mark any unused range checkers as don't care */
2746 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
/* ownership of range_buf transfers to the entry when used */
2747 e->range_buf = range_buf;
2749 ice_free(hw, range_buf);
/* expand value+dontcare into the TCAM key / key_inverse pair */
2752 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2758 e->entry_sz = buf_sz * 2;
2765 ice_free(hw, dontcare);
/* error path: undo partial ownership transfers and allocations */
2770 if (status && range_buf) {
2771 ice_free(hw, range_buf);
2772 e->range_buf = NULL;
2775 if (status && e->acts) {
2776 ice_free(hw, e->acts);
2781 if (status && cnt_alloc)
2782 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2788 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2789 * the compared data.
2790 * @prof: pointer to flow profile
2791 * @e: pointer to the comparing flow entry
2792 * @do_chg_action: decide if we want to change the ACL action
2793 * @do_add_entry: decide if we want to add the new ACL entry
2794 * @do_rem_entry: decide if we want to remove the current ACL entry
2796 * Find an ACL scenario entry that matches the compared data. In the same time,
2797 * this function also figure out:
2798 * a/ If we want to change the ACL action
2799 * b/ If we want to add the new ACL entry
2800 * c/ If we want to remove the current ACL entry
2802 static struct ice_flow_entry *
2803 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2804 struct ice_flow_entry *e, bool *do_chg_action,
2805 bool *do_add_entry, bool *do_rem_entry)
2807 struct ice_flow_entry *p, *return_entry = NULL;
2811 * a/ There exists an entry with same matching data, but different
2812 * priority, then we remove this existing ACL entry. Then, we
2813 * will add the new entry to the ACL scenario.
2814 * b/ There exists an entry with same matching data, priority, and
2815 * result action, then we do nothing
2816 * c/ There exists an entry with same matching data, priority, but
2817 * different, action, then do only change the action's entry.
2818 * d/ Else, we add this new entry to the ACL scenario.
/* Default: treat as a brand-new entry (case d) */
2820 *do_chg_action = false;
2821 *do_add_entry = true;
2822 *do_rem_entry = false;
2823 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
/* compare formatted key+inverse; non-zero memcmp = different data */
2824 if (memcmp(p->entry, e->entry, p->entry_sz))
2827 /* From this point, we have the same matching_data. */
2828 *do_add_entry = false;
2831 if (p->priority != e->priority) {
2832 /* matching data && !priority */
2833 *do_add_entry = true;
2834 *do_rem_entry = true;
2838 /* From this point, we will have matching_data && priority */
2839 if (p->acts_cnt != e->acts_cnt)
2840 *do_chg_action = true;
/* action lists are compared as sets: every action of p must have
 * a byte-identical counterpart somewhere in e
 */
2841 for (i = 0; i < p->acts_cnt; i++) {
2842 bool found_not_match = false;
2844 for (j = 0; j < e->acts_cnt; j++)
2845 if (memcmp(&p->acts[i], &e->acts[j],
2846 sizeof(struct ice_flow_action))) {
2847 found_not_match = true;
2851 if (found_not_match) {
2852 *do_chg_action = true;
2857 /* (do_chg_action = true) means :
2858 * matching_data && priority && !result_action
2859 * (do_chg_action = false) means :
2860 * matching_data && priority && result_action
2865 return return_entry;
2869 * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
2872 static enum ice_acl_entry_prio
2873 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
2875 enum ice_acl_entry_prio acl_prio;
2878 case ICE_FLOW_PRIO_LOW:
2879 acl_prio = ICE_ACL_PRIO_LOW;
2881 case ICE_FLOW_PRIO_NORMAL:
2882 acl_prio = ICE_ACL_PRIO_NORMAL;
2884 case ICE_FLOW_PRIO_HIGH:
2885 acl_prio = ICE_ACL_PRIO_HIGH;
2888 acl_prio = ICE_ACL_PRIO_NORMAL;
2896 * ice_flow_acl_union_rng_chk - Perform union operation between two
2897 * range-range checker buffers
2898 * @dst_buf: pointer to destination range checker buffer
2899 * @src_buf: pointer to source range checker buffer
2901 * For this function, we do the union between dst_buf and src_buf
2902 * range checker buffer, and we will save the result back to dst_buf
2904 static enum ice_status
2905 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2906 struct ice_aqc_acl_profile_ranges *src_buf)
2910 if (!dst_buf || !src_buf)
2911 return ICE_ERR_BAD_PTR;
/* For each active source checker, find a destination slot that is
 * either free (mask == 0) or already holds an identical config.
 */
2913 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2914 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2915 bool will_populate = false;
2917 in_data = &src_buf->checker_cfg[i];
2922 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2923 cfg_data = &dst_buf->checker_cfg[j];
2925 if (!cfg_data->mask ||
2926 !memcmp(cfg_data, in_data,
2927 sizeof(struct ice_acl_rng_data))) {
2928 will_populate = true;
2933 if (will_populate) {
2934 ice_memcpy(cfg_data, in_data,
2935 sizeof(struct ice_acl_rng_data),
2936 ICE_NONDMA_TO_NONDMA);
2938 /* No available slot left to program range checker */
2939 return ICE_ERR_MAX_LIMIT;
2947 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2948 * @hw: pointer to the hardware structure
2949 * @prof: pointer to flow profile
2950 * @entry: double pointer to the flow entry
2952 * For this function, we will look at the current added entries in the
2953 * corresponding ACL scenario. Then, we will perform matching logic to
2954 * see if we want to add/modify/do nothing with this new entry.
2956 static enum ice_status
2957 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2958 struct ice_flow_entry **entry)
2960 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2961 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2962 struct ice_acl_act_entry *acts = NULL;
2963 struct ice_flow_entry *exist;
2964 enum ice_status status = ICE_SUCCESS;
2965 struct ice_flow_entry *e;
2968 if (!entry || !(*entry) || !prof)
2969 return ICE_ERR_BAD_PTR;
2973 do_chg_rng_chk = false;
2977 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2982 /* Query the current range-checker value in FW */
2983 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2987 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2988 sizeof(struct ice_aqc_acl_profile_ranges),
2989 ICE_NONDMA_TO_NONDMA);
2991 /* Generate the new range-checker value */
2992 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2996 /* Reconfigure the range check if the buffer is changed. */
2997 do_chg_rng_chk = false;
2998 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2999 sizeof(struct ice_aqc_acl_profile_ranges))) {
3000 status = ice_prog_acl_prof_ranges(hw, prof_id,
3001 &cfg_rng_buf, NULL);
3005 do_chg_rng_chk = true;
3009 /* Figure out if we want to (change the ACL action) and/or
3010 * (Add the new ACL entry) and/or (Remove the current ACL entry)
3012 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
3013 &do_add_entry, &do_rem_entry);
/* remove the stale duplicate (same data, different priority) first */
3015 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
3020 /* Prepare the result action buffer */
/* NOTE(review): allocates e->entry_sz elements but only e->acts_cnt
 * are filled/used below - looks like over-allocation; confirm the
 * intended count.
 */
3021 acts = (struct ice_acl_act_entry *)
3022 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
3024 return ICE_ERR_NO_MEMORY;
3026 for (i = 0; i < e->acts_cnt; i++)
3027 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
3028 sizeof(struct ice_acl_act_entry),
3029 ICE_NONDMA_TO_NONDMA);
3032 enum ice_acl_entry_prio prio;
/* the formatted entry holds key then key_inverse back to back */
3036 keys = (u8 *)e->entry;
3037 inverts = keys + (e->entry_sz / 2);
3038 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
3040 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
3041 inverts, acts, e->acts_cnt,
3046 e->scen_entry_idx = entry_idx;
3047 LIST_ADD(&e->l_entry, &prof->entries);
3049 if (do_chg_action) {
3050 /* For the action memory info, update the SW's copy of
3051 * exist entry with e's action memory info
3053 ice_free(hw, exist->acts);
3054 exist->acts_cnt = e->acts_cnt;
3055 exist->acts = (struct ice_flow_action *)
3056 ice_calloc(hw, exist->acts_cnt,
3057 sizeof(struct ice_flow_action));
3059 status = ICE_ERR_NO_MEMORY;
3063 ice_memcpy(exist->acts, e->acts,
3064 sizeof(struct ice_flow_action) * e->acts_cnt,
3065 ICE_NONDMA_TO_NONDMA);
/* reprogram the HW action pair for the existing scenario slot */
3067 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3069 exist->scen_entry_idx);
3074 if (do_chg_rng_chk) {
3075 /* In this case, we want to update the range checker
3076 * information of the exist entry
3078 status = ice_flow_acl_union_rng_chk(exist->range_buf,
3084 /* As we don't add the new entry to our SW DB, deallocate its
3085 * memories, and return the exist entry to the caller
3087 ice_dealloc_flow_entry(hw, e);
3097 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3098 * @hw: pointer to the hardware structure
3099 * @prof: pointer to flow profile
3100 * @e: double pointer to the flow entry
/* Thin locking wrapper: serializes scenario-entry addition on the
 * profile's entries_lock and delegates to the _sync variant.
 */
3102 static enum ice_status
3103 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3104 struct ice_flow_entry **e)
3106 enum ice_status status;
3108 ice_acquire_lock(&prof->entries_lock);
3109 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3110 ice_release_lock(&prof->entries_lock);
3116 * ice_flow_add_entry - Add a flow entry
3117 * @hw: pointer to the HW struct
3118 * @blk: classification stage
3119 * @prof_id: ID of the profile to add a new flow entry to
3120 * @entry_id: unique ID to identify this flow entry
3121 * @vsi_handle: software VSI handle for the flow entry
3122 * @prio: priority of the flow entry
3123 * @data: pointer to a data buffer containing flow entry's match values/masks
3124 * @acts: arrays of actions to be performed on a match
3125 * @acts_cnt: number of actions
3126 * @entry_h: pointer to buffer that receives the new flow entry's handle
/* NOTE(review): listing is truncated here (missing braces/branches);
 * comments describe only the visible statements.
 */
3129 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3130 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3131 void *data, struct ice_flow_action *acts, u8 acts_cnt,
3134 struct ice_flow_entry *e = NULL;
3135 struct ice_flow_prof *prof;
3136 enum ice_status status = ICE_SUCCESS;
3138 /* ACL entries must indicate an action */
3139 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3140 return ICE_ERR_PARAM;
3142 /* No flow entry data is expected for RSS */
3143 if (!entry_h || (!data && blk != ICE_BLK_RSS))
3144 return ICE_ERR_BAD_PTR;
3146 if (!ice_is_vsi_valid(hw, vsi_handle))
3147 return ICE_ERR_PARAM;
/* Per-block profile list lock guards the profile lookup and entry setup */
3149 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3151 prof = ice_flow_find_prof_id(hw, blk, prof_id);
3153 status = ICE_ERR_DOES_NOT_EXIST;
3155 /* Allocate memory for the entry being added and associate
3156 * the VSI to the found flow profile
3158 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3160 status = ICE_ERR_NO_MEMORY;
3162 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3165 ice_release_lock(&hw->fl_profs_locks[blk]);
3170 e->vsi_handle = vsi_handle;
/* ACL block builds a HW-formatted entry and inserts it into the
 * scenario; other blocks just track the entry in SW (below).
 */
3179 /* ACL will handle the entry management */
3180 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3185 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3191 status = ICE_ERR_NOT_IMPL;
3195 if (blk != ICE_BLK_ACL) {
3196 /* ACL will handle the entry management */
3197 ice_acquire_lock(&prof->entries_lock);
3198 LIST_ADD(&e->l_entry, &prof->entries);
3199 ice_release_lock(&prof->entries_lock);
/* Hand the opaque entry handle back to the caller */
3202 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
/* Error path: free the partially-built entry buffer */
3207 ice_free(hw, e->entry);
3215 * ice_flow_rem_entry - Remove a flow entry
3216 * @hw: pointer to the HW struct
3217 * @blk: classification stage
3218 * @entry_h: handle to the flow entry to be removed
3220 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3223 struct ice_flow_entry *entry;
3224 struct ice_flow_prof *prof;
3225 enum ice_status status = ICE_SUCCESS;
3227 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3228 return ICE_ERR_PARAM;
/* Handle is an encoded pointer; decode it back to the entry */
3230 entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
3232 /* Retain the pointer to the flow profile as the entry will be freed */
/* Removal is done under the profile's entries_lock since
 * ice_flow_rem_entry_sync unlinks and frees the entry.
 */
3236 ice_acquire_lock(&prof->entries_lock);
3237 status = ice_flow_rem_entry_sync(hw, blk, entry);
3238 ice_release_lock(&prof->entries_lock);
3245 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3246 * @seg: packet segment the field being set belongs to
3247 * @fld: field to be set
3248 * @field_type: type of the field
3249 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3250 * entry's input buffer
3251 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3253 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3254 * entry's input buffer
3256 * This helper function stores information of a field being matched, including
3257 * the type of the field and the locations of the value to match, the mask, and
3258 * the upper-bound value in the start of the input buffer for a flow entry.
3259 * This function should only be used for fixed-size data structures.
3261 * This function also opportunistically determines the protocol headers to be
3262 * present based on the fields being set. Some fields cannot be used alone to
3263 * determine the protocol headers present. Sometimes, fields for particular
3264 * protocol headers are not matched. In those cases, the protocol headers
3265 * must be explicitly set.
3268 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3269 enum ice_flow_fld_match_type field_type, u16 val_loc,
3270 u16 mask_loc, u16 last_loc)
/* One bit per field index; presumably OR'd into a match bitmap on a
 * line missing from this truncated listing — TODO confirm.
 */
3272 u64 bit = BIT_ULL(fld);
3275 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
/* Record the match type and the value/mask/last byte offsets for
 * later extraction when building a match entry.
 */
3278 seg->fields[fld].type = field_type;
3279 seg->fields[fld].src.val = val_loc;
3280 seg->fields[fld].src.mask = mask_loc;
3281 seg->fields[fld].src.last = last_loc;
/* Imply the protocol header this field belongs to on the segment */
3283 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3287 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3288 * @seg: packet segment the field being set belongs to
3289 * @fld: field to be set
3290 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3291 * entry's input buffer
3292 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3294 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3295 * entry's input buffer
3296 * @range: indicate if field being matched is to be in a range
3298 * This function specifies the locations, in the form of byte offsets from the
3299 * start of the input buffer for a flow entry, from where the value to match,
3300 * the mask value, and upper value can be extracted. These locations are then
3301 * stored in the flow profile. When adding a flow entry associated with the
3302 * flow profile, these locations will be used to quickly extract the values and
3303 * create the content of a match entry. This function should only be used for
3304 * fixed-size data structures.
3307 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3308 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Map the bool 'range' flag onto the match-type enum and delegate */
3310 enum ice_flow_fld_match_type t = range ?
3311 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3313 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3317 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3318 * @seg: packet segment the field being set belongs to
3319 * @fld: field to be set
3320 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3321 * entry's input buffer
3322 * @pref_loc: location of prefix value from entry's input buffer
3323 * @pref_sz: size of the location holding the prefix value
3325 * This function specifies the locations, in the form of byte offsets from the
3326 * start of the input buffer for a flow entry, from where the value to match
3327 * and the IPv4 prefix value can be extracted. These locations are then stored
3328 * in the flow profile. When adding flow entries to the associated flow profile,
3329 * these locations can be used to quickly extract the values to create the
3330 * content of a match entry. This function should only be used for fixed-size
3334 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3335 u16 val_loc, u16 pref_loc, u8 pref_sz)
3337 /* For this type of field, the "mask" location is for the prefix value's
3338 * location and the "last" location is for the size of the location of
3341 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3342 pref_loc, (u16)pref_sz);
3346 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3347 * @seg: packet segment the field being set belongs to
3348 * @off: offset of the raw field from the beginning of the segment in bytes
3349 * @len: length of the raw pattern to be matched
3350 * @val_loc: location of the value to match from entry's input buffer
3351 * @mask_loc: location of mask value from entry's input buffer
3353 * This function specifies the offset of the raw field to be match from the
3354 * beginning of the specified packet segment, and the locations, in the form of
3355 * byte offsets from the start of the input buffer for a flow entry, from where
3356 * the value to match and the mask value to be extracted. These locations are
3357 * then stored in the flow profile. When adding flow entries to the associated
3358 * flow profile, these locations can be used to quickly extract the values to
3359 * create the content of a match entry. This function should only be used for
3360 * fixed-size data structures.
3363 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3364 u16 val_loc, u16 mask_loc)
/* Append the raw match only while there is still a free slot; on
 * overflow the counter keeps incrementing and the condition is caught
 * later (see note below).
 */
3366 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3367 seg->raws[seg->raws_cnt].off = off;
3368 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3369 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3370 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3371 /* The "last" field is used to store the length of the field */
3372 seg->raws[seg->raws_cnt].info.src.last = len;
3375 /* Overflows of "raws" will be handled as an error condition later in
3376 * the flow when this information is processed.
3382 * ice_flow_rem_vsi_prof - remove vsi from flow profile
3383 * @hw: pointer to the hardware structure
3384 * @blk: classification stage
3385 * @vsi_handle: software VSI handle
3386 * @prof_id: unique ID to identify this flow profile
3388 * This function removes the flow entries associated to the input
3389 * vsi handle and disassociates the vsi from the flow profile.
3391 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3394 struct ice_flow_prof *prof = NULL;
3395 enum ice_status status = ICE_SUCCESS;
3397 if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3398 return ICE_ERR_PARAM;
3400 /* find flow profile pointer with input package block and profile id */
/* NOTE(review): lookup hard-codes ICE_BLK_FD while the rest of the
 * function uses the 'blk' parameter — confirm this is intentional
 * (i.e. the helper is only ever called for the FD block).
 */
3401 prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
3403 ice_debug(hw, ICE_DBG_PKG,
3404 "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3405 return ICE_ERR_DOES_NOT_EXIST;
3408 /* Remove all remaining flow entries before removing the flow profile */
3409 if (!LIST_EMPTY(&prof->entries)) {
3410 struct ice_flow_entry *e, *t;
/* Safe iteration: entries are unlinked/freed by rem_entry_sync */
3412 ice_acquire_lock(&prof->entries_lock);
3413 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
3415 if (e->vsi_handle != vsi_handle)
3418 status = ice_flow_rem_entry_sync(hw, blk, e);
3422 ice_release_lock(&prof->entries_lock);
3427 /* disassociate the flow profile from sw vsi handle */
3428 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3430 ice_debug(hw, ICE_DBG_PKG,
3431 "ice_flow_disassoc_prof() failed with status=%d\n",
/* Groupings of protocol-header flags accepted in an RSS segment,
 * split by OSI layer; the VAL mask is the union used for validation
 * in ice_flow_set_rss_seg_info().
 */
3436 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3437 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_ETH_NON_IP | ICE_FLOW_SEG_HDR_VLAN)
3439 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3440 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3442 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3443 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3445 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3446 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3447 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3448 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3451 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3452 * @segs: pointer to the flow field segment(s)
3453 * @seg_cnt: segment count
3454 * @cfg: configure parameters
3456 * Helper function to extract fields from hash bitmap and use flow
3457 * header value to set flow field segment for further use in flow
3458 * profile entry or removal.
3460 static enum ice_status
3461 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3462 const struct ice_rss_hash_cfg *cfg)
3464 struct ice_flow_seg_info *seg;
3468 /* set inner most segment */
3469 seg = &segs[seg_cnt - 1];
/* Turn each bit of the hash-field bitmap into a field registration on
 * the innermost segment (no value/mask offsets needed for RSS).
 */
3471 ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3472 ICE_FLOW_FIELD_IDX_MAX)
3473 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3474 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3475 ICE_FLOW_FLD_OFF_INVAL, false);
3477 ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3479 /* set outer most header */
3480 if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3481 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3482 ICE_FLOW_SEG_HDR_IPV_FRAG |
3483 ICE_FLOW_SEG_HDR_IPV_OTHER;
3484 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3485 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3486 ICE_FLOW_SEG_HDR_IPV_FRAG |
3487 ICE_FLOW_SEG_HDR_IPV_OTHER;
/* Reject header flags outside the supported RSS set */
3489 if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3490 ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
3491 ~ICE_FLOW_SEG_HDR_IPV_FRAG)
3492 return ICE_ERR_PARAM;
/* At most one L3 and one L4 header may be selected: a non-zero,
 * non-power-of-two mask means two or more were set.
 */
3494 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3495 if (val && !ice_is_pow2(val))
3498 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3499 if (val && !ice_is_pow2(val))
3506 * ice_rem_vsi_rss_list - remove VSI from RSS list
3507 * @hw: pointer to the hardware structure
3508 * @vsi_handle: software VSI handle
3510 * Remove the VSI from all RSS configurations in the list.
3512 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3514 struct ice_rss_cfg *r, *tmp;
3516 if (LIST_EMPTY(&hw->rss_list_head))
3519 ice_acquire_lock(&hw->rss_locks);
3520 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3521 ice_rss_cfg, l_entry)
/* Clear this VSI's bit; if it was the last VSI referencing the
 * config, unlink the node (the free is presumably on a line missing
 * from this truncated listing — TODO confirm).
 */
3522 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3523 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3524 LIST_DEL(&r->l_entry);
3527 ice_release_lock(&hw->rss_locks);
3531 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3532 * @hw: pointer to the hardware structure
3533 * @vsi_handle: software VSI handle
3535 * This function will iterate through all flow profiles and disassociate
3536 * the VSI from that profile. If the flow profile has no VSIs it will
3539 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3541 const enum ice_block blk = ICE_BLK_RSS;
3542 struct ice_flow_prof *p, *t;
3543 enum ice_status status = ICE_SUCCESS;
3545 if (!ice_is_vsi_valid(hw, vsi_handle))
3546 return ICE_ERR_PARAM;
3548 if (LIST_EMPTY(&hw->fl_profs[blk]))
3551 ice_acquire_lock(&hw->rss_locks);
/* Safe iteration: ice_flow_rem_prof may remove 'p' from the list */
3552 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3554 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3555 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
/* Drop the whole profile once no VSI references it */
3559 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3560 status = ice_flow_rem_prof(hw, blk, p->id);
3565 ice_release_lock(&hw->rss_locks);
3571 * ice_get_rss_hdr_type - get a RSS profile's header type
3572 * @prof: RSS flow profile
3574 static enum ice_rss_cfg_hdr_type
3575 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3577 enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
/* Single segment => hashing on outer headers only; two segments =>
 * inner-header hashing, further classified by what the outer segment
 * carries (none / IPv4 / IPv6).
 */
3579 if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3580 hdr_type = ICE_RSS_OUTER_HEADERS;
3581 } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3582 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3583 hdr_type = ICE_RSS_INNER_HEADERS;
3584 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3585 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3586 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3587 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3594 * ice_rem_rss_list - remove RSS configuration from list
3595 * @hw: pointer to the hardware structure
3596 * @vsi_handle: software VSI handle
3597 * @prof: pointer to flow profile
3599 * Assumption: lock has already been acquired for RSS list
3602 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3604 enum ice_rss_cfg_hdr_type hdr_type;
3605 struct ice_rss_cfg *r, *tmp;
3607 /* Search for RSS hash fields associated to the VSI that match the
3608 * hash configurations associated to the flow profile. If found
3609 * remove from the RSS entry list of the VSI context and delete entry.
3611 hdr_type = ice_get_rss_hdr_type(prof);
3612 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3613 ice_rss_cfg, l_entry)
/* A match requires hash fields, headers AND header type to agree
 * with the profile's innermost segment.
 */
3614 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3615 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3616 r->hash.hdr_type == hdr_type) {
3617 ice_clear_bit(vsi_handle, r->vsis);
3618 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3619 LIST_DEL(&r->l_entry);
3627 * ice_add_rss_list - add RSS configuration to list
3628 * @hw: pointer to the hardware structure
3629 * @vsi_handle: software VSI handle
3630 * @prof: pointer to flow profile
3632 * Assumption: lock has already been acquired for RSS list
3634 static enum ice_status
3635 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3637 enum ice_rss_cfg_hdr_type hdr_type;
3638 struct ice_rss_cfg *r, *rss_cfg;
3640 hdr_type = ice_get_rss_hdr_type(prof);
/* Reuse an existing list node if one already matches this profile's
 * hash fields / headers / header type — just mark the VSI on it.
 */
3641 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3642 ice_rss_cfg, l_entry)
3643 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3644 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3645 r->hash.hdr_type == hdr_type) {
3646 ice_set_bit(vsi_handle, r->vsis);
/* Otherwise allocate and populate a new node from the profile's
 * innermost segment, then append it to the RSS list.
 */
3650 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3652 return ICE_ERR_NO_MEMORY;
3654 rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3655 rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3656 rss_cfg->hash.hdr_type = hdr_type;
3657 rss_cfg->hash.symm = prof->cfg.symm;
3658 ice_set_bit(vsi_handle, rss_cfg->vsis);
3660 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Shift/mask pairs for packing an RSS flow-profile ID out of the hash
 * fields (32 bits), protocol headers (30 bits) and encapsulation tag
 * (2 bits) — see the layout comment below.
 */
3665 #define ICE_FLOW_PROF_HASH_S 0
3666 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3667 #define ICE_FLOW_PROF_HDR_S 32
3668 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3669 #define ICE_FLOW_PROF_ENCAP_S 62
3670 #define ICE_FLOW_PROF_ENCAP_M (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3672 /* Flow profile ID format:
3673 * [0:31] - Packet match fields
3674 * [32:61] - Protocol header
3675 * [62:63] - Encapsulation flag:
3678 * 2 for tunneled with outer ipv4
3679 * 3 for tunneled with outer ipv6
3681 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3682 ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3683 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3684 (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
/* Program one byte-slot of GLQF_HSYMM for symmetric-hash XOR: point
 * field-vector word 'src' at word 'dst' (bit 0x80 enables the entry).
 * Read-modify-write so the other three byte lanes in the register
 * are preserved.
 */
3687 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3689 u32 s = ((src % 4) << 3); /* byte shift */
3690 u32 v = dst | 0x80; /* value to program */
3691 u8 i = src / 4; /* register index */
3694 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3695 reg = (reg & ~(0xff << s)) | (v << s);
3696 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
/* Pair up 'len' consecutive field-vector words of src and dst for
 * symmetric hashing by programming both directions (src->dst and
 * dst->src) in GLQF_HSYMM.
 */
3700 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3703 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3706 for (i = 0; i < len; i++) {
3707 ice_rss_config_xor_word(hw, prof_id,
3708 /* Yes, field vector in GLQF_HSYMM and
3709 * GLQF_HINSET is inversed!
3711 fv_last_word - (src + i),
3712 fv_last_word - (dst + i));
3713 ice_rss_config_xor_word(hw, prof_id,
3714 fv_last_word - (dst + i),
3715 fv_last_word - (src + i));
/* Program (or clear) symmetric-hash XOR registers for an RSS profile.
 * Resolves the HW profile ID, resets all six GLQF_HSYMM registers to
 * default, then — if the profile requests symmetric hashing — pairs
 * src/dst for IPv4 (2 words), IPv6 (8 words) and the L4 ports
 * (1 word each for TCP/UDP/SCTP) of the innermost segment.
 */
3720 ice_rss_update_symm(struct ice_hw *hw,
3721 struct ice_flow_prof *prof)
3723 struct ice_prof_map *map;
/* prof_map lookup must happen under the ES prof_map_lock */
3726 ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3727 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3729 prof_id = map->prof_id;
3730 ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3733 /* clear to default */
3734 for (m = 0; m < 6; m++)
3735 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3736 if (prof->cfg.symm) {
3737 struct ice_flow_seg_info *seg =
3738 &prof->segs[prof->segs_cnt - 1];
3740 struct ice_flow_seg_xtrct *ipv4_src =
3741 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3742 struct ice_flow_seg_xtrct *ipv4_dst =
3743 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3744 struct ice_flow_seg_xtrct *ipv6_src =
3745 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3746 struct ice_flow_seg_xtrct *ipv6_dst =
3747 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3749 struct ice_flow_seg_xtrct *tcp_src =
3750 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3751 struct ice_flow_seg_xtrct *tcp_dst =
3752 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3754 struct ice_flow_seg_xtrct *udp_src =
3755 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3756 struct ice_flow_seg_xtrct *udp_dst =
3757 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3759 struct ice_flow_seg_xtrct *sctp_src =
3760 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3761 struct ice_flow_seg_xtrct *sctp_dst =
3762 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
/* Only pair fields that were actually extracted (prot_id != 0
 * on both sides).
 */
3765 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3766 ice_rss_config_xor(hw, prof_id,
3767 ipv4_src->idx, ipv4_dst->idx, 2);
3770 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3771 ice_rss_config_xor(hw, prof_id,
3772 ipv6_src->idx, ipv6_dst->idx, 8);
3775 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3776 ice_rss_config_xor(hw, prof_id,
3777 tcp_src->idx, tcp_dst->idx, 1);
3780 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3781 ice_rss_config_xor(hw, prof_id,
3782 udp_src->idx, udp_dst->idx, 1);
3785 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3786 ice_rss_config_xor(hw, prof_id,
3787 sctp_src->idx, sctp_dst->idx, 1);
3792 * ice_add_rss_cfg_sync - add an RSS configuration
3793 * @hw: pointer to the hardware structure
3794 * @vsi_handle: software VSI handle
3795 * @cfg: configure parameters
3797 * Assumption: lock has already been acquired for RSS list
/* NOTE(review): listing is truncated (missing braces/gotos); comments
 * describe only the visible flow.
 */
3799 static enum ice_status
3800 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3801 const struct ice_rss_hash_cfg *cfg)
3803 const enum ice_block blk = ICE_BLK_RSS;
3804 struct ice_flow_prof *prof = NULL;
3805 struct ice_flow_seg_info *segs;
3806 enum ice_status status;
/* Outer-only hashing needs one segment; tunneled cases need two */
3809 segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3810 ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3812 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3815 return ICE_ERR_NO_MEMORY;
3817 /* Construct the packet segment info from the hashed fields */
3818 status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3822 /* Search for a flow profile that has matching headers, hash fields
3823 * and has the input VSI associated to it. If found, no further
3824 * operations required and exit.
3826 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3828 ICE_FLOW_FIND_PROF_CHK_FLDS |
3829 ICE_FLOW_FIND_PROF_CHK_VSI)
/* Fully matching profile: only the symmetric flag may need refresh */
3831 if (prof->cfg.symm == cfg->symm)
3833 prof->cfg.symm = cfg->symm;
3837 /* Check if a flow profile exists with the same protocol headers and
3838 * associated with the input VSI. If so disassociate the VSI from
3839 * this profile. The VSI will be added to a new profile created with
3840 * the protocol header and new hash field configuration.
3842 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3843 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3845 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3847 ice_rem_rss_list(hw, vsi_handle, prof);
3851 /* Remove profile if it has no VSIs associated */
3852 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3853 status = ice_flow_rem_prof(hw, blk, prof->id);
3859 /* Search for a profile that has same match fields only. If this
3860 * exists then associate the VSI to this profile.
3862 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3864 ICE_FLOW_FIND_PROF_CHK_FLDS);
3866 if (prof->cfg.symm == cfg->symm) {
3867 status = ice_flow_assoc_prof(hw, blk, prof,
3870 status = ice_add_rss_list(hw, vsi_handle,
3873 /* if a profile exist but with different symmetric
3874 * requirement, just return error.
3876 status = ICE_ERR_NOT_SUPPORTED;
3881 /* Create a new flow profile with generated profile and packet
3882 * segment information.
3884 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3885 ICE_FLOW_GEN_PROFID(cfg->hash_flds,
3886 segs[segs_cnt - 1].hdrs,
3888 segs, segs_cnt, NULL, 0, &prof);
3892 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3893 /* If association to a new flow profile failed then this profile can
/* Roll back the just-created profile on association failure */
3897 ice_flow_rem_prof(hw, blk, prof->id);
3901 status = ice_add_rss_list(hw, vsi_handle, prof);
3903 prof->cfg.symm = cfg->symm;
3905 ice_rss_update_symm(hw, prof);
3913 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3914 * @hw: pointer to the hardware structure
3915 * @vsi_handle: software VSI handle
3916 * @cfg: configure parameters
3918 * This function will generate a flow profile based on fields associated with
3919 * the input fields to hash on, the flow type and use the VSI number to add
3920 * a flow entry to the profile.
3923 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3924 const struct ice_rss_hash_cfg *cfg)
3926 struct ice_rss_hash_cfg local_cfg;
3927 enum ice_status status;
3929 if (!ice_is_vsi_valid(hw, vsi_handle) ||
3930 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3931 cfg->hash_flds == ICE_HASH_INVALID)
3932 return ICE_ERR_PARAM;
/* NOTE(review): local_cfg is used below but the copy from *cfg is on a
 * line missing from this truncated listing — confirm `local_cfg = *cfg`
 * precedes these branches in the full source.
 */
3935 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3936 ice_acquire_lock(&hw->rss_locks);
3937 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3938 ice_release_lock(&hw->rss_locks);
/* ICE_RSS_ANY_HEADERS: apply the config to both outer and inner */
3940 ice_acquire_lock(&hw->rss_locks);
3941 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3942 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3944 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3945 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3948 ice_release_lock(&hw->rss_locks);
3955 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3956 * @hw: pointer to the hardware structure
3957 * @vsi_handle: software VSI handle
3958 * @cfg: configure parameters
3960 * Assumption: lock has already been acquired for RSS list
3962 static enum ice_status
3963 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3964 const struct ice_rss_hash_cfg *cfg)
3966 const enum ice_block blk = ICE_BLK_RSS;
3967 struct ice_flow_seg_info *segs;
3968 struct ice_flow_prof *prof;
3969 enum ice_status status;
/* Segment count mirrors ice_add_rss_cfg_sync: 1 for outer-only,
 * 2 for tunneled header types.
 */
3972 segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3973 ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3974 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3977 return ICE_ERR_NO_MEMORY;
3979 /* Construct the packet segment info from the hashed fields */
3980 status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
/* Find the profile whose headers AND hash fields match exactly */
3984 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3986 ICE_FLOW_FIND_PROF_CHK_FLDS);
3988 status = ICE_ERR_DOES_NOT_EXIST;
3992 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3996 /* Remove RSS configuration from VSI context before deleting
3999 ice_rem_rss_list(hw, vsi_handle, prof);
4001 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
4002 status = ice_flow_rem_prof(hw, blk, prof->id);
4010 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
4011 * @hw: pointer to the hardware structure
4012 * @vsi_handle: software VSI handle
4013 * @cfg: configure parameters
4015 * This function will lookup the flow profile based on the input
4016 * hash field bitmap, iterate through the profile entry list of
4017 * that profile and find entry associated with input VSI to be
4018 * removed. Calls are made to underlying flow apis which will in
4019 * turn build or update buffers for RSS XLT1 section.
4022 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4023 const struct ice_rss_hash_cfg *cfg)
4025 struct ice_rss_hash_cfg local_cfg;
4026 enum ice_status status;
4028 if (!ice_is_vsi_valid(hw, vsi_handle) ||
4029 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4030 cfg->hash_flds == ICE_HASH_INVALID)
4031 return ICE_ERR_PARAM;
4033 ice_acquire_lock(&hw->rss_locks);
/* NOTE(review): as in ice_add_rss_cfg, the `local_cfg = *cfg` copy
 * appears to be on a line missing from this truncated listing.
 */
4035 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4036 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
/* ICE_RSS_ANY_HEADERS: remove both the outer and inner configs */
4038 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4039 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4042 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4043 status = ice_rem_rss_cfg_sync(hw, vsi_handle,
4047 ice_release_lock(&hw->rss_locks);
4053 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
4054 * @hw: pointer to the hardware structure
4055 * @vsi_handle: software VSI handle
4057 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
4059 enum ice_status status = ICE_SUCCESS;
4060 struct ice_rss_cfg *r;
4062 if (!ice_is_vsi_valid(hw, vsi_handle))
4063 return ICE_ERR_PARAM;
/* Re-apply every saved RSS config that references this VSI (used
 * after reset, when HW state must be rebuilt from the SW list).
 */
4065 ice_acquire_lock(&hw->rss_locks);
4066 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4067 ice_rss_cfg, l_entry) {
4068 if (ice_is_bit_set(r->vsis, vsi_handle)) {
4069 status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
4074 ice_release_lock(&hw->rss_locks);
4080 * ice_get_rss_cfg - returns hashed fields for the given header types
4081 * @hw: pointer to the hardware structure
4082 * @vsi_handle: software VSI handle
4083 * @hdrs: protocol header type
4085 * This function will return the match fields of the first instance of flow
4086 * profile having the given header types and containing input VSI
4088 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4090 u64 rss_hash = ICE_HASH_INVALID;
4091 struct ice_rss_cfg *r;
4093 /* verify if the protocol header is non zero and VSI is valid */
4094 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4095 return ICE_HASH_INVALID;
4097 ice_acquire_lock(&hw->rss_locks);
4098 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4099 ice_rss_cfg, l_entry)
4100 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4101 r->hash.addl_hdrs == hdrs) {
4102 rss_hash = r->hash.hash_flds;
4105 ice_release_lock(&hw->rss_locks);