1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_common.h"
8 /* Size of known protocol header fields */
/* All sizes below are in BYTES; ICE_FLOW_FLD_INFO() multiplies by
 * BITS_PER_BYTE when populating struct ice_flow_field_info.
 */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE 2
10 #define ICE_FLOW_FLD_SZ_VLAN 2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IP_DSCP 1
17 #define ICE_FLOW_FLD_SZ_IP_TTL 1
18 #define ICE_FLOW_FLD_SZ_IP_PROT 1
19 #define ICE_FLOW_FLD_SZ_PORT 2
20 #define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
21 #define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
22 #define ICE_FLOW_FLD_SZ_ICMP_CODE 1
23 #define ICE_FLOW_FLD_SZ_ARP_OPER 2
24 #define ICE_FLOW_FLD_SZ_GRE_KEYID 4
25 #define ICE_FLOW_FLD_SZ_GTP_TEID 4
26 #define ICE_FLOW_FLD_SZ_GTP_QFI 2
27 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2
28 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
29 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID 4
30 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
31 #define ICE_FLOW_FLD_SZ_AH_SPI 4
32 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4
33 #define ICE_FLOW_FLD_SZ_VXLAN_VNI 4
34 #define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
36 /* Describe properties of a protocol header field */
/* Unlike the _SZ_ macros above (bytes), .off/.size here are stored in BITS
 * -- the ICE_FLOW_FLD_INFO() initializers perform the conversion.
 */
37 struct ice_flow_field_info {
38 enum ice_flow_seg_hdr hdr;
39 s16 off; /* Offset from start of a protocol header, in bits */
40 u16 size; /* Size of fields in bits */
41 u16 mask; /* 16-bit mask for field */
/* Build an ice_flow_field_info initializer from a header type and
 * byte-granular offset/size; values are converted to bits for storage.
 */
44 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
46 .off = (_offset_bytes) * BITS_PER_BYTE, \
47 .size = (_size_bytes) * BITS_PER_BYTE, \
/* Same as ICE_FLOW_FLD_INFO() but additionally supplies a 16-bit mask for
 * fields that occupy only part of a word (DSCP, TTL, protocol, QFI, ...).
 */
51 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
53 .off = (_offset_bytes) * BITS_PER_BYTE, \
54 .size = (_size_bytes) * BITS_PER_BYTE, \
58 /* Table containing properties of supported protocol header fields */
/* Indexed by enum ice_flow_field (see the per-entry comments).  Offsets are
 * bytes from the start of the named header; keep entries in enum order.
 */
60 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
62 /* ICE_FLOW_FIELD_IDX_ETH_DA */
63 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
64 /* ICE_FLOW_FIELD_IDX_ETH_SA */
65 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
66 /* ICE_FLOW_FIELD_IDX_S_VLAN */
67 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
68 /* ICE_FLOW_FIELD_IDX_C_VLAN */
69 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
70 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
71 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
73 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
74 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
76 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
77 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
/* TTL and PROT (v4) / hop-limit and next-header (v6) share one 16-bit word,
 * hence the SEG_HDR_NONE entries with complementary 0xff00/0x00ff masks.
 */
79 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
80 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
81 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
82 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
83 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
84 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
85 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
86 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
87 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
88 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
89 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
90 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
91 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
92 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
94 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
95 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
/* PRE32/PRE48/PRE64 entries match only the leading 4/6/8 bytes of the
 * IPv6 source (offset 8) or destination (offset 24) address.
 */
99 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
100 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
101 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
102 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
103 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
104 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
105 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
106 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
107 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
108 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
109 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
110 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
111 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
112 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
113 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
114 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
115 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
116 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
118 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
119 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
120 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
121 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
122 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
123 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
124 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
125 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
126 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
128 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
129 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
130 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
131 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
133 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
134 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
136 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
137 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
138 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
139 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
140 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
141 /* ICE_FLOW_FIELD_IDX_ARP_OP */
142 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
144 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
145 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
146 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
147 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
149 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
150 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
152 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
153 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
154 ICE_FLOW_FLD_SZ_GTP_TEID),
155 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
156 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
157 ICE_FLOW_FLD_SZ_GTP_TEID),
158 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
159 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
160 ICE_FLOW_FLD_SZ_GTP_TEID),
161 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
162 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
163 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
164 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
165 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
166 ICE_FLOW_FLD_SZ_GTP_TEID),
167 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
168 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
169 ICE_FLOW_FLD_SZ_GTP_TEID),
171 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
172 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
173 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
175 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
176 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
177 ICE_FLOW_FLD_SZ_PFCP_SEID),
179 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
180 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
181 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
183 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
184 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
185 ICE_FLOW_FLD_SZ_ESP_SPI),
187 /* ICE_FLOW_FIELD_IDX_AH_SPI */
188 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
189 ICE_FLOW_FLD_SZ_AH_SPI),
191 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
192 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
193 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
194 /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
195 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
196 ICE_FLOW_FLD_SZ_VXLAN_VNI),
198 /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
199 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
200 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
202 /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
203 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
204 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
207 /* Bitmaps indicating relevant packet types for a particular protocol header
209 * Packet types for packets with an Outer/First/Single MAC header
/* Each array below is one bit per PTYPE; a set bit marks that packet type
 * as carrying the header in question.
 */
211 static const u32 ice_ptypes_mac_ofos[] = {
212 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
213 0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
214 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
215 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
216 0x00000000, 0x00000000, 0x00000000, 0x00000000,
217 0x00000000, 0x00000000, 0x00000000, 0x00000000,
218 0x00000000, 0x00000000, 0x00000000, 0x00000000,
219 0x00000000, 0x00000000, 0x00000000, 0x00000000,
222 /* Packet types for packets with an Innermost/Last MAC VLAN header */
/* PTYPE bitmap: one bit per packet type */
223 static const u32 ice_ptypes_macvlan_il[] = {
224 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
225 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
226 0x00000000, 0x00000000, 0x00000000, 0x00000000,
227 0x00000000, 0x00000000, 0x00000000, 0x00000000,
228 0x00000000, 0x00000000, 0x00000000, 0x00000000,
229 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 0x00000000, 0x00000000, 0x00000000, 0x00000000,
231 0x00000000, 0x00000000, 0x00000000, 0x00000000,
234 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
235 * include IPV4 other PTYPEs
/* PTYPE bitmap: one bit per packet type */
237 static const u32 ice_ptypes_ipv4_ofos[] = {
238 0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
239 0x00000000, 0x00000155, 0x00000000, 0x00000000,
240 0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
241 0x00001500, 0x00000000, 0x00000000, 0x00000000,
242 0x00000000, 0x00000000, 0x00000000, 0x00000000,
243 0x00000000, 0x00000000, 0x00000000, 0x00000000,
244 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 0x00000000, 0x00000000, 0x00000000, 0x00000000,
248 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
/* Superset of ice_ptypes_ipv4_ofos (adds the "IPv4 other" PTYPEs) */
251 static const u32 ice_ptypes_ipv4_ofos_all[] = {
252 0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
253 0x00000000, 0x00000155, 0x00000000, 0x00000000,
254 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
255 0x03FFD500, 0x00000000, 0x00000000, 0x00000000,
256 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 0x00000000, 0x00000000, 0x00000000, 0x00000000,
258 0x00000000, 0x00000000, 0x00000000, 0x00000000,
259 0x00000000, 0x00000000, 0x00000000, 0x00000000,
262 /* Packet types for packets with an Innermost/Last IPv4 header */
/* PTYPE bitmap: one bit per packet type */
263 static const u32 ice_ptypes_ipv4_il[] = {
264 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
265 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
266 0x00000000, 0x00000000, 0x001FF800, 0x00100000,
267 0xFC0FC000, 0x00000000, 0x00000000, 0x00000000,
268 0x00000000, 0x00000000, 0x00000000, 0x00000000,
269 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 0x00000000, 0x00000000, 0x00000000, 0x00000000,
271 0x00000000, 0x00000000, 0x00000000, 0x00000000,
274 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
275 * include IVP6 other PTYPEs
/* PTYPE bitmap: one bit per packet type */
277 static const u32 ice_ptypes_ipv6_ofos[] = {
278 0x00000000, 0x00000000, 0x77000000, 0x10002000,
279 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
280 0x00000000, 0x03F00000, 0x00000540, 0x00000000,
281 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
282 0x00000000, 0x00000000, 0x00000000, 0x00000000,
283 0x00000000, 0x00000000, 0x00000000, 0x00000000,
284 0x00000000, 0x00000000, 0x00000000, 0x00000000,
285 0x00000000, 0x00000000, 0x00000000, 0x00000000,
288 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
/* Superset of ice_ptypes_ipv6_ofos (adds the "IPv6 other" PTYPEs) */
291 static const u32 ice_ptypes_ipv6_ofos_all[] = {
292 0x00000000, 0x00000000, 0x77000000, 0x10002000,
293 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
294 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
295 0xFC002A00, 0x0000003F, 0x00000000, 0x00000000,
296 0x00000000, 0x00000000, 0x00000000, 0x00000000,
297 0x00000000, 0x00000000, 0x00000000, 0x00000000,
298 0x00000000, 0x00000000, 0x00000000, 0x00000000,
299 0x00000000, 0x00000000, 0x00000000, 0x00000000,
302 /* Packet types for packets with an Innermost/Last IPv6 header */
/* PTYPE bitmap: one bit per packet type */
303 static const u32 ice_ptypes_ipv6_il[] = {
304 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
305 0x00000770, 0x00000000, 0x00000000, 0x00000000,
306 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
307 0x03F00000, 0x0000003F, 0x00000000, 0x00000000,
308 0x00000000, 0x00000000, 0x00000000, 0x00000000,
309 0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 0x00000000, 0x00000000, 0x00000000, 0x00000000,
311 0x00000000, 0x00000000, 0x00000000, 0x00000000,
314 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
/* PTYPE bitmap: one bit per packet type */
315 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
316 0x10C00000, 0x04000800, 0x00000000, 0x00000000,
317 0x00000000, 0x00000000, 0x00000000, 0x00000000,
318 0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
319 0x00001500, 0x00000000, 0x00000000, 0x00000000,
320 0x00000000, 0x00000000, 0x00000000, 0x00000000,
321 0x00000000, 0x00000000, 0x00000000, 0x00000000,
322 0x00000000, 0x00000000, 0x00000000, 0x00000000,
323 0x00000000, 0x00000000, 0x00000000, 0x00000000,
326 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
/* PTYPE bitmap: one bit per packet type */
327 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
328 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
329 0x00000008, 0x00000000, 0x00000000, 0x00000000,
330 0x00000000, 0x00000000, 0x00139800, 0x00000000,
331 0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
332 0x00000000, 0x00000000, 0x00000000, 0x00000000,
333 0x00000000, 0x00000000, 0x00000000, 0x00000000,
334 0x00000000, 0x00000000, 0x00000000, 0x00000000,
335 0x00000000, 0x00000000, 0x00000000, 0x00000000,
338 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
/* PTYPE bitmap: one bit per packet type */
339 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
340 0x00000000, 0x00000000, 0x43000000, 0x10002000,
341 0x00000000, 0x00000000, 0x00000000, 0x00000000,
342 0x00000000, 0x02300000, 0x00000540, 0x00000000,
343 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
344 0x00000000, 0x00000000, 0x00000000, 0x00000000,
345 0x00000000, 0x00000000, 0x00000000, 0x00000000,
346 0x00000000, 0x00000000, 0x00000000, 0x00000000,
347 0x00000000, 0x00000000, 0x00000000, 0x00000000,
350 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
/* PTYPE bitmap: one bit per packet type */
351 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
352 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
353 0x00000430, 0x00000000, 0x00000000, 0x00000000,
354 0x00000000, 0x00000000, 0x4e600000, 0x00000000,
355 0x02300000, 0x00000023, 0x00000000, 0x00000000,
356 0x00000000, 0x00000000, 0x00000000, 0x00000000,
357 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 0x00000000, 0x00000000, 0x00000000, 0x00000000,
362 /* Packet types for packets with an Outermost/First ARP header */
/* PTYPE bitmap: one bit per packet type */
363 static const u32 ice_ptypes_arp_of[] = {
364 0x00000800, 0x00000000, 0x00000000, 0x00000000,
365 0x00000000, 0x00000000, 0x00000000, 0x00000000,
366 0x00000000, 0x00000000, 0x00000000, 0x00000000,
367 0x00000000, 0x00000000, 0x00000000, 0x00000000,
368 0x00000000, 0x00000000, 0x00000000, 0x00000000,
369 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 0x00000000, 0x00000000, 0x00000000, 0x00000000,
371 0x00000000, 0x00000000, 0x00000000, 0x00000000,
374 /* UDP Packet types for non-tunneled packets or tunneled
375 * packets with inner UDP.
/* PTYPE bitmap: one bit per packet type */
377 static const u32 ice_ptypes_udp_il[] = {
378 0x81000000, 0x20204040, 0x04000010, 0x80810102,
379 0x00000040, 0x00000000, 0x00000000, 0x00000000,
380 0x00000000, 0x00410000, 0x908427E0, 0x00100007,
381 0x10410000, 0x00000004, 0x00000000, 0x00000000,
382 0x00000000, 0x00000000, 0x00000000, 0x00000000,
383 0x00000000, 0x00000000, 0x00000000, 0x00000000,
384 0x00000000, 0x00000000, 0x00000000, 0x00000000,
385 0x00000000, 0x00000000, 0x00000000, 0x00000000,
388 /* Packet types for packets with an Innermost/Last TCP header */
/* PTYPE bitmap: one bit per packet type */
389 static const u32 ice_ptypes_tcp_il[] = {
390 0x04000000, 0x80810102, 0x10000040, 0x02040408,
391 0x00000102, 0x00000000, 0x00000000, 0x00000000,
392 0x00000000, 0x00820000, 0x21084000, 0x00000000,
393 0x20820000, 0x00000008, 0x00000000, 0x00000000,
394 0x00000000, 0x00000000, 0x00000000, 0x00000000,
395 0x00000000, 0x00000000, 0x00000000, 0x00000000,
396 0x00000000, 0x00000000, 0x00000000, 0x00000000,
397 0x00000000, 0x00000000, 0x00000000, 0x00000000,
400 /* Packet types for packets with an Innermost/Last SCTP header */
/* PTYPE bitmap: one bit per packet type */
401 static const u32 ice_ptypes_sctp_il[] = {
402 0x08000000, 0x01020204, 0x20000081, 0x04080810,
403 0x00000204, 0x00000000, 0x00000000, 0x00000000,
404 0x00000000, 0x01040000, 0x00000000, 0x00000000,
405 0x41040000, 0x00000010, 0x00000000, 0x00000000,
406 0x00000000, 0x00000000, 0x00000000, 0x00000000,
407 0x00000000, 0x00000000, 0x00000000, 0x00000000,
408 0x00000000, 0x00000000, 0x00000000, 0x00000000,
409 0x00000000, 0x00000000, 0x00000000, 0x00000000,
412 /* Packet types for packets with an Outermost/First ICMP header */
/* PTYPE bitmap: one bit per packet type */
413 static const u32 ice_ptypes_icmp_of[] = {
414 0x10000000, 0x00000000, 0x00000000, 0x00000000,
415 0x00000000, 0x00000000, 0x00000000, 0x00000000,
416 0x00000000, 0x00000000, 0x00000000, 0x00000000,
417 0x00000000, 0x00000000, 0x00000000, 0x00000000,
418 0x00000000, 0x00000000, 0x00000000, 0x00000000,
419 0x00000000, 0x00000000, 0x00000000, 0x00000000,
420 0x00000000, 0x00000000, 0x00000000, 0x00000000,
421 0x00000000, 0x00000000, 0x00000000, 0x00000000,
424 /* Packet types for packets with an Innermost/Last ICMP header */
/* PTYPE bitmap: one bit per packet type */
425 static const u32 ice_ptypes_icmp_il[] = {
426 0x00000000, 0x02040408, 0x40000102, 0x08101020,
427 0x00000408, 0x00000000, 0x00000000, 0x00000000,
428 0x00000000, 0x00000000, 0x42108000, 0x00000000,
429 0x82080000, 0x00000020, 0x00000000, 0x00000000,
430 0x00000000, 0x00000000, 0x00000000, 0x00000000,
431 0x00000000, 0x00000000, 0x00000000, 0x00000000,
432 0x00000000, 0x00000000, 0x00000000, 0x00000000,
433 0x00000000, 0x00000000, 0x00000000, 0x00000000,
436 /* Packet types for packets with an Outermost/First GRE header */
/* PTYPE bitmap: one bit per packet type */
437 static const u32 ice_ptypes_gre_of[] = {
438 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
439 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
440 0x00000000, 0x00000000, 0x00000000, 0x00000000,
441 0x00000000, 0x00000000, 0x00000000, 0x00000000,
442 0x00000000, 0x00000000, 0x00000000, 0x00000000,
443 0x00000000, 0x00000000, 0x00000000, 0x00000000,
444 0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 0x00000000, 0x00000000, 0x00000000, 0x00000000,
448 /* Packet types for packets with an Innermost/Last MAC header */
/* PTYPE bitmap: one bit per packet type */
449 static const u32 ice_ptypes_mac_il[] = {
450 0x00000000, 0x20000000, 0x00000000, 0x00000000,
451 0x00000000, 0x00000000, 0x00000000, 0x00000000,
452 0x00000000, 0x00000000, 0x00000000, 0x00000000,
453 0x00000000, 0x00000000, 0x00000000, 0x00000000,
454 0x00000000, 0x00000000, 0x00000000, 0x00000000,
455 0x00000000, 0x00000000, 0x00000000, 0x00000000,
456 0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 0x00000000, 0x00000000, 0x00000000, 0x00000000,
460 /* Packet types for GTPC */
/* PTYPE bitmap: one bit per packet type */
461 static const u32 ice_ptypes_gtpc[] = {
462 0x00000000, 0x00000000, 0x00000000, 0x00000000,
463 0x00000000, 0x00000000, 0x00000000, 0x00000000,
464 0x00000000, 0x00000000, 0x000001E0, 0x00000000,
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
466 0x00000000, 0x00000000, 0x00000000, 0x00000000,
467 0x00000000, 0x00000000, 0x00000000, 0x00000000,
468 0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 0x00000000, 0x00000000, 0x00000000, 0x00000000,
472 /* Packet types for VXLAN with VNI */
/* PTYPE bitmap: one bit per packet type */
473 static const u32 ice_ptypes_vxlan_vni[] = {
474 0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
475 0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
476 0x00000000, 0x00000000, 0x00000000, 0x00000000,
477 0x00000000, 0x00000000, 0x00000000, 0x00000000,
478 0x00000000, 0x00000000, 0x00000000, 0x00000000,
479 0x00000000, 0x00000000, 0x00000000, 0x00000000,
480 0x00000000, 0x00000000, 0x00000000, 0x00000000,
481 0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 /* Packet types for GTPC with TEID */
/* PTYPE bitmap: one bit per packet type (subset of ice_ptypes_gtpc) */
485 static const u32 ice_ptypes_gtpc_tid[] = {
486 0x00000000, 0x00000000, 0x00000000, 0x00000000,
487 0x00000000, 0x00000000, 0x00000000, 0x00000000,
488 0x00000000, 0x00000000, 0x00000060, 0x00000000,
489 0x00000000, 0x00000000, 0x00000000, 0x00000000,
490 0x00000000, 0x00000000, 0x00000000, 0x00000000,
491 0x00000000, 0x00000000, 0x00000000, 0x00000000,
492 0x00000000, 0x00000000, 0x00000000, 0x00000000,
493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
496 /* Packet types for GTPU */
/* Tags every inner-IPv4/IPv6 GTPU PTYPE with the GTP "session" attribute
 * (plain G-PDU, no PDU-session extension header).
 */
497 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
498 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
499 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
500 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
501 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
502 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
503 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
504 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
505 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
506 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
507 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
508 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
509 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
510 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
511 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
512 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
513 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
514 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
515 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
516 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
517 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
/* Same PTYPE list as ice_attr_gtpu_session, tagged with the PDU-session
 * extension-header attribute instead.
 */
520 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
521 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
522 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
523 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
524 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
525 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
526 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
527 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
528 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
529 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
530 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
531 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
532 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
533 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
534 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
535 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
536 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
537 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
538 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
539 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
540 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
/* Same PTYPE list, tagged with the downlink-direction attribute */
543 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
544 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
545 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
546 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
547 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
548 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
549 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
550 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
551 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
552 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
553 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
554 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
555 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
556 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
557 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
558 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
559 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
560 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
561 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
562 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
563 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
/* Same PTYPE list, tagged with the uplink-direction attribute */
566 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
567 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
568 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
569 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
570 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
571 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
572 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
573 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
574 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
575 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
576 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
577 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
578 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
579 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
580 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
581 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
582 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
583 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
584 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
585 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
586 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
/* PTYPE bitmap for all GTPU packet types: one bit per packet type */
589 static const u32 ice_ptypes_gtpu[] = {
590 0x00000000, 0x00000000, 0x00000000, 0x00000000,
591 0x00000000, 0x00000000, 0x00000000, 0x00000000,
592 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
593 0x00000000, 0x00000000, 0x00000000, 0x00000000,
594 0x00000000, 0x00000000, 0x00000000, 0x00000000,
595 0x00000000, 0x00000000, 0x00000000, 0x00000000,
596 0x00000000, 0x00000000, 0x00000000, 0x00000000,
597 0x00000000, 0x00000000, 0x00000000, 0x00000000,
600 /* Packet types for pppoe */
/* PTYPE bitmap: one bit per packet type */
601 static const u32 ice_ptypes_pppoe[] = {
602 0x00000000, 0x00000000, 0x00000000, 0x00000000,
603 0x00000000, 0x00000000, 0x00000000, 0x00000000,
604 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
605 0x00000000, 0x00000000, 0x00000000, 0x00000000,
606 0x00000000, 0x00000000, 0x00000000, 0x00000000,
607 0x00000000, 0x00000000, 0x00000000, 0x00000000,
608 0x00000000, 0x00000000, 0x00000000, 0x00000000,
609 0x00000000, 0x00000000, 0x00000000, 0x00000000,
612 /* Packet types for packets with PFCP NODE header */
/* PTYPE bitmap: one bit per packet type */
613 static const u32 ice_ptypes_pfcp_node[] = {
614 0x00000000, 0x00000000, 0x00000000, 0x00000000,
615 0x00000000, 0x00000000, 0x00000000, 0x00000000,
616 0x00000000, 0x00000000, 0x80000000, 0x00000002,
617 0x00000000, 0x00000000, 0x00000000, 0x00000000,
618 0x00000000, 0x00000000, 0x00000000, 0x00000000,
619 0x00000000, 0x00000000, 0x00000000, 0x00000000,
620 0x00000000, 0x00000000, 0x00000000, 0x00000000,
621 0x00000000, 0x00000000, 0x00000000, 0x00000000,
624 /* Packet types for packets with PFCP SESSION header */
/* PTYPE bitmap: one bit per packet type */
625 static const u32 ice_ptypes_pfcp_session[] = {
626 0x00000000, 0x00000000, 0x00000000, 0x00000000,
627 0x00000000, 0x00000000, 0x00000000, 0x00000000,
628 0x00000000, 0x00000000, 0x00000000, 0x00000005,
629 0x00000000, 0x00000000, 0x00000000, 0x00000000,
630 0x00000000, 0x00000000, 0x00000000, 0x00000000,
631 0x00000000, 0x00000000, 0x00000000, 0x00000000,
632 0x00000000, 0x00000000, 0x00000000, 0x00000000,
633 0x00000000, 0x00000000, 0x00000000, 0x00000000,
636 /* Packet types for l2tpv3 */
/* PTYPE bitmap: one bit per packet type */
637 static const u32 ice_ptypes_l2tpv3[] = {
638 0x00000000, 0x00000000, 0x00000000, 0x00000000,
639 0x00000000, 0x00000000, 0x00000000, 0x00000000,
640 0x00000000, 0x00000000, 0x00000000, 0x00000300,
641 0x00000000, 0x00000000, 0x00000000, 0x00000000,
642 0x00000000, 0x00000000, 0x00000000, 0x00000000,
643 0x00000000, 0x00000000, 0x00000000, 0x00000000,
644 0x00000000, 0x00000000, 0x00000000, 0x00000000,
645 0x00000000, 0x00000000, 0x00000000, 0x00000000,
648 /* Packet types for esp */
/* PTYPE bitmap: one bit per packet type */
649 static const u32 ice_ptypes_esp[] = {
650 0x00000000, 0x00000000, 0x00000000, 0x00000000,
651 0x00000000, 0x00000003, 0x00000000, 0x00000000,
652 0x00000000, 0x00000000, 0x00000000, 0x00000000,
653 0x00000000, 0x00000000, 0x00000000, 0x00000000,
654 0x00000000, 0x00000000, 0x00000000, 0x00000000,
655 0x00000000, 0x00000000, 0x00000000, 0x00000000,
656 0x00000000, 0x00000000, 0x00000000, 0x00000000,
657 0x00000000, 0x00000000, 0x00000000, 0x00000000,
660 /* Packet types for ah */
/* PTYPE bitmap: one bit per packet type */
661 static const u32 ice_ptypes_ah[] = {
662 0x00000000, 0x00000000, 0x00000000, 0x00000000,
663 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
664 0x00000000, 0x00000000, 0x00000000, 0x00000000,
665 0x00000000, 0x00000000, 0x00000000, 0x00000000,
666 0x00000000, 0x00000000, 0x00000000, 0x00000000,
667 0x00000000, 0x00000000, 0x00000000, 0x00000000,
668 0x00000000, 0x00000000, 0x00000000, 0x00000000,
669 0x00000000, 0x00000000, 0x00000000, 0x00000000,
672 /* Packet types for packets with NAT_T ESP header */
/* PTYPE bitmap: one bit per packet type */
673 static const u32 ice_ptypes_nat_t_esp[] = {
674 0x00000000, 0x00000000, 0x00000000, 0x00000000,
675 0x00000000, 0x00000030, 0x00000000, 0x00000000,
676 0x00000000, 0x00000000, 0x00000000, 0x00000000,
677 0x00000000, 0x00000000, 0x00000000, 0x00000000,
678 0x00000000, 0x00000000, 0x00000000, 0x00000000,
679 0x00000000, 0x00000000, 0x00000000, 0x00000000,
680 0x00000000, 0x00000000, 0x00000000, 0x00000000,
681 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* PTYPE bitmap for outer MAC frames that carry no IP payload */
684 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
685 0x00000846, 0x00000000, 0x00000000, 0x00000000,
686 0x00000000, 0x00000000, 0x00000000, 0x00000000,
687 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
688 0x00000000, 0x00000000, 0x00000000, 0x00000000,
689 0x00000000, 0x00000000, 0x00000000, 0x00000000,
690 0x00000000, 0x00000000, 0x00000000, 0x00000000,
691 0x00000000, 0x00000000, 0x00000000, 0x00000000,
692 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* PTYPE bitmap for GTPU packets without an inner IP header */
695 static const u32 ice_ptypes_gtpu_no_ip[] = {
696 0x00000000, 0x00000000, 0x00000000, 0x00000000,
697 0x00000000, 0x00000000, 0x00000000, 0x00000000,
698 0x00000000, 0x00000000, 0x00000600, 0x00000000,
699 0x00000000, 0x00000000, 0x00000000, 0x00000000,
700 0x00000000, 0x00000000, 0x00000000, 0x00000000,
701 0x00000000, 0x00000000, 0x00000000, 0x00000000,
702 0x00000000, 0x00000000, 0x00000000, 0x00000000,
703 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* PTYPE bitmap for eCPRI transport over Ethernet (TP0) */
706 static const u32 ice_ptypes_ecpri_tp0[] = {
707 0x00000000, 0x00000000, 0x00000000, 0x00000000,
708 0x00000000, 0x00000000, 0x00000000, 0x00000000,
709 0x00000000, 0x00000000, 0x00000000, 0x00000400,
710 0x00000000, 0x00000000, 0x00000000, 0x00000000,
711 0x00000000, 0x00000000, 0x00000000, 0x00000000,
712 0x00000000, 0x00000000, 0x00000000, 0x00000000,
713 0x00000000, 0x00000000, 0x00000000, 0x00000000,
714 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* PTYPE bitmap for eCPRI transport over UDP (TP0) */
717 static const u32 ice_ptypes_udp_ecpri_tp0[] = {
718 0x00000000, 0x00000000, 0x00000000, 0x00000000,
719 0x00000000, 0x00000000, 0x00000000, 0x00000000,
720 0x00000000, 0x00000000, 0x00000000, 0x00100000,
721 0x00000000, 0x00000000, 0x00000000, 0x00000000,
722 0x00000000, 0x00000000, 0x00000000, 0x00000000,
723 0x00000000, 0x00000000, 0x00000000, 0x00000000,
724 0x00000000, 0x00000000, 0x00000000, 0x00000000,
725 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* PTYPE bitmap for L2TPv2 packets */
728 static const u32 ice_ptypes_l2tpv2[] = {
729 0x00000000, 0x00000000, 0x00000000, 0x00000000,
730 0x00000000, 0x00000000, 0x00000000, 0x00000000,
731 0x00000000, 0x00000000, 0x00000000, 0x00000000,
732 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
733 0x00000000, 0x00000000, 0x00000000, 0x00000000,
734 0x00000000, 0x00000000, 0x00000000, 0x00000000,
735 0x00000000, 0x00000000, 0x00000000, 0x00000000,
736 0x00000000, 0x00000000, 0x00000000, 0x00000000,
739 static const u32 ice_ptypes_ppp[] = {
740 0x00000000, 0x00000000, 0x00000000, 0x00000000,
741 0x00000000, 0x00000000, 0x00000000, 0x00000000,
742 0x00000000, 0x00000000, 0x00000000, 0x00000000,
743 0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
744 0x00000000, 0x00000000, 0x00000000, 0x00000000,
745 0x00000000, 0x00000000, 0x00000000, 0x00000000,
746 0x00000000, 0x00000000, 0x00000000, 0x00000000,
747 0x00000000, 0x00000000, 0x00000000, 0x00000000,
750 /* Manage parameters and info. used during the creation of a flow profile */
751 struct ice_flow_prof_params {
753 u16 entry_length; /* # of bytes formatted entry will require */
755 struct ice_flow_prof *prof;
757 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
758 * This will give us the direction flags.
760 struct ice_fv_word es[ICE_MAX_FV_WORDS];
761 /* attributes can be used to add attributes to a particular PTYPE */
762 const struct ice_ptype_attributes *attr;
765 u16 mask[ICE_MAX_FV_WORDS];
766 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Headers that, for RSS purposes, identify tunneled/inner protocol
 * selections rather than plain outer L2/L3/L4 headers.
 */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	 ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
	 ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
	 ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP)
/* Per-layer groupings of segment header flags, used to validate that a
 * segment selects at most one protocol per layer.
 */
#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER	\
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
791 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
792 * @segs: array of one or more packet segments that describe the flow
793 * @segs_cnt: number of packet segments provided
795 static enum ice_status
796 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
800 for (i = 0; i < segs_cnt; i++) {
801 /* Multiple L3 headers */
802 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
803 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
804 return ICE_ERR_PARAM;
806 /* Multiple L4 headers */
807 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
808 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
809 return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
827 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
828 * @params: information about the flow to be processed
829 * @seg: index of packet segment whose header size is to be determined
831 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
836 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
837 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
840 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
841 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
842 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
843 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
844 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
845 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
846 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
847 /* A L3 header is required if L4 is specified */
851 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
852 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
853 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
854 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
855 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
856 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
857 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
858 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
864 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
865 * @params: information about the flow to be processed
867 * This function identifies the packet types associated with the protocol
868 * headers being present in packet segments of the specified flow profile.
870 static enum ice_status
871 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
873 struct ice_flow_prof *prof;
876 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
881 for (i = 0; i < params->prof->segs_cnt; i++) {
882 const ice_bitmap_t *src;
885 hdrs = prof->segs[i].hdrs;
887 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
888 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
889 (const ice_bitmap_t *)ice_ptypes_mac_il;
890 ice_and_bitmap(params->ptypes, params->ptypes, src,
894 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
895 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
896 ice_and_bitmap(params->ptypes, params->ptypes, src,
900 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
901 ice_and_bitmap(params->ptypes, params->ptypes,
902 (const ice_bitmap_t *)ice_ptypes_arp_of,
906 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
907 src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
908 ice_and_bitmap(params->ptypes, params->ptypes, src,
911 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
912 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
914 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
915 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
916 ice_and_bitmap(params->ptypes, params->ptypes, src,
918 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
919 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
921 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
922 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
923 ice_and_bitmap(params->ptypes, params->ptypes, src,
925 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
926 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
927 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
928 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
929 ice_and_bitmap(params->ptypes, params->ptypes, src,
931 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
932 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
933 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
934 ice_and_bitmap(params->ptypes, params->ptypes, src,
936 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
937 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
938 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
939 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
940 ice_and_bitmap(params->ptypes, params->ptypes, src,
942 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
943 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
944 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
945 ice_and_bitmap(params->ptypes, params->ptypes, src,
949 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
950 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
951 ice_and_bitmap(params->ptypes, params->ptypes,
952 src, ICE_FLOW_PTYPE_MAX);
953 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
954 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
955 ice_and_bitmap(params->ptypes, params->ptypes, src,
958 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
959 ice_andnot_bitmap(params->ptypes, params->ptypes, src,
963 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
964 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
965 ice_and_bitmap(params->ptypes, params->ptypes, src,
967 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
968 ice_and_bitmap(params->ptypes, params->ptypes,
969 (const ice_bitmap_t *)ice_ptypes_tcp_il,
971 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
972 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
973 ice_and_bitmap(params->ptypes, params->ptypes, src,
977 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
978 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
979 (const ice_bitmap_t *)ice_ptypes_icmp_il;
980 ice_and_bitmap(params->ptypes, params->ptypes, src,
982 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
984 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
985 ice_and_bitmap(params->ptypes, params->ptypes,
986 src, ICE_FLOW_PTYPE_MAX);
988 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
989 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
990 ice_and_bitmap(params->ptypes, params->ptypes,
991 src, ICE_FLOW_PTYPE_MAX);
992 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
993 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
994 ice_and_bitmap(params->ptypes, params->ptypes,
995 src, ICE_FLOW_PTYPE_MAX);
996 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
997 src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
998 ice_and_bitmap(params->ptypes, params->ptypes,
999 src, ICE_FLOW_PTYPE_MAX);
1000 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
1001 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1002 ice_and_bitmap(params->ptypes, params->ptypes,
1003 src, ICE_FLOW_PTYPE_MAX);
1005 /* Attributes for GTP packet with downlink */
1006 params->attr = ice_attr_gtpu_down;
1007 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1008 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
1009 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1010 ice_and_bitmap(params->ptypes, params->ptypes,
1011 src, ICE_FLOW_PTYPE_MAX);
1013 /* Attributes for GTP packet with uplink */
1014 params->attr = ice_attr_gtpu_up;
1015 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1016 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
1017 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1018 ice_and_bitmap(params->ptypes, params->ptypes,
1019 src, ICE_FLOW_PTYPE_MAX);
1021 /* Attributes for GTP packet with Extension Header */
1022 params->attr = ice_attr_gtpu_eh;
1023 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1024 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1025 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1026 ice_and_bitmap(params->ptypes, params->ptypes,
1027 src, ICE_FLOW_PTYPE_MAX);
1029 /* Attributes for GTP packet without Extension Header */
1030 params->attr = ice_attr_gtpu_session;
1031 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1032 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
1033 src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
1034 ice_and_bitmap(params->ptypes, params->ptypes,
1035 src, ICE_FLOW_PTYPE_MAX);
1036 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1037 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1038 ice_and_bitmap(params->ptypes, params->ptypes,
1039 src, ICE_FLOW_PTYPE_MAX);
1040 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1041 src = (const ice_bitmap_t *)ice_ptypes_esp;
1042 ice_and_bitmap(params->ptypes, params->ptypes,
1043 src, ICE_FLOW_PTYPE_MAX);
1044 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1045 src = (const ice_bitmap_t *)ice_ptypes_ah;
1046 ice_and_bitmap(params->ptypes, params->ptypes,
1047 src, ICE_FLOW_PTYPE_MAX);
1048 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1049 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1050 ice_and_bitmap(params->ptypes, params->ptypes,
1051 src, ICE_FLOW_PTYPE_MAX);
1052 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1053 src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1054 ice_and_bitmap(params->ptypes, params->ptypes,
1055 src, ICE_FLOW_PTYPE_MAX);
1056 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1057 src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1058 ice_and_bitmap(params->ptypes, params->ptypes,
1059 src, ICE_FLOW_PTYPE_MAX);
1062 if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
1063 src = (const ice_bitmap_t *)ice_ptypes_ppp;
1064 ice_and_bitmap(params->ptypes, params->ptypes,
1065 src, ICE_FLOW_PTYPE_MAX);
1068 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1069 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1071 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1074 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1076 ice_and_bitmap(params->ptypes, params->ptypes,
1077 src, ICE_FLOW_PTYPE_MAX);
1079 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1080 ice_andnot_bitmap(params->ptypes, params->ptypes,
1081 src, ICE_FLOW_PTYPE_MAX);
1083 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1084 ice_andnot_bitmap(params->ptypes, params->ptypes,
1085 src, ICE_FLOW_PTYPE_MAX);
1093 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
1094 * @hw: pointer to the HW struct
1095 * @params: information about the flow to be processed
1096 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1098 * This function will allocate an extraction sequence entries for a DWORD size
1099 * chunk of the packet flags.
1101 static enum ice_status
1102 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1103 struct ice_flow_prof_params *params,
1104 enum ice_flex_mdid_pkt_flags flags)
1106 u8 fv_words = hw->blk[params->blk].es.fvw;
1109 /* Make sure the number of extraction sequence entries required does not
1110 * exceed the block's capacity.
1112 if (params->es_cnt >= fv_words)
1113 return ICE_ERR_MAX_LIMIT;
1115 /* some blocks require a reversed field vector layout */
1116 if (hw->blk[params->blk].es.reverse)
1117 idx = fv_words - params->es_cnt - 1;
1119 idx = params->es_cnt;
1121 params->es[idx].prot_id = ICE_PROT_META_ID;
1122 params->es[idx].off = flags;
1129 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1130 * @hw: pointer to the HW struct
1131 * @params: information about the flow to be processed
1132 * @seg: packet segment index of the field to be extracted
1133 * @fld: ID of field to be extracted
1134 * @match: bitfield of all fields
1136 * This function determines the protocol ID, offset, and size of the given
1137 * field. It then allocates one or more extraction sequence entries for the
1138 * given field, and fill the entries with protocol ID and offset information.
1140 static enum ice_status
1141 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1142 u8 seg, enum ice_flow_field fld, u64 match)
1144 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1145 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1146 u8 fv_words = hw->blk[params->blk].es.fvw;
1147 struct ice_flow_fld_info *flds;
1148 u16 cnt, ese_bits, i;
1153 flds = params->prof->segs[seg].fields;
1156 case ICE_FLOW_FIELD_IDX_ETH_DA:
1157 case ICE_FLOW_FIELD_IDX_ETH_SA:
1158 case ICE_FLOW_FIELD_IDX_S_VLAN:
1159 case ICE_FLOW_FIELD_IDX_C_VLAN:
1160 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1162 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1163 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1165 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1166 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1168 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1169 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1171 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1172 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1173 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1175 /* TTL and PROT share the same extraction seq. entry.
1176 * Each is considered a sibling to the other in terms of sharing
1177 * the same extraction sequence entry.
1179 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1180 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1182 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1184 /* If the sibling field is also included, that field's
1185 * mask needs to be included.
1187 if (match & BIT(sib))
1188 sib_mask = ice_flds_info[sib].mask;
1190 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1191 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1192 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1194 /* TTL and PROT share the same extraction seq. entry.
1195 * Each is considered a sibling to the other in terms of sharing
1196 * the same extraction sequence entry.
1198 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1199 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1201 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1203 /* If the sibling field is also included, that field's
1204 * mask needs to be included.
1206 if (match & BIT(sib))
1207 sib_mask = ice_flds_info[sib].mask;
1209 case ICE_FLOW_FIELD_IDX_IPV4_SA:
1210 case ICE_FLOW_FIELD_IDX_IPV4_DA:
1211 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1213 case ICE_FLOW_FIELD_IDX_IPV6_SA:
1214 case ICE_FLOW_FIELD_IDX_IPV6_DA:
1215 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1216 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1217 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1218 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1219 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1220 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1221 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1223 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1224 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1225 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1226 prot_id = ICE_PROT_TCP_IL;
1228 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1229 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1230 prot_id = ICE_PROT_UDP_IL_OR_S;
1232 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1233 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1234 prot_id = ICE_PROT_SCTP_IL;
1236 case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1237 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1238 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1239 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1240 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1241 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1242 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1243 /* GTP is accessed through UDP OF protocol */
1244 prot_id = ICE_PROT_UDP_OF;
1246 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1247 prot_id = ICE_PROT_PPPOE;
1249 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1250 prot_id = ICE_PROT_UDP_IL_OR_S;
1252 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1253 prot_id = ICE_PROT_L2TPV3;
1255 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1256 prot_id = ICE_PROT_ESP_F;
1258 case ICE_FLOW_FIELD_IDX_AH_SPI:
1259 prot_id = ICE_PROT_ESP_2;
1261 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1262 prot_id = ICE_PROT_UDP_IL_OR_S;
1264 case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1265 prot_id = ICE_PROT_ECPRI;
1267 case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1268 prot_id = ICE_PROT_UDP_IL_OR_S;
1270 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1271 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1272 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1273 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1274 case ICE_FLOW_FIELD_IDX_ARP_OP:
1275 prot_id = ICE_PROT_ARP_OF;
1277 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1278 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1279 /* ICMP type and code share the same extraction seq. entry */
1280 prot_id = (params->prof->segs[seg].hdrs &
1281 ICE_FLOW_SEG_HDR_IPV4) ?
1282 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1283 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1284 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1285 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1287 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1288 prot_id = ICE_PROT_GRE_OF;
1291 return ICE_ERR_NOT_IMPL;
1294 /* Each extraction sequence entry is a word in size, and extracts a
1295 * word-aligned offset from a protocol header.
1297 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1299 flds[fld].xtrct.prot_id = prot_id;
1300 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1301 ICE_FLOW_FV_EXTRACT_SZ;
1302 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1303 flds[fld].xtrct.idx = params->es_cnt;
1304 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1306 /* Adjust the next field-entry index after accommodating the number of
1307 * entries this field consumes
1309 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1310 ice_flds_info[fld].size, ese_bits);
1312 /* Fill in the extraction sequence entries needed for this field */
1313 off = flds[fld].xtrct.off;
1314 mask = flds[fld].xtrct.mask;
1315 for (i = 0; i < cnt; i++) {
1316 /* Only consume an extraction sequence entry if there is no
1317 * sibling field associated with this field or the sibling entry
1318 * already extracts the word shared with this field.
1320 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1321 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1322 flds[sib].xtrct.off != off) {
1325 /* Make sure the number of extraction sequence required
1326 * does not exceed the block's capability
1328 if (params->es_cnt >= fv_words)
1329 return ICE_ERR_MAX_LIMIT;
1331 /* some blocks require a reversed field vector layout */
1332 if (hw->blk[params->blk].es.reverse)
1333 idx = fv_words - params->es_cnt - 1;
1335 idx = params->es_cnt;
1337 params->es[idx].prot_id = prot_id;
1338 params->es[idx].off = off;
1339 params->mask[idx] = mask | sib_mask;
1343 off += ICE_FLOW_FV_EXTRACT_SZ;
1350 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1351 * @hw: pointer to the HW struct
1352 * @params: information about the flow to be processed
1353 * @seg: index of packet segment whose raw fields are to be extracted
1355 static enum ice_status
1356 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1363 if (!params->prof->segs[seg].raws_cnt)
1366 if (params->prof->segs[seg].raws_cnt >
1367 ARRAY_SIZE(params->prof->segs[seg].raws))
1368 return ICE_ERR_MAX_LIMIT;
1370 /* Offsets within the segment headers are not supported */
1371 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1373 return ICE_ERR_PARAM;
1375 fv_words = hw->blk[params->blk].es.fvw;
1377 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1378 struct ice_flow_seg_fld_raw *raw;
1381 raw = ¶ms->prof->segs[seg].raws[i];
1383 /* Storing extraction information */
1384 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1385 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1386 ICE_FLOW_FV_EXTRACT_SZ;
1387 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1389 raw->info.xtrct.idx = params->es_cnt;
1391 /* Determine the number of field vector entries this raw field
1394 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1395 (raw->info.src.last * BITS_PER_BYTE),
1396 (ICE_FLOW_FV_EXTRACT_SZ *
1398 off = raw->info.xtrct.off;
1399 for (j = 0; j < cnt; j++) {
1402 /* Make sure the number of extraction sequence required
1403 * does not exceed the block's capability
1405 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1406 params->es_cnt >= ICE_MAX_FV_WORDS)
1407 return ICE_ERR_MAX_LIMIT;
1409 /* some blocks require a reversed field vector layout */
1410 if (hw->blk[params->blk].es.reverse)
1411 idx = fv_words - params->es_cnt - 1;
1413 idx = params->es_cnt;
1415 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1416 params->es[idx].off = off;
1418 off += ICE_FLOW_FV_EXTRACT_SZ;
1426 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1427 * @hw: pointer to the HW struct
1428 * @params: information about the flow to be processed
1430 * This function iterates through all matched fields in the given segments, and
1431 * creates an extraction sequence for the fields.
1433 static enum ice_status
1434 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1435 struct ice_flow_prof_params *params)
1437 enum ice_status status = ICE_SUCCESS;
1440 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1443 if (params->blk == ICE_BLK_ACL) {
1444 status = ice_flow_xtract_pkt_flags(hw, params,
1445 ICE_RX_MDID_PKT_FLAGS_15_0);
1450 for (i = 0; i < params->prof->segs_cnt; i++) {
1451 u64 match = params->prof->segs[i].match;
1452 enum ice_flow_field j;
1454 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1455 ICE_FLOW_FIELD_IDX_MAX) {
1456 status = ice_flow_xtract_fld(hw, params, i, j, match);
1459 ice_clear_bit(j, (ice_bitmap_t *)&match);
1462 /* Process raw matching bytes */
1463 status = ice_flow_xtract_raws(hw, params, i);
1472 * ice_flow_sel_acl_scen - returns the specific scenario
1473 * @hw: pointer to the hardware structure
1474 * @params: information about the flow to be processed
1476 * This function will return the specific scenario based on the
1477 * params passed to it
1479 static enum ice_status
1480 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1482 /* Find the best-fit scenario for the provided match width */
1483 struct ice_acl_scen *cand_scen = NULL, *scen;
1486 return ICE_ERR_DOES_NOT_EXIST;
1488 /* Loop through each scenario and match against the scenario width
1489 * to select the specific scenario
1491 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1492 if (scen->eff_width >= params->entry_length &&
1493 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1496 return ICE_ERR_DOES_NOT_EXIST;
1498 params->prof->cfg.scen = cand_scen;
1504 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1505 * @params: information about the flow to be processed
1507 static enum ice_status
1508 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1510 u16 index, i, range_idx = 0;
1512 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1514 for (i = 0; i < params->prof->segs_cnt; i++) {
1515 struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
1518 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1519 ICE_FLOW_FIELD_IDX_MAX) {
1520 struct ice_flow_fld_info *fld = &seg->fields[j];
1522 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1524 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1525 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1527 /* Range checking only supported for single
1530 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1532 BITS_PER_BYTE * 2) > 1)
1533 return ICE_ERR_PARAM;
1535 /* Ranges must define low and high values */
1536 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1537 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1538 return ICE_ERR_PARAM;
1540 fld->entry.val = range_idx++;
1542 /* Store adjusted byte-length of field for later
1543 * use, taking into account potential
1544 * non-byte-aligned displacement
1546 fld->entry.last = DIVIDE_AND_ROUND_UP
1547 (ice_flds_info[j].size +
1548 (fld->xtrct.disp % BITS_PER_BYTE),
1550 fld->entry.val = index;
1551 index += fld->entry.last;
1555 for (j = 0; j < seg->raws_cnt; j++) {
1556 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1558 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1559 raw->info.entry.val = index;
1560 raw->info.entry.last = raw->info.src.last;
1561 index += raw->info.entry.last;
1565 /* Currently only support using the byte selection base, which only
1566 * allows for an effective entry size of 30 bytes. Reject anything
1569 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1570 return ICE_ERR_PARAM;
1572 /* Only 8 range checkers per profile, reject anything trying to use
1575 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1576 return ICE_ERR_PARAM;
1578 /* Store # bytes required for entry for later use */
1579 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1585 * ice_flow_proc_segs - process all packet segments associated with a profile
1586 * @hw: pointer to the HW struct
1587 * @params: information about the flow to be processed
1589 static enum ice_status
1590 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1592 enum ice_status status;
1594 status = ice_flow_proc_seg_hdrs(params);
1598 status = ice_flow_create_xtrct_seq(hw, params);
1602 switch (params->blk) {
1605 status = ICE_SUCCESS;
1608 status = ice_flow_acl_def_entry_frmt(params);
1611 status = ice_flow_sel_acl_scen(hw, params);
1616 return ICE_ERR_NOT_IMPL;
/* Conditions for ice_flow_find_prof_conds() lookups */
#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004
1627 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1628 * @hw: pointer to the HW struct
1629 * @blk: classification stage
1630 * @dir: flow direction
1631 * @segs: array of one or more packet segments that describe the flow
1632 * @segs_cnt: number of packet segments provided
1633 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1634 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1636 static struct ice_flow_prof *
1637 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1638 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1639 u8 segs_cnt, u16 vsi_handle, u32 conds)
1641 struct ice_flow_prof *p, *prof = NULL;
1643 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1644 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1645 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1646 segs_cnt && segs_cnt == p->segs_cnt) {
1649 /* Check for profile-VSI association if specified */
1650 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1651 ice_is_vsi_valid(hw, vsi_handle) &&
1652 !ice_is_bit_set(p->vsis, vsi_handle))
1655 /* Protocol headers must be checked. Matched fields are
1656 * checked if specified.
1658 for (i = 0; i < segs_cnt; i++)
1659 if (segs[i].hdrs != p->segs[i].hdrs ||
1660 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1661 segs[i].match != p->segs[i].match))
1664 /* A match is found if all segments are matched */
1665 if (i == segs_cnt) {
1670 ice_release_lock(&hw->fl_profs_locks[blk]);
1676 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1677 * @hw: pointer to the HW struct
1678 * @blk: classification stage
1679 * @dir: flow direction
1680 * @segs: array of one or more packet segments that describe the flow
1681 * @segs_cnt: number of packet segments provided
1684 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1685 struct ice_flow_seg_info *segs, u8 segs_cnt)
1687 struct ice_flow_prof *p;
1689 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1690 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1692 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1696 * ice_flow_find_prof_id - Look up a profile with given profile ID
1697 * @hw: pointer to the HW struct
1698 * @blk: classification stage
1699 * @prof_id: unique ID to identify this flow profile
1701 static struct ice_flow_prof *
1702 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1704 struct ice_flow_prof *p;
1706 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1707 if (p->id == prof_id)
1714 * ice_dealloc_flow_entry - Deallocate flow entry memory
1715 * @hw: pointer to the HW struct
1716 * @entry: flow entry to be removed
1719 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1725 ice_free(hw, entry->entry);
1727 if (entry->range_buf) {
1728 ice_free(hw, entry->range_buf);
1729 entry->range_buf = NULL;
1733 ice_free(hw, entry->acts);
1735 entry->acts_cnt = 0;
1738 ice_free(hw, entry);
1742 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1743 * @hw: pointer to the HW struct
1744 * @blk: classification stage
1745 * @prof_id: the profile ID handle
1746 * @hw_prof_id: pointer to variable to receive the HW profile ID
1749 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1752 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1753 struct ice_prof_map *map;
1755 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1756 map = ice_search_prof_id(hw, blk, prof_id);
1758 *hw_prof_id = map->prof_id;
1759 status = ICE_SUCCESS;
1761 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
/* Scenario value meaning "no scenario configured" in PF scenario slots */
#define ICE_ACL_INVALID_SCEN	0x3f
1768 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1769 * @hw: pointer to the hardware structure
1770 * @prof: pointer to flow profile
1771 * @buf: destination buffer function writes partial extraction sequence to
1773 * returns ICE_SUCCESS if no PF is associated to the given profile
1774 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1775 * returns other error code for real error
1777 static enum ice_status
1778 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1779 struct ice_aqc_acl_prof_generic_frmt *buf)
1781 enum ice_status status;
1784 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1788 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1792 /* If all PF's associated scenarios are all 0 or all
1793 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1794 * not been configured yet.
1796 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1797 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1798 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1799 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1802 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1803 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1804 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1805 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1806 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1807 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1808 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1809 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1812 return ICE_ERR_IN_USE;
1816 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1817 * @hw: pointer to the hardware structure
1818 * @acts: array of actions to be performed on a match
1819 * @acts_cnt: number of actions
1821 static enum ice_status
1822 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1827 for (i = 0; i < acts_cnt; i++) {
1828 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1829 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1830 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1831 struct ice_acl_cntrs cntrs = { 0 };
1832 enum ice_status status;
1834 /* amount is unused in the dealloc path but the common
1835 * parameter check routine wants a value set, as zero
1836 * is invalid for the check. Just set it.
1839 cntrs.bank = 0; /* Only bank0 for the moment */
1841 LE16_TO_CPU(acts[i].data.acl_act.value);
1843 LE16_TO_CPU(acts[i].data.acl_act.value);
1845 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1846 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1848 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1850 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1859 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1860 * @hw: pointer to the hardware structure
1861 * @prof: pointer to flow profile
1863 * Disassociate the scenario from the profile for the PF of the VSI.
1865 static enum ice_status
1866 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1868 struct ice_aqc_acl_prof_generic_frmt buf;
1869 enum ice_status status = ICE_SUCCESS;
1872 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1874 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1878 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1882 /* Clear scenario for this PF */
1883 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1884 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
1890 * ice_flow_rem_entry_sync - Remove a flow entry
1891 * @hw: pointer to the HW struct
1892 * @blk: classification stage
1893 * @entry: flow entry to be removed
1895 static enum ice_status
1896 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1897 struct ice_flow_entry *entry)
1900 return ICE_ERR_BAD_PTR;
1902 if (blk == ICE_BLK_ACL) {
1903 enum ice_status status;
1906 return ICE_ERR_BAD_PTR;
1908 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1909 entry->scen_entry_idx);
1913 /* Checks if we need to release an ACL counter. */
1914 if (entry->acts_cnt && entry->acts)
1915 ice_flow_acl_free_act_cntr(hw, entry->acts,
1919 LIST_DEL(&entry->l_entry);
1921 ice_dealloc_flow_entry(hw, entry);
1927 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1928 * @hw: pointer to the HW struct
1929 * @blk: classification stage
1930 * @dir: flow direction
1931 * @prof_id: unique ID to identify this flow profile
1932 * @segs: array of one or more packet segments that describe the flow
1933 * @segs_cnt: number of packet segments provided
1934 * @acts: array of default actions
1935 * @acts_cnt: number of default actions
1936 * @prof: stores the returned flow profile added
1938 * Assumption: the caller has acquired the lock to the profile list
1940 static enum ice_status
1941 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1942 enum ice_flow_dir dir, u64 prof_id,
1943 struct ice_flow_seg_info *segs, u8 segs_cnt,
1944 struct ice_flow_action *acts, u8 acts_cnt,
1945 struct ice_flow_prof **prof)
1947 struct ice_flow_prof_params *params;
1948 enum ice_status status;
1951 if (!prof || (acts_cnt && !acts))
1952 return ICE_ERR_BAD_PTR;
1954 params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
1956 return ICE_ERR_NO_MEMORY;
1958 params->prof = (struct ice_flow_prof *)
1959 ice_malloc(hw, sizeof(*params->prof));
1960 if (!params->prof) {
1961 status = ICE_ERR_NO_MEMORY;
1965 /* initialize extraction sequence to all invalid (0xff) */
1966 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1967 params->es[i].prot_id = ICE_PROT_INVALID;
1968 params->es[i].off = ICE_FV_OFFSET_INVAL;
1972 params->prof->id = prof_id;
1973 params->prof->dir = dir;
1974 params->prof->segs_cnt = segs_cnt;
1976 /* Make a copy of the segments that need to be persistent in the flow
1979 for (i = 0; i < segs_cnt; i++)
1980 ice_memcpy(¶ms->prof->segs[i], &segs[i], sizeof(*segs),
1981 ICE_NONDMA_TO_NONDMA);
1983 /* Make a copy of the actions that need to be persistent in the flow
1987 params->prof->acts = (struct ice_flow_action *)
1988 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1989 ICE_NONDMA_TO_NONDMA);
1991 if (!params->prof->acts) {
1992 status = ICE_ERR_NO_MEMORY;
1997 status = ice_flow_proc_segs(hw, params);
1999 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2003 /* Add a HW profile for this flow profile */
2004 status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2005 params->attr, params->attr_cnt, params->es,
2008 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
2012 INIT_LIST_HEAD(¶ms->prof->entries);
2013 ice_init_lock(¶ms->prof->entries_lock);
2014 *prof = params->prof;
2018 if (params->prof->acts)
2019 ice_free(hw, params->prof->acts);
2020 ice_free(hw, params->prof);
2023 ice_free(hw, params);
2029 * ice_flow_rem_prof_sync - remove a flow profile
2030 * @hw: pointer to the hardware structure
2031 * @blk: classification stage
2032 * @prof: pointer to flow profile to remove
2034 * Assumption: the caller has acquired the lock to the profile list
2036 static enum ice_status
2037 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2038 struct ice_flow_prof *prof)
2040 enum ice_status status;
2042 /* Remove all remaining flow entries before removing the flow profile */
2043 if (!LIST_EMPTY(&prof->entries)) {
2044 struct ice_flow_entry *e, *t;
2046 ice_acquire_lock(&prof->entries_lock);
2048 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2050 status = ice_flow_rem_entry_sync(hw, blk, e);
2055 ice_release_lock(&prof->entries_lock);
2058 if (blk == ICE_BLK_ACL) {
2059 struct ice_aqc_acl_profile_ranges query_rng_buf;
2060 struct ice_aqc_acl_prof_generic_frmt buf;
2063 /* Disassociate the scenario from the profile for the PF */
2064 status = ice_flow_acl_disassoc_scen(hw, prof);
2068 /* Clear the range-checker if the profile ID is no longer
2071 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2072 if (status && status != ICE_ERR_IN_USE) {
2074 } else if (!status) {
2075 /* Clear the range-checker value for profile ID */
2076 ice_memset(&query_rng_buf, 0,
2077 sizeof(struct ice_aqc_acl_profile_ranges),
2080 status = ice_flow_get_hw_prof(hw, blk, prof->id,
2085 status = ice_prog_acl_prof_ranges(hw, prof_id,
2086 &query_rng_buf, NULL);
2092 /* Remove all hardware profiles associated with this flow profile */
2093 status = ice_rem_prof(hw, blk, prof->id);
2095 LIST_DEL(&prof->l_entry);
2096 ice_destroy_lock(&prof->entries_lock);
2098 ice_free(hw, prof->acts);
2106 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2107 * @buf: Destination buffer function writes partial xtrct sequence to
2108 * @info: Info about field
2111 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2112 struct ice_flow_fld_info *info)
2117 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2118 info->xtrct.disp / BITS_PER_BYTE;
2119 dst = info->entry.val;
2120 for (i = 0; i < info->entry.last; i++)
2121 /* HW stores field vector words in LE, convert words back to BE
2122 * so constructed entries will end up in network order
2124 buf->byte_selection[dst++] = src++ ^ 1;
2128 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2129 * @hw: pointer to the hardware structure
2130 * @prof: pointer to flow profile
2132 static enum ice_status
2133 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2135 struct ice_aqc_acl_prof_generic_frmt buf;
2136 struct ice_flow_fld_info *info;
2137 enum ice_status status;
2141 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2143 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2147 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2148 if (status && status != ICE_ERR_IN_USE)
2152 /* Program the profile dependent configuration. This is done
2153 * only once regardless of the number of PFs using that profile
2155 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2157 for (i = 0; i < prof->segs_cnt; i++) {
2158 struct ice_flow_seg_info *seg = &prof->segs[i];
2161 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2162 ICE_FLOW_FIELD_IDX_MAX) {
2163 info = &seg->fields[j];
2165 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2166 buf.word_selection[info->entry.val] =
2169 ice_flow_acl_set_xtrct_seq_fld(&buf,
2173 for (j = 0; j < seg->raws_cnt; j++) {
2174 info = &seg->raws[j].info;
2175 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2179 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2180 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2184 /* Update the current PF */
2185 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2186 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2192 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2193 * @hw: pointer to the hardware structure
2194 * @blk: classification stage
2195 * @vsi_handle: software VSI handle
2196 * @vsig: target VSI group
2198 * Assumption: the caller has already verified that the VSI to
2199 * be added has the same characteristics as the VSIG and will
2200 * thereby have access to all resources added to that VSIG.
2203 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2206 enum ice_status status;
2208 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2209 return ICE_ERR_PARAM;
2211 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2212 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2214 ice_release_lock(&hw->fl_profs_locks[blk]);
2220 * ice_flow_assoc_prof - associate a VSI with a flow profile
2221 * @hw: pointer to the hardware structure
2222 * @blk: classification stage
2223 * @prof: pointer to flow profile
2224 * @vsi_handle: software VSI handle
2226 * Assumption: the caller has acquired the lock to the profile list
2227 * and the software VSI handle has been validated
2230 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2231 struct ice_flow_prof *prof, u16 vsi_handle)
2233 enum ice_status status = ICE_SUCCESS;
2235 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2236 if (blk == ICE_BLK_ACL) {
2237 status = ice_flow_acl_set_xtrct_seq(hw, prof);
2241 status = ice_add_prof_id_flow(hw, blk,
2242 ice_get_hw_vsi_num(hw,
2246 ice_set_bit(vsi_handle, prof->vsis);
2248 ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2256 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2257 * @hw: pointer to the hardware structure
2258 * @blk: classification stage
2259 * @prof: pointer to flow profile
2260 * @vsi_handle: software VSI handle
2262 * Assumption: the caller has acquired the lock to the profile list
2263 * and the software VSI handle has been validated
2265 static enum ice_status
2266 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2267 struct ice_flow_prof *prof, u16 vsi_handle)
2269 enum ice_status status = ICE_SUCCESS;
2271 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2272 status = ice_rem_prof_id_flow(hw, blk,
2273 ice_get_hw_vsi_num(hw,
2277 ice_clear_bit(vsi_handle, prof->vsis);
2279 ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2287 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2288 * @hw: pointer to the HW struct
2289 * @blk: classification stage
2290 * @dir: flow direction
2291 * @prof_id: unique ID to identify this flow profile
2292 * @segs: array of one or more packet segments that describe the flow
2293 * @segs_cnt: number of packet segments provided
2294 * @acts: array of default actions
2295 * @acts_cnt: number of default actions
2296 * @prof: stores the returned flow profile added
2299 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2300 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2301 struct ice_flow_action *acts, u8 acts_cnt,
2302 struct ice_flow_prof **prof)
2304 enum ice_status status;
2306 if (segs_cnt > ICE_FLOW_SEG_MAX)
2307 return ICE_ERR_MAX_LIMIT;
2310 return ICE_ERR_PARAM;
2313 return ICE_ERR_BAD_PTR;
2315 status = ice_flow_val_hdrs(segs, segs_cnt);
2319 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2321 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2322 acts, acts_cnt, prof);
2324 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2326 ice_release_lock(&hw->fl_profs_locks[blk]);
2332 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2333 * @hw: pointer to the HW struct
2334 * @blk: the block for which the flow profile is to be removed
2335 * @prof_id: unique ID of the flow profile to be removed
2338 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2340 struct ice_flow_prof *prof;
2341 enum ice_status status;
2343 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2345 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2347 status = ICE_ERR_DOES_NOT_EXIST;
2351 /* prof becomes invalid after the call */
2352 status = ice_flow_rem_prof_sync(hw, blk, prof);
2355 ice_release_lock(&hw->fl_profs_locks[blk]);
2361 * ice_flow_find_entry - look for a flow entry using its unique ID
2362 * @hw: pointer to the HW struct
2363 * @blk: classification stage
2364 * @entry_id: unique ID to identify this flow entry
2366 * This function looks for the flow entry with the specified unique ID in all
2367 * flow profiles of the specified classification stage. If the entry is found,
2368 * and it returns the handle to the flow entry. Otherwise, it returns
2369 * ICE_FLOW_ENTRY_ID_INVAL.
2371 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2373 struct ice_flow_entry *found = NULL;
2374 struct ice_flow_prof *p;
2376 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2378 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2379 struct ice_flow_entry *e;
2381 ice_acquire_lock(&p->entries_lock);
2382 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2383 if (e->id == entry_id) {
2387 ice_release_lock(&p->entries_lock);
2393 ice_release_lock(&hw->fl_profs_locks[blk]);
2395 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2399 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2400 * @hw: pointer to the hardware structure
2401 * @acts: array of actions to be performed on a match
2402 * @acts_cnt: number of actions
2403 * @cnt_alloc: indicates if an ACL counter has been allocated.
2405 static enum ice_status
2406 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2407 u8 acts_cnt, bool *cnt_alloc)
2409 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2412 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2415 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2416 return ICE_ERR_OUT_OF_RANGE;
2418 for (i = 0; i < acts_cnt; i++) {
2419 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2420 acts[i].type != ICE_FLOW_ACT_DROP &&
2421 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2422 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2425 /* If the caller want to add two actions of the same type, then
2426 * it is considered invalid configuration.
2428 if (ice_test_and_set_bit(acts[i].type, dup_check))
2429 return ICE_ERR_PARAM;
2432 /* Checks if ACL counters are needed. */
2433 for (i = 0; i < acts_cnt; i++) {
2434 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2435 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2436 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2437 struct ice_acl_cntrs cntrs = { 0 };
2438 enum ice_status status;
2441 cntrs.bank = 0; /* Only bank0 for the moment */
2443 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2444 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2446 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2448 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2451 /* Counter index within the bank */
2452 acts[i].data.acl_act.value =
2453 CPU_TO_LE16(cntrs.first_cntr);
2462 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2463 * @fld: number of the given field
2464 * @info: info about field
2465 * @range_buf: range checker configuration buffer
2466 * @data: pointer to a data buffer containing flow entry's match values/masks
2467 * @range: Input/output param indicating which range checkers are being used
2470 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2471 struct ice_aqc_acl_profile_ranges *range_buf,
2472 u8 *data, u8 *range)
2476 /* If not specified, default mask is all bits in field */
2477 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2478 BIT(ice_flds_info[fld].size) - 1 :
2479 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2481 /* If the mask is 0, then we don't need to worry about this input
2482 * range checker value.
2486 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2488 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2489 u8 range_idx = info->entry.val;
2491 range_buf->checker_cfg[range_idx].low_boundary =
2492 CPU_TO_BE16(new_low);
2493 range_buf->checker_cfg[range_idx].high_boundary =
2494 CPU_TO_BE16(new_high);
2495 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2497 /* Indicate which range checker is being used */
2498 *range |= BIT(range_idx);
2503 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2504 * @fld: number of the given field
2505 * @info: info about the field
2506 * @buf: buffer containing the entry
2507 * @dontcare: buffer containing don't care mask for entry
2508 * @data: pointer to a data buffer containing flow entry's match values/masks
2511 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2512 u8 *dontcare, u8 *data)
2514 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2515 bool use_mask = false;
2518 src = info->src.val;
2519 mask = info->src.mask;
2520 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2521 disp = info->xtrct.disp % BITS_PER_BYTE;
2523 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2526 for (k = 0; k < info->entry.last; k++, dst++) {
2527 /* Add overflow bits from previous byte */
2528 buf[dst] = (tmp_s & 0xff00) >> 8;
2530 /* If mask is not valid, tmp_m is always zero, so just setting
2531 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2532 * overflow bits of mask from prev byte
2534 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2536 /* If there is displacement, last byte will only contain
2537 * displaced data, but there is no more data to read from user
2538 * buffer, so skip so as not to potentially read beyond end of
2541 if (!disp || k < info->entry.last - 1) {
2542 /* Store shifted data to use in next byte */
2543 tmp_s = data[src++] << disp;
2545 /* Add current (shifted) byte */
2546 buf[dst] |= tmp_s & 0xff;
2548 /* Handle mask if valid */
2550 tmp_m = (~data[mask++] & 0xff) << disp;
2551 dontcare[dst] |= tmp_m & 0xff;
2556 /* Fill in don't care bits at beginning of field */
2558 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2559 for (k = 0; k < disp; k++)
2560 dontcare[dst] |= BIT(k);
2563 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2565 /* Fill in don't care bits at end of field */
2567 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2568 info->entry.last - 1;
2569 for (k = end_disp; k < BITS_PER_BYTE; k++)
2570 dontcare[dst] |= BIT(k);
2575 * ice_flow_acl_frmt_entry - Format ACL entry
2576 * @hw: pointer to the hardware structure
2577 * @prof: pointer to flow profile
2578 * @e: pointer to the flow entry
2579 * @data: pointer to a data buffer containing flow entry's match values/masks
2580 * @acts: array of actions to be performed on a match
2581 * @acts_cnt: number of actions
2583 * Formats the key (and key_inverse) to be matched from the data passed in,
2584 * along with data from the flow profile. This key/key_inverse pair makes up
2585 * the 'entry' for an ACL flow entry.
2587 static enum ice_status
2588 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2589 struct ice_flow_entry *e, u8 *data,
2590 struct ice_flow_action *acts, u8 acts_cnt)
2592 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2593 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2594 enum ice_status status;
2599 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2603 /* Format the result action */
2605 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2609 status = ICE_ERR_NO_MEMORY;
2611 e->acts = (struct ice_flow_action *)
2612 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2613 ICE_NONDMA_TO_NONDMA);
2617 e->acts_cnt = acts_cnt;
2619 /* Format the matching data */
2620 buf_sz = prof->cfg.scen->width;
2621 buf = (u8 *)ice_malloc(hw, buf_sz);
2625 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2629 /* 'key' buffer will store both key and key_inverse, so must be twice
2632 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2636 range_buf = (struct ice_aqc_acl_profile_ranges *)
2637 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2641 /* Set don't care mask to all 1's to start, will zero out used bytes */
2642 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2644 for (i = 0; i < prof->segs_cnt; i++) {
2645 struct ice_flow_seg_info *seg = &prof->segs[i];
2648 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2649 ICE_FLOW_FIELD_IDX_MAX) {
2650 struct ice_flow_fld_info *info = &seg->fields[j];
2652 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2653 ice_flow_acl_frmt_entry_range(j, info,
2657 ice_flow_acl_frmt_entry_fld(j, info, buf,
2661 for (j = 0; j < seg->raws_cnt; j++) {
2662 struct ice_flow_fld_info *info = &seg->raws[j].info;
2663 u16 dst, src, mask, k;
2664 bool use_mask = false;
2666 src = info->src.val;
2667 dst = info->entry.val -
2668 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2669 mask = info->src.mask;
2671 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2674 for (k = 0; k < info->entry.last; k++, dst++) {
2675 buf[dst] = data[src++];
2677 dontcare[dst] = ~data[mask++];
2684 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2685 dontcare[prof->cfg.scen->pid_idx] = 0;
2687 /* Format the buffer for direction flags */
2688 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2690 if (prof->dir == ICE_FLOW_RX)
2691 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2694 buf[prof->cfg.scen->rng_chk_idx] = range;
2695 /* Mark any unused range checkers as don't care */
2696 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2697 e->range_buf = range_buf;
2699 ice_free(hw, range_buf);
2702 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2708 e->entry_sz = buf_sz * 2;
2715 ice_free(hw, dontcare);
2720 if (status && range_buf) {
2721 ice_free(hw, range_buf);
2722 e->range_buf = NULL;
2725 if (status && e->acts) {
2726 ice_free(hw, e->acts);
2731 if (status && cnt_alloc)
2732 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2738 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2739 * the compared data.
2740 * @prof: pointer to flow profile
2741 * @e: pointer to the comparing flow entry
2742 * @do_chg_action: decide if we want to change the ACL action
2743 * @do_add_entry: decide if we want to add the new ACL entry
2744 * @do_rem_entry: decide if we want to remove the current ACL entry
2746 * Find an ACL scenario entry that matches the compared data. In the same time,
2747 * this function also figure out:
2748 * a/ If we want to change the ACL action
2749 * b/ If we want to add the new ACL entry
2750 * c/ If we want to remove the current ACL entry
2752 static struct ice_flow_entry *
2753 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2754 struct ice_flow_entry *e, bool *do_chg_action,
2755 bool *do_add_entry, bool *do_rem_entry)
2757 struct ice_flow_entry *p, *return_entry = NULL;
2761 * a/ There exists an entry with same matching data, but different
2762 * priority, then we remove this existing ACL entry. Then, we
2763 * will add the new entry to the ACL scenario.
2764 * b/ There exists an entry with same matching data, priority, and
2765 * result action, then we do nothing
2766 * c/ There exists an entry with same matching data, priority, but
2767 * different, action, then do only change the action's entry.
2768 * d/ Else, we add this new entry to the ACL scenario.
2770 *do_chg_action = false;
2771 *do_add_entry = true;
2772 *do_rem_entry = false;
2773 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2774 if (memcmp(p->entry, e->entry, p->entry_sz))
2777 /* From this point, we have the same matching_data. */
2778 *do_add_entry = false;
2781 if (p->priority != e->priority) {
2782 /* matching data && !priority */
2783 *do_add_entry = true;
2784 *do_rem_entry = true;
2788 /* From this point, we will have matching_data && priority */
2789 if (p->acts_cnt != e->acts_cnt)
2790 *do_chg_action = true;
2791 for (i = 0; i < p->acts_cnt; i++) {
2792 bool found_not_match = false;
2794 for (j = 0; j < e->acts_cnt; j++)
2795 if (memcmp(&p->acts[i], &e->acts[j],
2796 sizeof(struct ice_flow_action))) {
2797 found_not_match = true;
2801 if (found_not_match) {
2802 *do_chg_action = true;
2807 /* (do_chg_action = true) means :
2808 * matching_data && priority && !result_action
2809 * (do_chg_action = false) means :
2810 * matching_data && priority && result_action
2815 return return_entry;
2819 * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
2822 static enum ice_acl_entry_prio
2823 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
2825 enum ice_acl_entry_prio acl_prio;
2828 case ICE_FLOW_PRIO_LOW:
2829 acl_prio = ICE_ACL_PRIO_LOW;
2831 case ICE_FLOW_PRIO_NORMAL:
2832 acl_prio = ICE_ACL_PRIO_NORMAL;
2834 case ICE_FLOW_PRIO_HIGH:
2835 acl_prio = ICE_ACL_PRIO_HIGH;
2838 acl_prio = ICE_ACL_PRIO_NORMAL;
2846 * ice_flow_acl_union_rng_chk - Perform union operation between two
2847 * range-range checker buffers
2848 * @dst_buf: pointer to destination range checker buffer
2849 * @src_buf: pointer to source range checker buffer
2851 * For this function, we do the union between dst_buf and src_buf
2852 * range checker buffer, and we will save the result back to dst_buf
2854 static enum ice_status
2855 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2856 struct ice_aqc_acl_profile_ranges *src_buf)
2860 if (!dst_buf || !src_buf)
2861 return ICE_ERR_BAD_PTR;
2863 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2864 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2865 bool will_populate = false;
2867 in_data = &src_buf->checker_cfg[i];
2872 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2873 cfg_data = &dst_buf->checker_cfg[j];
2875 if (!cfg_data->mask ||
2876 !memcmp(cfg_data, in_data,
2877 sizeof(struct ice_acl_rng_data))) {
2878 will_populate = true;
2883 if (will_populate) {
2884 ice_memcpy(cfg_data, in_data,
2885 sizeof(struct ice_acl_rng_data),
2886 ICE_NONDMA_TO_NONDMA);
2888 /* No available slot left to program range checker */
2889 return ICE_ERR_MAX_LIMIT;
2897 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2898 * @hw: pointer to the hardware structure
2899 * @prof: pointer to flow profile
2900 * @entry: double pointer to the flow entry
2902 * For this function, we will look at the current added entries in the
2903 * corresponding ACL scenario. Then, we will perform matching logic to
2904 * see if we want to add/modify/do nothing with this new entry.
2906 static enum ice_status
2907 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2908 struct ice_flow_entry **entry)
2910 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2911 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2912 struct ice_acl_act_entry *acts = NULL;
2913 struct ice_flow_entry *exist;
2914 enum ice_status status = ICE_SUCCESS;
2915 struct ice_flow_entry *e;
2918 if (!entry || !(*entry) || !prof)
2919 return ICE_ERR_BAD_PTR;
2923 do_chg_rng_chk = false;
2927 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2932 /* Query the current range-checker value in FW */
2933 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2937 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2938 sizeof(struct ice_aqc_acl_profile_ranges),
2939 ICE_NONDMA_TO_NONDMA);
2941 /* Generate the new range-checker value */
2942 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2946 /* Reconfigure the range check if the buffer is changed. */
2947 do_chg_rng_chk = false;
2948 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2949 sizeof(struct ice_aqc_acl_profile_ranges))) {
2950 status = ice_prog_acl_prof_ranges(hw, prof_id,
2951 &cfg_rng_buf, NULL);
2955 do_chg_rng_chk = true;
2959 /* Figure out if we want to (change the ACL action) and/or
2960 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2962 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2963 &do_add_entry, &do_rem_entry);
2965 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2970 /* Prepare the result action buffer */
2971 acts = (struct ice_acl_act_entry *)
2972 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2974 return ICE_ERR_NO_MEMORY;
2976 for (i = 0; i < e->acts_cnt; i++)
2977 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2978 sizeof(struct ice_acl_act_entry),
2979 ICE_NONDMA_TO_NONDMA);
2982 enum ice_acl_entry_prio prio;
2986 keys = (u8 *)e->entry;
2987 inverts = keys + (e->entry_sz / 2);
2988 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
2990 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
2991 inverts, acts, e->acts_cnt,
2996 e->scen_entry_idx = entry_idx;
2997 LIST_ADD(&e->l_entry, &prof->entries);
2999 if (do_chg_action) {
3000 /* For the action memory info, update the SW's copy of
3001 * exist entry with e's action memory info
3003 ice_free(hw, exist->acts);
3004 exist->acts_cnt = e->acts_cnt;
3005 exist->acts = (struct ice_flow_action *)
3006 ice_calloc(hw, exist->acts_cnt,
3007 sizeof(struct ice_flow_action));
3009 status = ICE_ERR_NO_MEMORY;
3013 ice_memcpy(exist->acts, e->acts,
3014 sizeof(struct ice_flow_action) * e->acts_cnt,
3015 ICE_NONDMA_TO_NONDMA);
3017 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3019 exist->scen_entry_idx);
3024 if (do_chg_rng_chk) {
3025 /* In this case, we want to update the range checker
3026 * information of the exist entry
3028 status = ice_flow_acl_union_rng_chk(exist->range_buf,
3034 /* As we don't add the new entry to our SW DB, deallocate its
3035 * memories, and return the exist entry to the caller
3037 ice_dealloc_flow_entry(hw, e);
3047 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3048 * @hw: pointer to the hardware structure
3049 * @prof: pointer to flow profile
3050 * @e: double pointer to the flow entry
3052 static enum ice_status
3053 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3054 struct ice_flow_entry **e)
3056 enum ice_status status;
3058 ice_acquire_lock(&prof->entries_lock);
3059 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3060 ice_release_lock(&prof->entries_lock);
/**
 * ice_flow_add_entry - Add a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: ID of the profile to add a new flow entry to
 * @entry_id: unique ID to identify this flow entry
 * @vsi_handle: software VSI handle for the flow entry
 * @prio: priority of the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @acts: arrays of actions to be performed on a match
 * @acts_cnt: number of actions
 * @entry_h: pointer to buffer that receives the new flow entry's handle
 *
 * Allocates a flow entry, associates @vsi_handle with the profile found by
 * @prof_id and, for non-ACL blocks, links the entry into the profile's entry
 * list.  On success *@entry_h receives the opaque handle for the new entry.
 */
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, struct ice_flow_action *acts, u8 acts_cnt,
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status = ICE_SUCCESS;

	/* ACL entries must indicate an action */
	if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
		return ICE_ERR_PARAM;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Serialize against other add/remove operations on this block */
	ice_acquire_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	/* Unknown profile ID: fail under the lock */
		status = ICE_ERR_DOES_NOT_EXIST;
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile
		 */
		e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
			status = ICE_ERR_NO_MEMORY;
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
				ice_release_lock(&hw->fl_profs_locks[blk]);

	e->vsi_handle = vsi_handle;

	/* ACL will handle the entry management */
	status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
	status = ice_flow_acl_add_scen_entry(hw, prof, &e);

	/* Blocks other than FD/RSS/ACL are not implemented here */
	status = ICE_ERR_NOT_IMPL;

	if (blk != ICE_BLK_ACL) {
		/* ACL will handle the entry management */
		ice_acquire_lock(&prof->entries_lock);
		LIST_ADD(&e->l_entry, &prof->entries);
		ice_release_lock(&prof->entries_lock);

	/* Hand the opaque handle back to the caller */
	*entry_h = ICE_FLOW_ENTRY_HNDL(e);

	/* Error path: free the partially built entry's buffer */
	ice_free(hw, e->entry);
/**
 * ice_flow_rem_entry - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry_h: handle to the flow entry to be removed
 *
 * Validates the handle, converts it back to an entry pointer and removes the
 * entry under the owning profile's entries_lock.
 */
enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
	struct ice_flow_entry *entry;
	struct ice_flow_prof *prof;
	enum ice_status status = ICE_SUCCESS;

	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
		return ICE_ERR_PARAM;

	/* The handle is simply the entry pointer cast to an integer */
	entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);

	/* Retain the pointer to the flow profile as the entry will be freed */
	ice_acquire_lock(&prof->entries_lock);
	status = ice_flow_rem_entry_sync(hw, blk, entry);
	ice_release_lock(&prof->entries_lock);
/**
 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
 * @field_type: type of the field
 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 *	     entry's input buffer
 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
 *	      input buffer
 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
 *	      entry's input buffer
 *
 * This helper function stores information of a field being matched, including
 * the type of the field and the locations of the value to match, the mask, and
 * the upper-bound value in the start of the input buffer for a flow entry.
 * This function should only be used for fixed-size data structures.
 *
 * This function also opportunistically determines the protocol headers to be
 * present based on the fields being set. Some fields cannot be used alone to
 * determine the protocol headers present. Sometimes, fields for particular
 * protocol headers are not matched. In those cases, the protocol headers
 * must be explicitly set.
 */
ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		     enum ice_flow_fld_match_type field_type, u16 val_loc,
		     u16 mask_loc, u16 last_loc)
	/* One bit per field in the segment's match/range bitmaps */
	u64 bit = BIT_ULL(fld);

	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)

	/* Record the field's match descriptor in the segment */
	seg->fields[fld].type = field_type;
	seg->fields[fld].src.val = val_loc;
	seg->fields[fld].src.mask = mask_loc;
	seg->fields[fld].src.last = last_loc;

	/* Mark the protocol header(s) implied by this field as present */
	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3237 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3238 * @seg: packet segment the field being set belongs to
3239 * @fld: field to be set
3240 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3241 * entry's input buffer
3242 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3244 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3245 * entry's input buffer
3246 * @range: indicate if field being matched is to be in a range
3248 * This function specifies the locations, in the form of byte offsets from the
3249 * start of the input buffer for a flow entry, from where the value to match,
3250 * the mask value, and upper value can be extracted. These locations are then
3251 * stored in the flow profile. When adding a flow entry associated with the
3252 * flow profile, these locations will be used to quickly extract the values and
3253 * create the content of a match entry. This function should only be used for
3254 * fixed-size data structures.
3257 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3258 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3260 enum ice_flow_fld_match_type t = range ?
3261 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3263 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3267 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3268 * @seg: packet segment the field being set belongs to
3269 * @fld: field to be set
3270 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3271 * entry's input buffer
3272 * @pref_loc: location of prefix value from entry's input buffer
3273 * @pref_sz: size of the location holding the prefix value
3275 * This function specifies the locations, in the form of byte offsets from the
3276 * start of the input buffer for a flow entry, from where the value to match
3277 * and the IPv4 prefix value can be extracted. These locations are then stored
3278 * in the flow profile. When adding flow entries to the associated flow profile,
3279 * these locations can be used to quickly extract the values to create the
3280 * content of a match entry. This function should only be used for fixed-size
3284 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3285 u16 val_loc, u16 pref_loc, u8 pref_sz)
3287 /* For this type of field, the "mask" location is for the prefix value's
3288 * location and the "last" location is for the size of the location of
3291 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3292 pref_loc, (u16)pref_sz);
/**
 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
 * @seg: packet segment the field being set belongs to
 * @off: offset of the raw field from the beginning of the segment in bytes
 * @len: length of the raw pattern to be matched
 * @val_loc: location of the value to match from entry's input buffer
 * @mask_loc: location of mask value from entry's input buffer
 *
 * This function specifies the offset of the raw field to be match from the
 * beginning of the specified packet segment, and the locations, in the form of
 * byte offsets from the start of the input buffer for a flow entry, from where
 * the value to match and the mask value to be extracted. These locations are
 * then stored in the flow profile. When adding flow entries to the associated
 * flow profile, these locations can be used to quickly extract the values to
 * create the content of a match entry. This function should only be used for
 * fixed-size data structures.
 */
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
		     u16 val_loc, u16 mask_loc)
	/* Only record the raw field while there is room in the table */
	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
		seg->raws[seg->raws_cnt].off = off;
		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
		seg->raws[seg->raws_cnt].info.src.val = val_loc;
		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
		/* The "last" field is used to store the length of the field */
		seg->raws[seg->raws_cnt].info.src.last = len;

	/* Overflows of "raws" will be handled as an error condition later in
	 * the flow when this information is processed.
	 */
3332 * ice_flow_rem_vsi_prof - remove vsi from flow profile
3333 * @hw: pointer to the hardware structure
3334 * @blk: classification stage
3335 * @vsi_handle: software VSI handle
3336 * @prof_id: unique ID to identify this flow profile
3338 * This function removes the flow entries associated to the input
3339 * vsi handle and disassociates the vsi from the flow profile.
3341 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3344 struct ice_flow_prof *prof = NULL;
3345 enum ice_status status = ICE_SUCCESS;
3347 if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3348 return ICE_ERR_PARAM;
3350 /* find flow profile pointer with input package block and profile id */
3351 prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
3353 ice_debug(hw, ICE_DBG_PKG,
3354 "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3355 return ICE_ERR_DOES_NOT_EXIST;
3358 /* Remove all remaining flow entries before removing the flow profile */
3359 if (!LIST_EMPTY(&prof->entries)) {
3360 struct ice_flow_entry *e, *t;
3362 ice_acquire_lock(&prof->entries_lock);
3363 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
3365 if (e->vsi_handle != vsi_handle)
3368 status = ice_flow_rem_entry_sync(hw, blk, e);
3372 ice_release_lock(&prof->entries_lock);
3377 /* disassociate the flow profile from sw vsi handle */
3378 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3380 ice_debug(hw, ICE_DBG_PKG,
3381 "ice_flow_disassoc_prof() failed with status=%d\n",
/* Protocol-header bit groups used when validating RSS packet segments */
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)

#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)

/* Union of all header types that RSS segments may legitimately carry */
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
/**
 * ice_flow_set_rss_seg_info - setup packet segments for RSS
 * @segs: pointer to the flow field segment(s)
 * @seg_cnt: segment count
 * @cfg: configure parameters
 *
 * Helper function to extract fields from hash bitmap and use flow
 * header value to set flow field segment for further use in flow
 * profile entry or removal.
 */
static enum ice_status
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
			  const struct ice_rss_hash_cfg *cfg)
	struct ice_flow_seg_info *seg;

	/* set inner most segment */
	seg = &segs[seg_cnt - 1];

	/* Enable a match field for every bit set in the hash-field bitmap */
	ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
			     ICE_FLOW_FIELD_IDX_MAX)
		ice_flow_set_fld(seg, (enum ice_flow_field)i,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	/* Fold in any additional protocol headers requested by the caller */
	ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);

	/* set outer most header */
	if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
		segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
						    ICE_FLOW_SEG_HDR_IPV_OTHER;
	else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
		segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
						    ICE_FLOW_SEG_HDR_IPV_OTHER;

	/* Reject header bits outside the supported RSS set */
	if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
	    ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
		return ICE_ERR_PARAM;

	/* At most one L3 header type may be selected */
	val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
	if (val && !ice_is_pow2(val))

	/* At most one L4 header type may be selected */
	val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
	if (val && !ice_is_pow2(val))
/**
 * ice_rem_vsi_rss_list - remove VSI from RSS list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * Remove the VSI from all RSS configurations in the list.
 */
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
	struct ice_rss_cfg *r, *tmp;

	/* Nothing to do when no RSS configurations exist */
	if (LIST_EMPTY(&hw->rss_list_head))

	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
				 ice_rss_cfg, l_entry)
		/* Clear this VSI's bit; drop the config once no VSI uses it */
		if (ice_test_and_clear_bit(vsi_handle, r->vsis))
			if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
				LIST_DEL(&r->l_entry);
	ice_release_lock(&hw->rss_locks);
/**
 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * This function will iterate through all flow profiles and disassociate
 * the VSI from that profile. If the flow profile has no VSIs it will
 * be removed.
 */
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *p, *t;
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* No RSS profiles at all: nothing to disassociate */
	if (LIST_EMPTY(&hw->fl_profs[blk]))

	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
		if (ice_is_bit_set(p->vsis, vsi_handle)) {
			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);

			/* Drop the profile once its last VSI is gone */
			if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
				status = ice_flow_rem_prof(hw, blk, p->id);
	ice_release_lock(&hw->rss_locks);
/**
 * ice_get_rss_hdr_type - get a RSS profile's header type
 * @prof: RSS flow profile
 *
 * Derives the ice_rss_cfg_hdr_type from the profile's segment layout: one
 * segment means outer headers only; two segments mean inner headers, with
 * an optional pinned outer IPv4/IPv6 header.
 */
static enum ice_rss_cfg_hdr_type
ice_get_rss_hdr_type(struct ice_flow_prof *prof)
	enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;

	if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
		hdr_type = ICE_RSS_OUTER_HEADERS;
	} else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
		/* Later checks take precedence: an outer IPv4/IPv6 header
		 * overrides the plain inner-headers classification.
		 */
		if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
			hdr_type = ICE_RSS_INNER_HEADERS;
		if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
			hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
		if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
			hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
/**
 * ice_rem_rss_list - remove RSS configuration from list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @prof: pointer to flow profile
 *
 * Assumption: lock has already been acquired for RSS list
 */
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
	enum ice_rss_cfg_hdr_type hdr_type;
	struct ice_rss_cfg *r, *tmp;

	/* Search for RSS hash fields associated to the VSI that match the
	 * hash configurations associated to the flow profile. If found
	 * remove from the RSS entry list of the VSI context and delete entry.
	 */
	hdr_type = ice_get_rss_hdr_type(prof);
	LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
				 ice_rss_cfg, l_entry)
		/* Compare hash fields, headers and inner/outer type of the
		 * profile's innermost segment against the stored config.
		 */
		if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
		    r->hash.hdr_type == hdr_type) {
			ice_clear_bit(vsi_handle, r->vsis);
			/* Free the node once no VSI references it anymore */
			if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
				LIST_DEL(&r->l_entry);
/**
 * ice_add_rss_list - add RSS configuration to list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @prof: pointer to flow profile
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
	enum ice_rss_cfg_hdr_type hdr_type;
	struct ice_rss_cfg *r, *rss_cfg;

	hdr_type = ice_get_rss_hdr_type(prof);
	/* Reuse an existing matching configuration: just mark this VSI */
	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
			    ice_rss_cfg, l_entry)
		if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
		    r->hash.hdr_type == hdr_type) {
			ice_set_bit(vsi_handle, r->vsis);

	/* No match found: allocate and populate a new list node */
	rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
		return ICE_ERR_NO_MEMORY;

	/* Snapshot the profile's innermost segment into the config */
	rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
	rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
	rss_cfg->hash.hdr_type = hdr_type;
	rss_cfg->hash.symm = prof->cfg.symm;
	ice_set_bit(vsi_handle, rss_cfg->vsis);

	LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Bit layout of the 64-bit RSS flow profile ID built by ICE_FLOW_GEN_PROFID */
#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	62
#define ICE_FLOW_PROF_ENCAP_M	(0x3ULL << ICE_FLOW_PROF_ENCAP_S)

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:61] - Protocol header
 * [62:63] - Encapsulation flag:
 *	     2 for tunneled with outer ipv4
 *	     3 for tunneled with outer ipv6
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
	((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	       (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	       (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
3634 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3636 u32 s = ((src % 4) << 3); /* byte shift */
3637 u32 v = dst | 0x80; /* value to program */
3638 u8 i = src / 4; /* register index */
3641 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3642 reg = (reg & ~(0xff << s)) | (v << s);
3643 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
/* Program symmetric-hash swaps for @len consecutive field-vector words:
 * each src word is paired with the matching dst word and vice versa, so
 * the resulting hash is invariant under src/dst exchange.
 */
ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
		ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;

	for (i = 0; i < len; i++) {
		ice_rss_config_xor_word(hw, prof_id,
					/* Yes, field vector in GLQF_HSYMM and
					 * GLQF_HINSET is inversed!
					 */
					fv_last_word - (src + i),
					fv_last_word - (dst + i));
		/* Mirror entry for the opposite direction */
		ice_rss_config_xor_word(hw, prof_id,
					fv_last_word - (dst + i),
					fv_last_word - (src + i));
/* Update the symmetric-hash configuration for an RSS profile: clear all
 * GLQF_HSYMM entries for the hardware profile, then, when symmetric hashing
 * is enabled, program src/dst swaps for every extracted address/port pair of
 * the profile's innermost segment (IPv4/IPv6 addresses, TCP/UDP/SCTP ports).
 */
ice_rss_update_symm(struct ice_hw *hw,
		    struct ice_flow_prof *prof)
	struct ice_prof_map *map;

	/* Translate the SW profile ID to the HW profile index */
	ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
	map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
		prof_id = map->prof_id;
	ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);

	/* clear to default */
	for (m = 0; m < 6; m++)
		wr32(hw, GLQF_HSYMM(prof_id, m), 0);
	if (prof->cfg.symm) {
		struct ice_flow_seg_info *seg =
			&prof->segs[prof->segs_cnt - 1];

		struct ice_flow_seg_xtrct *ipv4_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
		struct ice_flow_seg_xtrct *ipv4_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
		struct ice_flow_seg_xtrct *ipv6_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
		struct ice_flow_seg_xtrct *ipv6_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;

		struct ice_flow_seg_xtrct *tcp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *tcp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;

		struct ice_flow_seg_xtrct *udp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *udp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;

		struct ice_flow_seg_xtrct *sctp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *sctp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;

		/* xor IPv4 src/dst addresses (prot_id == 0 means not extracted) */
		if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   ipv4_src->idx, ipv4_dst->idx, 2);

		/* xor IPv6 src/dst addresses */
		if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   ipv6_src->idx, ipv6_dst->idx, 8);

		/* xor TCP src/dst ports */
		if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   tcp_src->idx, tcp_dst->idx, 1);

		/* xor UDP src/dst ports */
		if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   udp_src->idx, udp_dst->idx, 1);

		/* xor SCTP src/dst ports */
		if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   sctp_src->idx, sctp_dst->idx, 1);
/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @cfg: configure parameters
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
		     const struct ice_rss_hash_cfg *cfg)
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	/* Outer-only configurations need one segment; others need two */
	segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
		ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;

	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
		/* Same symmetry requirement: nothing to update */
		if (prof->cfg.symm == cfg->symm)
		prof->cfg.symm = cfg->symm;

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
			ice_rem_rss_list(hw, vsi_handle, prof);

		/* Remove profile if it has no VSIs associated */
		if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
		if (prof->cfg.symm == cfg->symm) {
			status = ice_flow_assoc_prof(hw, blk, prof,
				status = ice_add_rss_list(hw, vsi_handle,
			/* if a profile exist but with different symmetric
			 * requirement, just return error.
			 */
			status = ICE_ERR_NOT_SUPPORTED;

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(cfg->hash_flds,
						       segs[segs_cnt - 1].hdrs,
				   segs, segs_cnt, NULL, 0, &prof);

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
		ice_flow_rem_prof(hw, blk, prof->id);

	status = ice_add_rss_list(hw, vsi_handle, prof);
		prof->cfg.symm = cfg->symm;
	/* Push the (possibly updated) symmetric setting to hardware */
	ice_rss_update_symm(hw, prof);
/**
 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @cfg: configure parameters
 *
 * This function will generate a flow profile based on fields associated with
 * the input fields to hash on, the flow type and use the VSI number to add
 * a flow entry to the profile.
 */
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
		const struct ice_rss_hash_cfg *cfg)
	struct ice_rss_hash_cfg local_cfg;
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle) ||
	    !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
	    cfg->hash_flds == ICE_HASH_INVALID)
		return ICE_ERR_PARAM;

	/* NOTE(review): the copy of *cfg into local_cfg is elided in this
	 * excerpt; presumably local_cfg = *cfg happens here - confirm.
	 */
	if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
		/* Specific header type: a single sync call suffices */
		ice_acquire_lock(&hw->rss_locks);
		status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
		ice_release_lock(&hw->rss_locks);
		/* ICE_RSS_ANY_HEADERS: program both outer and inner configs */
		ice_acquire_lock(&hw->rss_locks);
		local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
		status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
			local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
		ice_release_lock(&hw->rss_locks);
/**
 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @cfg: configure parameters
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
		     const struct ice_rss_hash_cfg *cfg)
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_seg_info *segs;
	struct ice_flow_prof *prof;
	enum ice_status status;

	/* Outer-only configurations use one segment; others use two */
	segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
		ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);

	/* Look up the profile by its match fields only */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
		status = ICE_ERR_DOES_NOT_EXIST;

	status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);

	/* Remove RSS configuration from VSI context before deleting
	 * the flow profile.
	 */
	ice_rem_rss_list(hw, vsi_handle, prof);

	/* Drop the profile once no VSI remains associated with it */
	if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
		status = ice_flow_rem_prof(hw, blk, prof->id);
/**
 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @cfg: configure parameters
 *
 * This function will lookup the flow profile based on the input
 * hash field bitmap, iterate through the profile entry list of
 * that profile and find entry associated with input VSI to be
 * removed. Calls are made to underlying flow apis which will in
 * turn build or update buffers for RSS XLT1 section.
 */
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
		const struct ice_rss_hash_cfg *cfg)
	struct ice_rss_hash_cfg local_cfg;
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle) ||
	    !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
	    cfg->hash_flds == ICE_HASH_INVALID)
		return ICE_ERR_PARAM;

	ice_acquire_lock(&hw->rss_locks);
	/* NOTE(review): the copy of *cfg into local_cfg is elided in this
	 * excerpt; presumably local_cfg = *cfg happens here - confirm.
	 */
	if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
		/* Specific header type: single removal */
		status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
		/* ICE_RSS_ANY_HEADERS: remove both outer and inner configs */
		local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
		status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
			local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
			status = ice_rem_rss_cfg_sync(hw, vsi_handle,
	ice_release_lock(&hw->rss_locks);
/**
 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * Re-applies every stored RSS configuration in which this VSI participates
 * (used after reset, when the hardware state must be rebuilt from the SW list).
 */
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
	enum ice_status status = ICE_SUCCESS;
	struct ice_rss_cfg *r;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
			    ice_rss_cfg, l_entry) {
		/* Only re-program configs this VSI is a member of */
		if (ice_is_bit_set(r->vsis, vsi_handle)) {
			status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
	ice_release_lock(&hw->rss_locks);
4027 * ice_get_rss_cfg - returns hashed fields for the given header types
4028 * @hw: pointer to the hardware structure
4029 * @vsi_handle: software VSI handle
4030 * @hdrs: protocol header type
4032 * This function will return the match fields of the first instance of flow
4033 * profile having the given header types and containing input VSI
4035 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4037 u64 rss_hash = ICE_HASH_INVALID;
4038 struct ice_rss_cfg *r;
4040 /* verify if the protocol header is non zero and VSI is valid */
4041 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4042 return ICE_HASH_INVALID;
4044 ice_acquire_lock(&hw->rss_locks);
4045 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4046 ice_rss_cfg, l_entry)
4047 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4048 r->hash.addl_hdrs == hdrs) {
4049 rss_hash = r->hash.hash_flds;
4052 ice_release_lock(&hw->rss_locks);