1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_common.h"
/* Size of known protocol header fields, in bytes (the ICE_FLOW_FLD_INFO
 * initializers below multiply these by BITS_PER_BYTE)
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE 2
#define ICE_FLOW_FLD_SZ_VLAN 2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
#define ICE_FLOW_FLD_SZ_IP_DSCP 1
#define ICE_FLOW_FLD_SZ_IP_TTL 1
#define ICE_FLOW_FLD_SZ_IP_PROT 1
#define ICE_FLOW_FLD_SZ_PORT 2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
#define ICE_FLOW_FLD_SZ_ICMP_CODE 1
#define ICE_FLOW_FLD_SZ_ARP_OPER 2
#define ICE_FLOW_FLD_SZ_GRE_KEYID 4
#define ICE_FLOW_FLD_SZ_GTP_TEID 4
#define ICE_FLOW_FLD_SZ_GTP_QFI 2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2
#define ICE_FLOW_FLD_SZ_PFCP_SEID 8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID 4
#define ICE_FLOW_FLD_SZ_ESP_SPI 4
#define ICE_FLOW_FLD_SZ_AH_SPI 4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4
#define ICE_FLOW_FLD_SZ_VXLAN_VNI 4
#define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
36 /* Describe properties of a protocol header field */
37 struct ice_flow_field_info {
38 enum ice_flow_seg_hdr hdr;
39 s16 off; /* Offset from start of a protocol header, in bits */
40 u16 size; /* Size of fields in bits */
41 u16 mask; /* 16-bit mask for field */
/* Initializer for an ice_flow_field_info entry without an extraction mask
 * (the mask member is left zero-initialized)
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
}
/* Initializer for an ice_flow_field_info entry with an explicit 16-bit
 * extraction mask
 */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
58 /* Table containing properties of supported protocol header fields */
60 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
62 /* ICE_FLOW_FIELD_IDX_ETH_DA */
63 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
64 /* ICE_FLOW_FIELD_IDX_ETH_SA */
65 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
66 /* ICE_FLOW_FIELD_IDX_S_VLAN */
67 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
68 /* ICE_FLOW_FIELD_IDX_C_VLAN */
69 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
70 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
71 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
73 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
74 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
76 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
77 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
79 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
80 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
81 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
82 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
83 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
84 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
85 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
86 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
87 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
88 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
89 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
90 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
91 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
92 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
93 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
94 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
95 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
97 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
99 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
100 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
101 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
102 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
103 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
104 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
105 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
106 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
107 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
108 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
109 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
110 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
111 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
112 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
113 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
114 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
115 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
116 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
118 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
119 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
120 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
121 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
122 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
123 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
124 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
125 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
126 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
127 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
128 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
129 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
130 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
131 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
133 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
134 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
135 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
136 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
137 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
138 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
139 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
140 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
141 /* ICE_FLOW_FIELD_IDX_ARP_OP */
142 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
144 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
145 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
146 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
147 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
149 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
150 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
152 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
153 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
154 ICE_FLOW_FLD_SZ_GTP_TEID),
155 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
156 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
157 ICE_FLOW_FLD_SZ_GTP_TEID),
158 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
159 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
160 ICE_FLOW_FLD_SZ_GTP_TEID),
161 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
162 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
163 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
164 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
165 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
166 ICE_FLOW_FLD_SZ_GTP_TEID),
167 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
168 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
169 ICE_FLOW_FLD_SZ_GTP_TEID),
171 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
172 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
173 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
175 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
176 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
177 ICE_FLOW_FLD_SZ_PFCP_SEID),
179 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
180 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
181 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
183 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
184 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
185 ICE_FLOW_FLD_SZ_ESP_SPI),
187 /* ICE_FLOW_FIELD_IDX_AH_SPI */
188 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
189 ICE_FLOW_FLD_SZ_AH_SPI),
191 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
192 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
193 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
194 /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
195 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
196 ICE_FLOW_FLD_SZ_VXLAN_VNI),
198 /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
199 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
200 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
202 /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
203 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
204 ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
207 /* Bitmaps indicating relevant packet types for a particular protocol header
209 * Packet types for packets with an Outer/First/Single MAC header
211 static const u32 ice_ptypes_mac_ofos[] = {
212 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
213 0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
214 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
215 0x00000000, 0x00000000, 0x00000000, 0x00000000,
216 0x00000000, 0x00000000, 0x00000000, 0x00000000,
217 0x00000000, 0x00000000, 0x00000000, 0x00000000,
218 0x00000000, 0x00000000, 0x00000000, 0x00000000,
219 0x00000000, 0x00000000, 0x00000000, 0x00000000,
222 /* Packet types for packets with an Innermost/Last MAC VLAN header */
223 static const u32 ice_ptypes_macvlan_il[] = {
224 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
225 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
226 0x00000000, 0x00000000, 0x00000000, 0x00000000,
227 0x00000000, 0x00000000, 0x00000000, 0x00000000,
228 0x00000000, 0x00000000, 0x00000000, 0x00000000,
229 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 0x00000000, 0x00000000, 0x00000000, 0x00000000,
231 0x00000000, 0x00000000, 0x00000000, 0x00000000,
234 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
235 * include IPV4 other PTYPEs
237 static const u32 ice_ptypes_ipv4_ofos[] = {
238 0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
239 0x00000000, 0x00000155, 0x00000000, 0x00000000,
240 0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
241 0x00000000, 0x00000000, 0x00000000, 0x00000000,
242 0x00000000, 0x00000000, 0x00000000, 0x00000000,
243 0x00000000, 0x00000000, 0x00000000, 0x00000000,
244 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 0x00000000, 0x00000000, 0x00000000, 0x00000000,
248 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
251 static const u32 ice_ptypes_ipv4_ofos_all[] = {
252 0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
253 0x00000000, 0x00000155, 0x00000000, 0x00000000,
254 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
255 0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 0x00000000, 0x00000000, 0x00000000, 0x00000000,
258 0x00000000, 0x00000000, 0x00000000, 0x00000000,
259 0x00000000, 0x00000000, 0x00000000, 0x00000000,
262 /* Packet types for packets with an Innermost/Last IPv4 header */
263 static const u32 ice_ptypes_ipv4_il[] = {
264 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
265 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
266 0x00000000, 0x00000000, 0x001FF800, 0x00100000,
267 0x00000000, 0x00000000, 0x00000000, 0x00000000,
268 0x00000000, 0x00000000, 0x00000000, 0x00000000,
269 0x00000000, 0x00000000, 0x00000000, 0x00000000,
270 0x00000000, 0x00000000, 0x00000000, 0x00000000,
271 0x00000000, 0x00000000, 0x00000000, 0x00000000,
274 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
275 * include IVP6 other PTYPEs
277 static const u32 ice_ptypes_ipv6_ofos[] = {
278 0x00000000, 0x00000000, 0x77000000, 0x10002000,
279 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
280 0x00000000, 0x03F00000, 0x00000540, 0x00000000,
281 0x00000000, 0x00000000, 0x00000000, 0x00000000,
282 0x00000000, 0x00000000, 0x00000000, 0x00000000,
283 0x00000000, 0x00000000, 0x00000000, 0x00000000,
284 0x00000000, 0x00000000, 0x00000000, 0x00000000,
285 0x00000000, 0x00000000, 0x00000000, 0x00000000,
288 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
291 static const u32 ice_ptypes_ipv6_ofos_all[] = {
292 0x00000000, 0x00000000, 0x77000000, 0x10002000,
293 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
294 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
295 0x00000000, 0x00000000, 0x00000000, 0x00000000,
296 0x00000000, 0x00000000, 0x00000000, 0x00000000,
297 0x00000000, 0x00000000, 0x00000000, 0x00000000,
298 0x00000000, 0x00000000, 0x00000000, 0x00000000,
299 0x00000000, 0x00000000, 0x00000000, 0x00000000,
302 /* Packet types for packets with an Innermost/Last IPv6 header */
303 static const u32 ice_ptypes_ipv6_il[] = {
304 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
305 0x00000770, 0x00000000, 0x00000000, 0x00000000,
306 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
307 0x00000000, 0x00000000, 0x00000000, 0x00000000,
308 0x00000000, 0x00000000, 0x00000000, 0x00000000,
309 0x00000000, 0x00000000, 0x00000000, 0x00000000,
310 0x00000000, 0x00000000, 0x00000000, 0x00000000,
311 0x00000000, 0x00000000, 0x00000000, 0x00000000,
314 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
315 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
316 0x10C00000, 0x04000800, 0x00000000, 0x00000000,
317 0x00000000, 0x00000000, 0x00000000, 0x00000000,
318 0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
319 0x00000000, 0x00000000, 0x00000000, 0x00000000,
320 0x00000000, 0x00000000, 0x00000000, 0x00000000,
321 0x00000000, 0x00000000, 0x00000000, 0x00000000,
322 0x00000000, 0x00000000, 0x00000000, 0x00000000,
323 0x00000000, 0x00000000, 0x00000000, 0x00000000,
326 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
327 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
328 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
329 0x00000008, 0x00000000, 0x00000000, 0x00000000,
330 0x00000000, 0x00000000, 0x00139800, 0x00000000,
331 0x00000000, 0x00000000, 0x00000000, 0x00000000,
332 0x00000000, 0x00000000, 0x00000000, 0x00000000,
333 0x00000000, 0x00000000, 0x00000000, 0x00000000,
334 0x00000000, 0x00000000, 0x00000000, 0x00000000,
335 0x00000000, 0x00000000, 0x00000000, 0x00000000,
338 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
339 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
340 0x00000000, 0x00000000, 0x43000000, 0x10002000,
341 0x00000000, 0x00000000, 0x00000000, 0x00000000,
342 0x00000000, 0x02300000, 0x00000540, 0x00000000,
343 0x00000000, 0x00000000, 0x00000000, 0x00000000,
344 0x00000000, 0x00000000, 0x00000000, 0x00000000,
345 0x00000000, 0x00000000, 0x00000000, 0x00000000,
346 0x00000000, 0x00000000, 0x00000000, 0x00000000,
347 0x00000000, 0x00000000, 0x00000000, 0x00000000,
350 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
351 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
352 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
353 0x00000430, 0x00000000, 0x00000000, 0x00000000,
354 0x00000000, 0x00000000, 0x4e600000, 0x00000000,
355 0x00000000, 0x00000000, 0x00000000, 0x00000000,
356 0x00000000, 0x00000000, 0x00000000, 0x00000000,
357 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 0x00000000, 0x00000000, 0x00000000, 0x00000000,
362 /* Packet types for packets with an Outermost/First ARP header */
363 static const u32 ice_ptypes_arp_of[] = {
364 0x00000800, 0x00000000, 0x00000000, 0x00000000,
365 0x00000000, 0x00000000, 0x00000000, 0x00000000,
366 0x00000000, 0x00000000, 0x00000000, 0x00000000,
367 0x00000000, 0x00000000, 0x00000000, 0x00000000,
368 0x00000000, 0x00000000, 0x00000000, 0x00000000,
369 0x00000000, 0x00000000, 0x00000000, 0x00000000,
370 0x00000000, 0x00000000, 0x00000000, 0x00000000,
371 0x00000000, 0x00000000, 0x00000000, 0x00000000,
374 /* UDP Packet types for non-tunneled packets or tunneled
375 * packets with inner UDP.
377 static const u32 ice_ptypes_udp_il[] = {
378 0x81000000, 0x20204040, 0x04000010, 0x80810102,
379 0x00000040, 0x00000000, 0x00000000, 0x00000000,
380 0x00000000, 0x00410000, 0x908427E0, 0x00100007,
381 0x00000000, 0x00000000, 0x00000000, 0x00000000,
382 0x00000000, 0x00000000, 0x00000000, 0x00000000,
383 0x00000000, 0x00000000, 0x00000000, 0x00000000,
384 0x00000000, 0x00000000, 0x00000000, 0x00000000,
385 0x00000000, 0x00000000, 0x00000000, 0x00000000,
388 /* Packet types for packets with an Innermost/Last TCP header */
389 static const u32 ice_ptypes_tcp_il[] = {
390 0x04000000, 0x80810102, 0x10000040, 0x02040408,
391 0x00000102, 0x00000000, 0x00000000, 0x00000000,
392 0x00000000, 0x00820000, 0x21084000, 0x00000000,
393 0x00000000, 0x00000000, 0x00000000, 0x00000000,
394 0x00000000, 0x00000000, 0x00000000, 0x00000000,
395 0x00000000, 0x00000000, 0x00000000, 0x00000000,
396 0x00000000, 0x00000000, 0x00000000, 0x00000000,
397 0x00000000, 0x00000000, 0x00000000, 0x00000000,
400 /* Packet types for packets with an Innermost/Last SCTP header */
401 static const u32 ice_ptypes_sctp_il[] = {
402 0x08000000, 0x01020204, 0x20000081, 0x04080810,
403 0x00000204, 0x00000000, 0x00000000, 0x00000000,
404 0x00000000, 0x01040000, 0x00000000, 0x00000000,
405 0x00000000, 0x00000000, 0x00000000, 0x00000000,
406 0x00000000, 0x00000000, 0x00000000, 0x00000000,
407 0x00000000, 0x00000000, 0x00000000, 0x00000000,
408 0x00000000, 0x00000000, 0x00000000, 0x00000000,
409 0x00000000, 0x00000000, 0x00000000, 0x00000000,
412 /* Packet types for packets with an Outermost/First ICMP header */
413 static const u32 ice_ptypes_icmp_of[] = {
414 0x10000000, 0x00000000, 0x00000000, 0x00000000,
415 0x00000000, 0x00000000, 0x00000000, 0x00000000,
416 0x00000000, 0x00000000, 0x00000000, 0x00000000,
417 0x00000000, 0x00000000, 0x00000000, 0x00000000,
418 0x00000000, 0x00000000, 0x00000000, 0x00000000,
419 0x00000000, 0x00000000, 0x00000000, 0x00000000,
420 0x00000000, 0x00000000, 0x00000000, 0x00000000,
421 0x00000000, 0x00000000, 0x00000000, 0x00000000,
424 /* Packet types for packets with an Innermost/Last ICMP header */
425 static const u32 ice_ptypes_icmp_il[] = {
426 0x00000000, 0x02040408, 0x40000102, 0x08101020,
427 0x00000408, 0x00000000, 0x00000000, 0x00000000,
428 0x00000000, 0x00000000, 0x42108000, 0x00000000,
429 0x00000000, 0x00000000, 0x00000000, 0x00000000,
430 0x00000000, 0x00000000, 0x00000000, 0x00000000,
431 0x00000000, 0x00000000, 0x00000000, 0x00000000,
432 0x00000000, 0x00000000, 0x00000000, 0x00000000,
433 0x00000000, 0x00000000, 0x00000000, 0x00000000,
436 /* Packet types for packets with an Outermost/First GRE header */
437 static const u32 ice_ptypes_gre_of[] = {
438 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
439 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
440 0x00000000, 0x00000000, 0x00000000, 0x00000000,
441 0x00000000, 0x00000000, 0x00000000, 0x00000000,
442 0x00000000, 0x00000000, 0x00000000, 0x00000000,
443 0x00000000, 0x00000000, 0x00000000, 0x00000000,
444 0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 0x00000000, 0x00000000, 0x00000000, 0x00000000,
448 /* Packet types for packets with an Innermost/Last MAC header */
449 static const u32 ice_ptypes_mac_il[] = {
450 0x00000000, 0x20000000, 0x00000000, 0x00000000,
451 0x00000000, 0x00000000, 0x00000000, 0x00000000,
452 0x00000000, 0x00000000, 0x00000000, 0x00000000,
453 0x00000000, 0x00000000, 0x00000000, 0x00000000,
454 0x00000000, 0x00000000, 0x00000000, 0x00000000,
455 0x00000000, 0x00000000, 0x00000000, 0x00000000,
456 0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 0x00000000, 0x00000000, 0x00000000, 0x00000000,
460 /* Packet types for GTPC */
461 static const u32 ice_ptypes_gtpc[] = {
462 0x00000000, 0x00000000, 0x00000000, 0x00000000,
463 0x00000000, 0x00000000, 0x00000000, 0x00000000,
464 0x00000000, 0x00000000, 0x000001E0, 0x00000000,
465 0x00000000, 0x00000000, 0x00000000, 0x00000000,
466 0x00000000, 0x00000000, 0x00000000, 0x00000000,
467 0x00000000, 0x00000000, 0x00000000, 0x00000000,
468 0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 0x00000000, 0x00000000, 0x00000000, 0x00000000,
472 /* Packet types for VXLAN with VNI */
473 static const u32 ice_ptypes_vxlan_vni[] = {
474 0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
475 0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
476 0x00000000, 0x00000000, 0x00000000, 0x00000000,
477 0x00000000, 0x00000000, 0x00000000, 0x00000000,
478 0x00000000, 0x00000000, 0x00000000, 0x00000000,
479 0x00000000, 0x00000000, 0x00000000, 0x00000000,
480 0x00000000, 0x00000000, 0x00000000, 0x00000000,
481 0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 /* Packet types for GTPC with TEID */
485 static const u32 ice_ptypes_gtpc_tid[] = {
486 0x00000000, 0x00000000, 0x00000000, 0x00000000,
487 0x00000000, 0x00000000, 0x00000000, 0x00000000,
488 0x00000000, 0x00000000, 0x00000060, 0x00000000,
489 0x00000000, 0x00000000, 0x00000000, 0x00000000,
490 0x00000000, 0x00000000, 0x00000000, 0x00000000,
491 0x00000000, 0x00000000, 0x00000000, 0x00000000,
492 0x00000000, 0x00000000, 0x00000000, 0x00000000,
493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
496 /* Packet types for GTPU */
497 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
498 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
499 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
500 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
501 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
502 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
503 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
504 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
505 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
506 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
507 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
508 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
509 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
510 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
511 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
512 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
513 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
514 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
515 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
516 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
517 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
520 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
521 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
522 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
523 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
524 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
525 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
526 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
527 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
528 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
529 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
530 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
531 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
532 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
533 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
534 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
535 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
536 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
537 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
538 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
539 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
540 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
543 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
544 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
545 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
546 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
547 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
548 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
549 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
550 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
551 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
552 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
553 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
554 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
555 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
556 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
557 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
558 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
559 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
560 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
561 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
562 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
563 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
566 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
567 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
568 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
569 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
570 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
571 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
572 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
573 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
574 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
575 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
576 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
577 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
578 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
579 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
580 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
581 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
582 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
583 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
584 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
585 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
586 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
589 static const u32 ice_ptypes_gtpu[] = {
590 0x00000000, 0x00000000, 0x00000000, 0x00000000,
591 0x00000000, 0x00000000, 0x00000000, 0x00000000,
592 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
593 0x00000000, 0x00000000, 0x00000000, 0x00000000,
594 0x00000000, 0x00000000, 0x00000000, 0x00000000,
595 0x00000000, 0x00000000, 0x00000000, 0x00000000,
596 0x00000000, 0x00000000, 0x00000000, 0x00000000,
597 0x00000000, 0x00000000, 0x00000000, 0x00000000,
600 /* Packet types for pppoe */
601 static const u32 ice_ptypes_pppoe[] = {
602 0x00000000, 0x00000000, 0x00000000, 0x00000000,
603 0x00000000, 0x00000000, 0x00000000, 0x00000000,
604 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
605 0x00000000, 0x00000000, 0x00000000, 0x00000000,
606 0x00000000, 0x00000000, 0x00000000, 0x00000000,
607 0x00000000, 0x00000000, 0x00000000, 0x00000000,
608 0x00000000, 0x00000000, 0x00000000, 0x00000000,
609 0x00000000, 0x00000000, 0x00000000, 0x00000000,
612 /* Packet types for packets with PFCP NODE header */
613 static const u32 ice_ptypes_pfcp_node[] = {
614 0x00000000, 0x00000000, 0x00000000, 0x00000000,
615 0x00000000, 0x00000000, 0x00000000, 0x00000000,
616 0x00000000, 0x00000000, 0x80000000, 0x00000002,
617 0x00000000, 0x00000000, 0x00000000, 0x00000000,
618 0x00000000, 0x00000000, 0x00000000, 0x00000000,
619 0x00000000, 0x00000000, 0x00000000, 0x00000000,
620 0x00000000, 0x00000000, 0x00000000, 0x00000000,
621 0x00000000, 0x00000000, 0x00000000, 0x00000000,
624 /* Packet types for packets with PFCP SESSION header */
625 static const u32 ice_ptypes_pfcp_session[] = {
626 0x00000000, 0x00000000, 0x00000000, 0x00000000,
627 0x00000000, 0x00000000, 0x00000000, 0x00000000,
628 0x00000000, 0x00000000, 0x00000000, 0x00000005,
629 0x00000000, 0x00000000, 0x00000000, 0x00000000,
630 0x00000000, 0x00000000, 0x00000000, 0x00000000,
631 0x00000000, 0x00000000, 0x00000000, 0x00000000,
632 0x00000000, 0x00000000, 0x00000000, 0x00000000,
633 0x00000000, 0x00000000, 0x00000000, 0x00000000,
636 /* Packet types for l2tpv3 */
637 static const u32 ice_ptypes_l2tpv3[] = {
638 0x00000000, 0x00000000, 0x00000000, 0x00000000,
639 0x00000000, 0x00000000, 0x00000000, 0x00000000,
640 0x00000000, 0x00000000, 0x00000000, 0x00000300,
641 0x00000000, 0x00000000, 0x00000000, 0x00000000,
642 0x00000000, 0x00000000, 0x00000000, 0x00000000,
643 0x00000000, 0x00000000, 0x00000000, 0x00000000,
644 0x00000000, 0x00000000, 0x00000000, 0x00000000,
645 0x00000000, 0x00000000, 0x00000000, 0x00000000,
648 /* Packet types for esp */
649 static const u32 ice_ptypes_esp[] = {
650 0x00000000, 0x00000000, 0x00000000, 0x00000000,
651 0x00000000, 0x00000003, 0x00000000, 0x00000000,
652 0x00000000, 0x00000000, 0x00000000, 0x00000000,
653 0x00000000, 0x00000000, 0x00000000, 0x00000000,
654 0x00000000, 0x00000000, 0x00000000, 0x00000000,
655 0x00000000, 0x00000000, 0x00000000, 0x00000000,
656 0x00000000, 0x00000000, 0x00000000, 0x00000000,
657 0x00000000, 0x00000000, 0x00000000, 0x00000000,
660 /* Packet types for ah */
661 static const u32 ice_ptypes_ah[] = {
662 0x00000000, 0x00000000, 0x00000000, 0x00000000,
663 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
664 0x00000000, 0x00000000, 0x00000000, 0x00000000,
665 0x00000000, 0x00000000, 0x00000000, 0x00000000,
666 0x00000000, 0x00000000, 0x00000000, 0x00000000,
667 0x00000000, 0x00000000, 0x00000000, 0x00000000,
668 0x00000000, 0x00000000, 0x00000000, 0x00000000,
669 0x00000000, 0x00000000, 0x00000000, 0x00000000,
672 /* Packet types for packets with NAT_T ESP header */
673 static const u32 ice_ptypes_nat_t_esp[] = {
674 0x00000000, 0x00000000, 0x00000000, 0x00000000,
675 0x00000000, 0x00000030, 0x00000000, 0x00000000,
676 0x00000000, 0x00000000, 0x00000000, 0x00000000,
677 0x00000000, 0x00000000, 0x00000000, 0x00000000,
678 0x00000000, 0x00000000, 0x00000000, 0x00000000,
679 0x00000000, 0x00000000, 0x00000000, 0x00000000,
680 0x00000000, 0x00000000, 0x00000000, 0x00000000,
681 0x00000000, 0x00000000, 0x00000000, 0x00000000,
684 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
685 0x00000846, 0x00000000, 0x00000000, 0x00000000,
686 0x00000000, 0x00000000, 0x00000000, 0x00000000,
687 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
688 0x00000000, 0x00000000, 0x00000000, 0x00000000,
689 0x00000000, 0x00000000, 0x00000000, 0x00000000,
690 0x00000000, 0x00000000, 0x00000000, 0x00000000,
691 0x00000000, 0x00000000, 0x00000000, 0x00000000,
692 0x00000000, 0x00000000, 0x00000000, 0x00000000,
695 static const u32 ice_ptypes_gtpu_no_ip[] = {
696 0x00000000, 0x00000000, 0x00000000, 0x00000000,
697 0x00000000, 0x00000000, 0x00000000, 0x00000000,
698 0x00000000, 0x00000000, 0x00000600, 0x00000000,
699 0x00000000, 0x00000000, 0x00000000, 0x00000000,
700 0x00000000, 0x00000000, 0x00000000, 0x00000000,
701 0x00000000, 0x00000000, 0x00000000, 0x00000000,
702 0x00000000, 0x00000000, 0x00000000, 0x00000000,
703 0x00000000, 0x00000000, 0x00000000, 0x00000000,
706 static const u32 ice_ptypes_ecpri_tp0[] = {
707 0x00000000, 0x00000000, 0x00000000, 0x00000000,
708 0x00000000, 0x00000000, 0x00000000, 0x00000000,
709 0x00000000, 0x00000000, 0x00000000, 0x00000400,
710 0x00000000, 0x00000000, 0x00000000, 0x00000000,
711 0x00000000, 0x00000000, 0x00000000, 0x00000000,
712 0x00000000, 0x00000000, 0x00000000, 0x00000000,
713 0x00000000, 0x00000000, 0x00000000, 0x00000000,
714 0x00000000, 0x00000000, 0x00000000, 0x00000000,
717 static const u32 ice_ptypes_udp_ecpri_tp0[] = {
718 0x00000000, 0x00000000, 0x00000000, 0x00000000,
719 0x00000000, 0x00000000, 0x00000000, 0x00000000,
720 0x00000000, 0x00000000, 0x00000000, 0x00100000,
721 0x00000000, 0x00000000, 0x00000000, 0x00000000,
722 0x00000000, 0x00000000, 0x00000000, 0x00000000,
723 0x00000000, 0x00000000, 0x00000000, 0x00000000,
724 0x00000000, 0x00000000, 0x00000000, 0x00000000,
725 0x00000000, 0x00000000, 0x00000000, 0x00000000,
728 /* Manage parameters and info. used during the creation of a flow profile */
729 struct ice_flow_prof_params {
731 u16 entry_length; /* # of bytes formatted entry will require */
/* Flow profile being built; its segs[] drive all processing below */
733 struct ice_flow_prof *prof;
735 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
736 * This will give us the direction flags.
738 struct ice_fv_word es[ICE_MAX_FV_WORDS];
739 /* attributes can be used to add attributes to a particular PTYPE */
740 const struct ice_ptype_attributes *attr;
/* Per-word extraction masks, indexed in parallel with es[] */
743 u16 mask[ICE_MAX_FV_WORDS];
/* Bitmap of PTYPEs still compatible with every processed segment */
744 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
/* Header flags that identify tunnel/encapsulation protocols handled as
 * "inner" qualifiers for RSS configuration.
 */
747 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
748 (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
749 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
750 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
751 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
752 ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
753 ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0)
/* All L2 header flags */
755 #define ICE_FLOW_SEG_HDRS_L2_MASK \
756 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* All L3 header flags; at most one may be set per segment (see
 * ice_flow_val_hdrs())
 */
757 #define ICE_FLOW_SEG_HDRS_L3_MASK \
758 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
759 ICE_FLOW_SEG_HDR_ARP)
/* All L4 header flags; at most one may be set per segment */
760 #define ICE_FLOW_SEG_HDRS_L4_MASK \
761 (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
762 ICE_FLOW_SEG_HDR_SCTP)
763 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
764 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
765 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
768 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
769 * @segs: array of one or more packet segments that describe the flow
770 * @segs_cnt: number of packet segments provided
772 static enum ice_status
773 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
777 for (i = 0; i < segs_cnt; i++) {
778 /* Multiple L3 headers */
/* A segment may name at most one L3 protocol: a non-zero,
 * non-power-of-two masked value means two or more bits set.
 */
779 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
780 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
781 return ICE_ERR_PARAM;
783 /* Multiple L4 headers */
/* Same single-protocol rule for L4 headers */
784 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
785 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
786 return ICE_ERR_PARAM;
792 /* Sizes of fixed known protocol headers without header options */
793 #define ICE_FLOW_PROT_HDR_SZ_MAC 14
/* MAC + one 4-byte... NOTE(review): +2 here only covers the TCI; the VLAN
 * TPID appears to be folded into the MAC accounting — confirm upstream.
 */
794 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
795 #define ICE_FLOW_PROT_HDR_SZ_IPV4 20
796 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40
797 #define ICE_FLOW_PROT_HDR_SZ_ARP 28
798 #define ICE_FLOW_PROT_HDR_SZ_ICMP 8
799 #define ICE_FLOW_PROT_HDR_SZ_TCP 20
800 #define ICE_FLOW_PROT_HDR_SZ_UDP 8
801 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12
804 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
805 * @params: information about the flow to be processed
806 * @seg: index of packet segment whose header size is to be determined
/* Returns the cumulative byte size of the fixed (option-less) protocol
 * headers declared for segment @seg, built L2 + L3 + L4.
 */
808 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
/* L2 header: MAC, plus the VLAN tag bytes if a VLAN header is present */
813 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
814 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
/* L3 header: exactly one of IPv4/IPv6/ARP (enforced by
 * ice_flow_val_hdrs())
 */
817 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
818 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
819 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
820 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
821 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
822 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
823 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
824 /* A L3 header is required if L4 is specified */
/* NOTE(review): the statement belonging to this branch (adding a
 * default L3 size) is not visible in this excerpt — verify upstream.
 */
/* L4 header: exactly one of ICMP/TCP/UDP/SCTP */
828 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
829 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
830 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
831 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
832 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
833 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
834 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
835 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
841 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
842 * @params: information about the flow to be processed
844 * This function identifies the packet types associated with the protocol
845 * headers being present in packet segments of the specified flow profile.
847 static enum ice_status
848 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
850 struct ice_flow_prof *prof;
/* Start from "all PTYPEs possible", then AND away everything that does
 * not match each segment's declared headers.
 */
853 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
858 for (i = 0; i < params->prof->segs_cnt; i++) {
859 const ice_bitmap_t *src;
862 hdrs = prof->segs[i].hdrs;
/* L2: outer-of-switch (ofos) tables for segment 0, inner-layer
 * (il) tables for subsequent (tunneled) segments.
 */
864 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
865 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
866 (const ice_bitmap_t *)ice_ptypes_mac_il;
867 ice_and_bitmap(params->ptypes, params->ptypes, src,
871 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
872 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
873 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* ARP is only valid as an outer (first-segment) header */
877 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
878 ice_and_bitmap(params->ptypes, params->ptypes,
879 (const ice_bitmap_t *)ice_ptypes_arp_of,
883 if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
884 src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
885 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* L3: IPV_OTHER selects the "all"/frag-inclusive tables, the
 * no-L4 masks select PTYPEs with no L4 header, otherwise the
 * plain per-layer IPv4/IPv6 tables apply.
 */
888 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
889 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
891 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
892 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
893 ice_and_bitmap(params->ptypes, params->ptypes, src,
895 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
896 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
898 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
899 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
900 ice_and_bitmap(params->ptypes, params->ptypes, src,
902 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
903 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
904 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
905 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
906 ice_and_bitmap(params->ptypes, params->ptypes, src,
908 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
909 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
910 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
911 ice_and_bitmap(params->ptypes, params->ptypes, src,
913 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
914 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
915 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
916 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
917 ice_and_bitmap(params->ptypes, params->ptypes, src,
919 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
920 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
921 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
922 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* Non-IP Ethernet and PPPoE are mutually exclusive with each
 * other; otherwise PPPoE PTYPEs are explicitly excluded.
 */
926 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
927 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
928 ice_and_bitmap(params->ptypes, params->ptypes,
929 src, ICE_FLOW_PTYPE_MAX);
930 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
931 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
932 ice_and_bitmap(params->ptypes, params->ptypes, src,
935 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
936 ice_andnot_bitmap(params->ptypes, params->ptypes, src,
/* L4 protocols */
940 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
941 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
942 ice_and_bitmap(params->ptypes, params->ptypes, src,
944 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
945 ice_and_bitmap(params->ptypes, params->ptypes,
946 (const ice_bitmap_t *)ice_ptypes_tcp_il,
948 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
949 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
950 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* Tunnel / special protocols; GTP-U variants also record PTYPE
 * attribute tables so the profile can be qualified further.
 */
954 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
955 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
956 (const ice_bitmap_t *)ice_ptypes_icmp_il;
957 ice_and_bitmap(params->ptypes, params->ptypes, src,
959 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
961 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
962 ice_and_bitmap(params->ptypes, params->ptypes,
963 src, ICE_FLOW_PTYPE_MAX);
965 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
966 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
967 ice_and_bitmap(params->ptypes, params->ptypes,
968 src, ICE_FLOW_PTYPE_MAX);
969 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
970 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
971 ice_and_bitmap(params->ptypes, params->ptypes,
972 src, ICE_FLOW_PTYPE_MAX);
973 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
974 src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
975 ice_and_bitmap(params->ptypes, params->ptypes,
976 src, ICE_FLOW_PTYPE_MAX);
977 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
978 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
979 ice_and_bitmap(params->ptypes, params->ptypes,
980 src, ICE_FLOW_PTYPE_MAX);
982 /* Attributes for GTP packet with downlink */
983 params->attr = ice_attr_gtpu_down;
984 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
985 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
986 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
987 ice_and_bitmap(params->ptypes, params->ptypes,
988 src, ICE_FLOW_PTYPE_MAX);
990 /* Attributes for GTP packet with uplink */
991 params->attr = ice_attr_gtpu_up;
992 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
993 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
994 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
995 ice_and_bitmap(params->ptypes, params->ptypes,
996 src, ICE_FLOW_PTYPE_MAX);
998 /* Attributes for GTP packet with Extension Header */
999 params->attr = ice_attr_gtpu_eh;
1000 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1001 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1002 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1003 ice_and_bitmap(params->ptypes, params->ptypes,
1004 src, ICE_FLOW_PTYPE_MAX);
1006 /* Attributes for GTP packet without Extension Header */
1007 params->attr = ice_attr_gtpu_session;
1008 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1009 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1010 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1011 ice_and_bitmap(params->ptypes, params->ptypes,
1012 src, ICE_FLOW_PTYPE_MAX);
1013 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1014 src = (const ice_bitmap_t *)ice_ptypes_esp;
1015 ice_and_bitmap(params->ptypes, params->ptypes,
1016 src, ICE_FLOW_PTYPE_MAX);
1017 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1018 src = (const ice_bitmap_t *)ice_ptypes_ah;
1019 ice_and_bitmap(params->ptypes, params->ptypes,
1020 src, ICE_FLOW_PTYPE_MAX);
1021 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1022 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1023 ice_and_bitmap(params->ptypes, params->ptypes,
1024 src, ICE_FLOW_PTYPE_MAX);
1025 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1026 src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1027 ice_and_bitmap(params->ptypes, params->ptypes,
1028 src, ICE_FLOW_PTYPE_MAX);
1029 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1030 src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1031 ice_and_bitmap(params->ptypes, params->ptypes,
1032 src, ICE_FLOW_PTYPE_MAX);
/* PFCP: select node or session PTYPEs when requested; otherwise
 * remove both PFCP PTYPE sets from the candidates.
 */
1035 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1036 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1038 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1041 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1043 ice_and_bitmap(params->ptypes, params->ptypes,
1044 src, ICE_FLOW_PTYPE_MAX);
1046 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1047 ice_andnot_bitmap(params->ptypes, params->ptypes,
1048 src, ICE_FLOW_PTYPE_MAX);
1050 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1051 ice_andnot_bitmap(params->ptypes, params->ptypes,
1052 src, ICE_FLOW_PTYPE_MAX);
1060 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
1061 * @hw: pointer to the HW struct
1062 * @params: information about the flow to be processed
1063 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1065 * This function will allocate an extraction sequence entries for a DWORD size
1066 * chunk of the packet flags.
1068 static enum ice_status
1069 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1070 struct ice_flow_prof_params *params,
1071 enum ice_flex_mdid_pkt_flags flags)
1073 u8 fv_words = hw->blk[params->blk].es.fvw;
1076 /* Make sure the number of extraction sequence entries required does not
1077 * exceed the block's capacity.
1079 if (params->es_cnt >= fv_words)
1080 return ICE_ERR_MAX_LIMIT;
1082 /* some blocks require a reversed field vector layout */
1083 if (hw->blk[params->blk].es.reverse)
1084 idx = fv_words - params->es_cnt - 1;
1086 idx = params->es_cnt;
/* Record metadata protocol ID with the flags value as the offset */
1088 params->es[idx].prot_id = ICE_PROT_META_ID;
1089 params->es[idx].off = flags;
1096 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1097 * @hw: pointer to the HW struct
1098 * @params: information about the flow to be processed
1099 * @seg: packet segment index of the field to be extracted
1100 * @fld: ID of field to be extracted
1101 * @match: bitfield of all fields
1103 * This function determines the protocol ID, offset, and size of the given
1104 * field. It then allocates one or more extraction sequence entries for the
1105 * given field, and fill the entries with protocol ID and offset information.
1107 static enum ice_status
1108 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1109 u8 seg, enum ice_flow_field fld, u64 match)
1111 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1112 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1113 u8 fv_words = hw->blk[params->blk].es.fvw;
1114 struct ice_flow_fld_info *flds;
1115 u16 cnt, ese_bits, i;
1120 flds = params->prof->segs[seg].fields;
/* Map the field to its hardware protocol ID; outer (seg 0) and inner
 * headers generally use distinct protocol IDs.
 */
1123 case ICE_FLOW_FIELD_IDX_ETH_DA:
1124 case ICE_FLOW_FIELD_IDX_ETH_SA:
1125 case ICE_FLOW_FIELD_IDX_S_VLAN:
1126 case ICE_FLOW_FIELD_IDX_C_VLAN:
1127 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1129 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1130 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1132 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1133 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1135 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1136 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1138 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1139 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1140 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1142 /* TTL and PROT share the same extraction seq. entry.
1143 * Each is considered a sibling to the other in terms of sharing
1144 * the same extraction sequence entry.
1146 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1147 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1149 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1151 /* If the sibling field is also included, that field's
1152 * mask needs to be included.
1154 if (match & BIT(sib))
1155 sib_mask = ice_flds_info[sib].mask;
1157 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1158 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1159 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1161 /* TTL and PROT share the same extraction seq. entry.
1162 * Each is considered a sibling to the other in terms of sharing
1163 * the same extraction sequence entry.
1165 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1166 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1168 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1170 /* If the sibling field is also included, that field's
1171 * mask needs to be included.
1173 if (match & BIT(sib))
1174 sib_mask = ice_flds_info[sib].mask;
1176 case ICE_FLOW_FIELD_IDX_IPV4_SA:
1177 case ICE_FLOW_FIELD_IDX_IPV4_DA:
1178 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1180 case ICE_FLOW_FIELD_IDX_IPV6_SA:
1181 case ICE_FLOW_FIELD_IDX_IPV6_DA:
1182 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1183 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1184 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1185 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1186 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1187 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1188 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1190 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1191 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1192 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1193 prot_id = ICE_PROT_TCP_IL;
1195 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1196 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1197 prot_id = ICE_PROT_UDP_IL_OR_S;
1199 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1200 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1201 prot_id = ICE_PROT_SCTP_IL;
1203 case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1204 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1205 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1206 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1207 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1208 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1209 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1210 /* GTP is accessed through UDP OF protocol */
1211 prot_id = ICE_PROT_UDP_OF;
1213 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1214 prot_id = ICE_PROT_PPPOE;
1216 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1217 prot_id = ICE_PROT_UDP_IL_OR_S;
1219 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1220 prot_id = ICE_PROT_L2TPV3;
1222 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1223 prot_id = ICE_PROT_ESP_F;
1225 case ICE_FLOW_FIELD_IDX_AH_SPI:
1226 prot_id = ICE_PROT_ESP_2;
1228 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1229 prot_id = ICE_PROT_UDP_IL_OR_S;
1231 case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1232 prot_id = ICE_PROT_ECPRI;
1234 case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1235 prot_id = ICE_PROT_UDP_IL_OR_S;
1237 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1238 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1239 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1240 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1241 case ICE_FLOW_FIELD_IDX_ARP_OP:
1242 prot_id = ICE_PROT_ARP_OF;
1244 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1245 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1246 /* ICMP type and code share the same extraction seq. entry */
1247 prot_id = (params->prof->segs[seg].hdrs &
1248 ICE_FLOW_SEG_HDR_IPV4) ?
1249 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1250 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1251 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1252 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1254 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1255 prot_id = ICE_PROT_GRE_OF;
1258 return ICE_ERR_NOT_IMPL;
1261 /* Each extraction sequence entry is a word in size, and extracts a
1262 * word-aligned offset from a protocol header.
1264 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
/* Record protocol ID, word-aligned byte offset, in-word bit
 * displacement, starting entry index, and match mask for this field.
 */
1266 flds[fld].xtrct.prot_id = prot_id;
1267 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1268 ICE_FLOW_FV_EXTRACT_SZ;
1269 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1270 flds[fld].xtrct.idx = params->es_cnt;
1271 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1273 /* Adjust the next field-entry index after accommodating the number of
1274 * entries this field consumes
1276 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1277 ice_flds_info[fld].size, ese_bits);
1279 /* Fill in the extraction sequence entries needed for this field */
1280 off = flds[fld].xtrct.off;
1281 mask = flds[fld].xtrct.mask;
1282 for (i = 0; i < cnt; i++) {
1283 /* Only consume an extraction sequence entry if there is no
1284 * sibling field associated with this field or the sibling entry
1285 * already extracts the word shared with this field.
1287 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1288 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1289 flds[sib].xtrct.off != off) {
1292 /* Make sure the number of extraction sequence required
1293 * does not exceed the block's capability
1295 if (params->es_cnt >= fv_words)
1296 return ICE_ERR_MAX_LIMIT;
1298 /* some blocks require a reversed field vector layout */
1299 if (hw->blk[params->blk].es.reverse)
1300 idx = fv_words - params->es_cnt - 1;
1302 idx = params->es_cnt;
1304 params->es[idx].prot_id = prot_id;
1305 params->es[idx].off = off;
/* Combine this field's mask with the sibling's so both
 * survive sharing a single extraction word.
 */
1306 params->mask[idx] = mask | sib_mask;
1310 off += ICE_FLOW_FV_EXTRACT_SZ;
1317 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1318 * @hw: pointer to the HW struct
1319 * @params: information about the flow to be processed
1320 * @seg: index of packet segment whose raw fields are to be extracted
1322 static enum ice_status
1323 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
/* Nothing to do if the segment declares no raw-byte matches */
1330 if (!params->prof->segs[seg].raws_cnt)
1333 if (params->prof->segs[seg].raws_cnt >
1334 ARRAY_SIZE(params->prof->segs[seg].raws))
1335 return ICE_ERR_MAX_LIMIT;
1337 /* Offsets within the segment headers are not supported */
1338 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1340 return ICE_ERR_PARAM;
1342 fv_words = hw->blk[params->blk].es.fvw;
1344 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1345 struct ice_flow_seg_fld_raw *raw;
1348 raw = &params->prof->segs[seg].raws[i];
1350 /* Storing extraction information */
/* Raw matches are extracted relative to the outer MAC header */
1351 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1352 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1353 ICE_FLOW_FV_EXTRACT_SZ;
1354 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1356 raw->info.xtrct.idx = params->es_cnt;
1358 /* Determine the number of field vector entries this raw field
1361 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1362 (raw->info.src.last * BITS_PER_BYTE),
1363 (ICE_FLOW_FV_EXTRACT_SZ *
1365 off = raw->info.xtrct.off;
1366 for (j = 0; j < cnt; j++) {
1369 /* Make sure the number of extraction sequence required
1370 * does not exceed the block's capability
1372 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1373 params->es_cnt >= ICE_MAX_FV_WORDS)
1374 return ICE_ERR_MAX_LIMIT;
1376 /* some blocks require a reversed field vector layout */
1377 if (hw->blk[params->blk].es.reverse)
1378 idx = fv_words - params->es_cnt - 1;
1380 idx = params->es_cnt;
1382 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1383 params->es[idx].off = off;
1385 off += ICE_FLOW_FV_EXTRACT_SZ;
1393 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1394 * @hw: pointer to the HW struct
1395 * @params: information about the flow to be processed
1397 * This function iterates through all matched fields in the given segments, and
1398 * creates an extraction sequence for the fields.
1400 static enum ice_status
1401 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1402 struct ice_flow_prof_params *params)
1404 enum ice_status status = ICE_SUCCESS;
1407 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1410 if (params->blk == ICE_BLK_ACL) {
1411 status = ice_flow_xtract_pkt_flags(hw, params,
1412 ICE_RX_MDID_PKT_FLAGS_15_0);
/* For each segment: emit extraction entries for every matched
 * field, then for any raw-byte matches.
 */
1417 for (i = 0; i < params->prof->segs_cnt; i++) {
1418 u64 match = params->prof->segs[i].match;
1419 enum ice_flow_field j;
1421 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1422 ICE_FLOW_FIELD_IDX_MAX) {
1423 status = ice_flow_xtract_fld(hw, params, i, j, match);
1426 ice_clear_bit(j, (ice_bitmap_t *)&match);
1429 /* Process raw matching bytes */
1430 status = ice_flow_xtract_raws(hw, params, i);
1439 * ice_flow_sel_acl_scen - returns the specific scenario
1440 * @hw: pointer to the hardware structure
1441 * @params: information about the flow to be processed
1443 * This function will return the specific scenario based on the
1444 * params passed to it
1446 static enum ice_status
1447 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1449 /* Find the best-fit scenario for the provided match width */
1450 struct ice_acl_scen *cand_scen = NULL, *scen;
/* No ACL table allocated means no scenario can exist */
1453 return ICE_ERR_DOES_NOT_EXIST;
1455 /* Loop through each scenario and match against the scenario width
1456 * to select the specific scenario
/* Best fit = smallest effective width that still holds entry_length */
1458 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1459 if (scen->eff_width >= params->entry_length &&
1460 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1463 return ICE_ERR_DOES_NOT_EXIST;
1465 params->prof->cfg.scen = cand_scen;
1471 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1472 * @params: information about the flow to be processed
/* Assigns byte-selection indices and range-checker slots for every matched
 * field and raw match, then validates the totals against ACL capacity.
 */
1474 static enum ice_status
1475 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1477 u16 index, i, range_idx = 0;
1479 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1481 for (i = 0; i < params->prof->segs_cnt; i++) {
1482 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1485 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1486 ICE_FLOW_FIELD_IDX_MAX) {
1487 struct ice_flow_fld_info *fld = &seg->fields[j];
1489 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1491 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1492 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1494 /* Range checking only supported for single
1497 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1499 BITS_PER_BYTE * 2) > 1)
1500 return ICE_ERR_PARAM;
1502 /* Ranges must define low and high values */
1503 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1504 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1505 return ICE_ERR_PARAM;
/* Range fields consume a range-checker slot, not
 * byte-selection bytes.
 */
1507 fld->entry.val = range_idx++;
1509 /* Store adjusted byte-length of field for later
1510 * use, taking into account potential
1511 * non-byte-aligned displacement
1513 fld->entry.last = DIVIDE_AND_ROUND_UP
1514 (ice_flds_info[j].size +
1515 (fld->xtrct.disp % BITS_PER_BYTE),
1517 fld->entry.val = index;
1518 index += fld->entry.last;
/* Raw matches always consume byte-selection bytes */
1522 for (j = 0; j < seg->raws_cnt; j++) {
1523 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1525 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1526 raw->info.entry.val = index;
1527 raw->info.entry.last = raw->info.src.last;
1528 index += raw->info.entry.last;
1532 /* Currently only support using the byte selection base, which only
1533 * allows for an effective entry size of 30 bytes. Reject anything
1536 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1537 return ICE_ERR_PARAM;
1539 /* Only 8 range checkers per profile, reject anything trying to use
1542 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1543 return ICE_ERR_PARAM;
1545 /* Store # bytes required for entry for later use */
1546 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1552 * ice_flow_proc_segs - process all packet segments associated with a profile
1553 * @hw: pointer to the HW struct
1554 * @params: information about the flow to be processed
/* Pipeline: resolve PTYPEs, build the extraction sequence, then apply any
 * block-specific post-processing (ACL entry format and scenario selection).
 */
1556 static enum ice_status
1557 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1559 enum ice_status status;
1561 status = ice_flow_proc_seg_hdrs(params);
1565 status = ice_flow_create_xtrct_seq(hw, params);
1569 switch (params->blk) {
1572 status = ICE_SUCCESS;
1575 status = ice_flow_acl_def_entry_frmt(params);
1578 status = ice_flow_sel_acl_scen(hw, params);
1583 return ICE_ERR_NOT_IMPL;
/* Condition flags for ice_flow_find_prof_conds() */
1589 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1590 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1591 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1594 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1595 * @hw: pointer to the HW struct
1596 * @blk: classification stage
1597 * @dir: flow direction
1598 * @segs: array of one or more packet segments that describe the flow
1599 * @segs_cnt: number of packet segments provided
1600 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1601 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
/* Returns the first matching profile or NULL; takes and releases the
 * per-block profile-list lock around the search.
 */
1603 static struct ice_flow_prof *
1604 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1605 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1606 u8 segs_cnt, u16 vsi_handle, u32 conds)
1608 struct ice_flow_prof *p, *prof = NULL;
1610 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1611 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1612 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1613 segs_cnt && segs_cnt == p->segs_cnt) {
1616 /* Check for profile-VSI association if specified */
1617 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1618 ice_is_vsi_valid(hw, vsi_handle) &&
1619 !ice_is_bit_set(p->vsis, vsi_handle))
1622 /* Protocol headers must be checked. Matched fields are
1623 * checked if specified.
1625 for (i = 0; i < segs_cnt; i++)
1626 if (segs[i].hdrs != p->segs[i].hdrs ||
1627 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1628 segs[i].match != p->segs[i].match))
1631 /* A match is found if all segments are matched */
1632 if (i == segs_cnt) {
1637 ice_release_lock(&hw->fl_profs_locks[blk]);
1643 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1644 * @hw: pointer to the HW struct
1645 * @blk: classification stage
1646 * @dir: flow direction
1647 * @segs: array of one or more packet segments that describe the flow
1648 * @segs_cnt: number of packet segments provided
/* Convenience wrapper: field-checking lookup with no VSI constraint;
 * returns the profile ID or ICE_FLOW_PROF_ID_INVAL when not found.
 */
1651 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1652 struct ice_flow_seg_info *segs, u8 segs_cnt)
1654 struct ice_flow_prof *p;
1656 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1657 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1659 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1663 * ice_flow_find_prof_id - Look up a profile with given profile ID
1664 * @hw: pointer to the HW struct
1665 * @blk: classification stage
1666 * @prof_id: unique ID to identify this flow profile
/* Linear scan of the block's profile list; returns NULL when no profile
 * carries @prof_id. NOTE(review): no lock acquisition is visible in this
 * excerpt — confirm callers hold fl_profs_locks[blk].
 */
1668 static struct ice_flow_prof *
1669 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1671 struct ice_flow_prof *p;
1673 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1674 if (p->id == prof_id)
1681 * ice_dealloc_flow_entry - Deallocate flow entry memory
1681 * ice_dealloc_flow_entry - Deallocate flow entry memory
1682 * @hw: pointer to the HW struct
1683 * @entry: flow entry to be removed
1686 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1692 ice_free(hw, entry->entry);
1694 if (entry->range_buf) {
1695 ice_free(hw, entry->range_buf);
1696 entry->range_buf = NULL;
1700 ice_free(hw, entry->acts);
1702 entry->acts_cnt = 0;
1705 ice_free(hw, entry);
1709 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1710 * @hw: pointer to the HW struct
1711 * @blk: classification stage
1712 * @prof_id: the profile ID handle
1713 * @hw_prof_id: pointer to variable to receive the HW profile ID
/* Translates a software profile ID to the hardware profile ID under the
 * block's profile-map lock; ICE_ERR_DOES_NOT_EXIST when unmapped.
 */
1716 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1719 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1720 struct ice_prof_map *map;
1722 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1723 map = ice_search_prof_id(hw, blk, prof_id);
1725 *hw_prof_id = map->prof_id;
1726 status = ICE_SUCCESS;
1728 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
/* Sentinel scenario number (all ones in a 6-bit field) marking a PF slot
 * with no ACL scenario assigned; written by ice_flow_acl_disassoc_scen()
 * and tested by ice_flow_acl_is_prof_in_use().
 */
1732 #define ICE_ACL_INVALID_SCEN 0x3f
1735 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1736 * @hw: pointer to the hardware structure
1737 * @prof: pointer to flow profile
1738 * @buf: destination buffer function writes partial extraction sequence to
1740 * returns ICE_SUCCESS if no PF is associated to the given profile
1741 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1742 * returns other error code for real error
1744 static enum ice_status
1745 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1746 struct ice_aqc_acl_prof_generic_frmt *buf)
1748 enum ice_status status;
/* Resolve the HW profile ID, then query its generic format from FW */
1751 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1755 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1759 /* If all PF's associated scenarios are all 0 or all
1760 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1761 * not been configured yet.
1763 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1764 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1765 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1766 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1769 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1770 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1771 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1772 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1773 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1774 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1775 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1776 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
/* Any mixed/other scenario assignment means at least one PF uses it */
1779 return ICE_ERR_IN_USE;
1783 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1784 * @hw: pointer to the hardware structure
1785 * @acts: array of actions to be performed on a match
1786 * @acts_cnt: number of actions
/* Walks the action list and deallocates any packet/byte counters the
 * actions had allocated via the ACL admin queue.
 */
1788 static enum ice_status
1789 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1794 for (i = 0; i < acts_cnt; i++) {
1795 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1796 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1797 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1798 struct ice_acl_cntrs cntrs = { 0 };
1799 enum ice_status status;
1801 /* amount is unused in the dealloc path but the common
1802 * parameter check routine wants a value set, as zero
1803 * is invalid for the check. Just set it.
1806 cntrs.bank = 0; /* Only bank0 for the moment */
1808 LE16_TO_CPU(acts[i].data.acl_act.value);
1810 LE16_TO_CPU(acts[i].data.acl_act.value);
/* Dual counters track packets and bytes together */
1812 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1813 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1815 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1817 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1826 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1827 * @hw: pointer to the hardware structure
1828 * @prof: pointer to flow profile
1830 * Disassociate the scenario from the profile for the PF of the VSI.
1832 static enum ice_status
1833 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1835 struct ice_aqc_acl_prof_generic_frmt buf;
1836 enum ice_status status = ICE_SUCCESS;
1839 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1841 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1845 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1849 /* Clear scenario for this PF */
1850 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1851 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
1857 * ice_flow_rem_entry_sync - Remove a flow entry
1858 * @hw: pointer to the HW struct
1859 * @blk: classification stage
1860 * @entry: flow entry to be removed
1862 static enum ice_status
1863 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1864 struct ice_flow_entry *entry)
1867 return ICE_ERR_BAD_PTR;
1869 if (blk == ICE_BLK_ACL) {
1870 enum ice_status status;
1873 return ICE_ERR_BAD_PTR;
1875 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1876 entry->scen_entry_idx);
1880 /* Checks if we need to release an ACL counter. */
1881 if (entry->acts_cnt && entry->acts)
1882 ice_flow_acl_free_act_cntr(hw, entry->acts,
1886 LIST_DEL(&entry->l_entry);
1888 ice_dealloc_flow_entry(hw, entry);
1894 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1895 * @hw: pointer to the HW struct
1896 * @blk: classification stage
1897 * @dir: flow direction
1898 * @prof_id: unique ID to identify this flow profile
1899 * @segs: array of one or more packet segments that describe the flow
1900 * @segs_cnt: number of packet segments provided
1901 * @acts: array of default actions
1902 * @acts_cnt: number of default actions
1903 * @prof: stores the returned flow profile added
1905 * Assumption: the caller has acquired the lock to the profile list
1907 static enum ice_status
1908 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1909 enum ice_flow_dir dir, u64 prof_id,
1910 struct ice_flow_seg_info *segs, u8 segs_cnt,
1911 struct ice_flow_action *acts, u8 acts_cnt,
1912 struct ice_flow_prof **prof)
1914 struct ice_flow_prof_params *params;
1915 enum ice_status status;
1918 if (!prof || (acts_cnt && !acts))
1919 return ICE_ERR_BAD_PTR;
1921 params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
1923 return ICE_ERR_NO_MEMORY;
1925 params->prof = (struct ice_flow_prof *)
1926 ice_malloc(hw, sizeof(*params->prof));
1927 if (!params->prof) {
1928 status = ICE_ERR_NO_MEMORY;
1932 /* initialize extraction sequence to all invalid (0xff) */
1933 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1934 params->es[i].prot_id = ICE_PROT_INVALID;
1935 params->es[i].off = ICE_FV_OFFSET_INVAL;
1939 params->prof->id = prof_id;
1940 params->prof->dir = dir;
1941 params->prof->segs_cnt = segs_cnt;
1943 /* Make a copy of the segments that need to be persistent in the flow
1946 for (i = 0; i < segs_cnt; i++)
1947 ice_memcpy(¶ms->prof->segs[i], &segs[i], sizeof(*segs),
1948 ICE_NONDMA_TO_NONDMA);
1950 /* Make a copy of the actions that need to be persistent in the flow
1954 params->prof->acts = (struct ice_flow_action *)
1955 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1956 ICE_NONDMA_TO_NONDMA);
1958 if (!params->prof->acts) {
1959 status = ICE_ERR_NO_MEMORY;
1964 status = ice_flow_proc_segs(hw, params);
1966 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
1970 /* Add a HW profile for this flow profile */
1971 status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
1972 params->attr, params->attr_cnt, params->es,
1975 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1979 INIT_LIST_HEAD(¶ms->prof->entries);
1980 ice_init_lock(¶ms->prof->entries_lock);
1981 *prof = params->prof;
1985 if (params->prof->acts)
1986 ice_free(hw, params->prof->acts);
1987 ice_free(hw, params->prof);
1990 ice_free(hw, params);
1996 * ice_flow_rem_prof_sync - remove a flow profile
1997 * @hw: pointer to the hardware structure
1998 * @blk: classification stage
1999 * @prof: pointer to flow profile to remove
2001 * Assumption: the caller has acquired the lock to the profile list
/* Remove a flow profile: purge its entries, undo ACL scenario/range-checker
 * programming when the last PF stops using the profile, remove the HW
 * profile, and free the SW profile structure.  Caller holds the profile
 * list lock.
 * NOTE(review): this listing is missing interleaved lines (braces, blank
 * lines, some guards); visible tokens are kept verbatim.
 */
2003 static enum ice_status
2004 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2005 struct ice_flow_prof *prof)
2007 enum ice_status status;
2009 /* Remove all remaining flow entries before removing the flow profile */
2010 if (!LIST_EMPTY(&prof->entries)) {
2011 struct ice_flow_entry *e, *t;
2013 ice_acquire_lock(&prof->entries_lock);
2015 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2017 status = ice_flow_rem_entry_sync(hw, blk, e);
2022 ice_release_lock(&prof->entries_lock);
/* ACL-only teardown: the scenario/range-checker state is shared per
 * profile, so it is cleared only when no PF still references it.
 */
2025 if (blk == ICE_BLK_ACL) {
2026 struct ice_aqc_acl_profile_ranges query_rng_buf;
2027 struct ice_aqc_acl_prof_generic_frmt buf;
2030 /* Disassociate the scenario from the profile for the PF */
2031 status = ice_flow_acl_disassoc_scen(hw, prof);
2035 /* Clear the range-checker if the profile ID is no longer
2038 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2039 if (status && status != ICE_ERR_IN_USE) {
2041 } else if (!status) {
2042 /* Clear the range-checker value for profile ID */
2043 ice_memset(&query_rng_buf, 0,
2044 sizeof(struct ice_aqc_acl_profile_ranges),
/* Writing the zeroed buffer disables all range checkers for this
 * HW profile.
 */
2047 status = ice_flow_get_hw_prof(hw, blk, prof->id,
2052 status = ice_prog_acl_prof_ranges(hw, prof_id,
2053 &query_rng_buf, NULL);
2059 /* Remove all hardware profiles associated with this flow profile */
2060 status = ice_rem_prof(hw, blk, prof->id);
/* Unlink and free the SW profile; the entries lock dies with it */
2062 LIST_DEL(&prof->l_entry);
2063 ice_destroy_lock(&prof->entries_lock);
2065 ice_free(hw, prof->acts);
2073 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2074 * @buf: Destination buffer function writes partial xtrct sequence to
2075 * @info: Info about field
2078 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2079 struct ice_flow_fld_info *info)
2084 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2085 info->xtrct.disp / BITS_PER_BYTE;
2086 dst = info->entry.val;
2087 for (i = 0; i < info->entry.last; i++)
2088 /* HW stores field vector words in LE, convert words back to BE
2089 * so constructed entries will end up in network order
2091 buf->byte_selection[dst++] = src++ ^ 1;
2095 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2096 * @hw: pointer to the hardware structure
2097 * @prof: pointer to flow profile
/* Program the ACL extraction sequence for a profile.  Profile-wide
 * configuration is written only when no PF has configured the profile yet
 * (ICE_ERR_IN_USE from the in-use check means skip it); the current PF's
 * scenario slot is always updated.
 * NOTE(review): this listing is missing interleaved lines; visible tokens
 * are kept verbatim.
 */
2099 static enum ice_status
2100 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2102 struct ice_aqc_acl_prof_generic_frmt buf;
2103 struct ice_flow_fld_info *info;
2104 enum ice_status status;
2108 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2110 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2114 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2115 if (status && status != ICE_ERR_IN_USE)
2119 /* Program the profile dependent configuration. This is done
2120 * only once regardless of the number of PFs using that profile
2122 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
/* Walk every matched field of every segment: range-checked fields use
 * word selection, all others are placed via byte selection.
 */
2124 for (i = 0; i < prof->segs_cnt; i++) {
2125 struct ice_flow_seg_info *seg = &prof->segs[i];
2128 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2129 ICE_FLOW_FIELD_IDX_MAX) {
2130 info = &seg->fields[j];
2132 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2133 buf.word_selection[info->entry.val] =
2136 ice_flow_acl_set_xtrct_seq_fld(&buf,
/* Raw (user-defined offset) fields also go through byte selection */
2140 for (j = 0; j < seg->raws_cnt; j++) {
2141 info = &seg->raws[j].info;
2142 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
/* Initialize every PF scenario slot to invalid ... */
2146 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2147 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2151 /* Update the current PF */
2152 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2153 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2159 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2160 * @hw: pointer to the hardware structure
2161 * @blk: classification stage
2162 * @vsi_handle: software VSI handle
2163 * @vsig: target VSI group
2165 * Assumption: the caller has already verified that the VSI to
2166 * be added has the same characteristics as the VSIG and will
2167 * thereby have access to all resources added to that VSIG.
2170 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2173 enum ice_status status;
2175 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2176 return ICE_ERR_PARAM;
2178 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2179 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2181 ice_release_lock(&hw->fl_profs_locks[blk]);
2187 * ice_flow_assoc_prof - associate a VSI with a flow profile
2188 * @hw: pointer to the hardware structure
2189 * @blk: classification stage
2190 * @prof: pointer to flow profile
2191 * @vsi_handle: software VSI handle
2193 * Assumption: the caller has acquired the lock to the profile list
2194 * and the software VSI handle has been validated
2197 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2198 struct ice_flow_prof *prof, u16 vsi_handle)
2200 enum ice_status status = ICE_SUCCESS;
2202 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2203 if (blk == ICE_BLK_ACL) {
2204 status = ice_flow_acl_set_xtrct_seq(hw, prof);
2208 status = ice_add_prof_id_flow(hw, blk,
2209 ice_get_hw_vsi_num(hw,
2213 ice_set_bit(vsi_handle, prof->vsis);
2215 ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2223 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2224 * @hw: pointer to the hardware structure
2225 * @blk: classification stage
2226 * @prof: pointer to flow profile
2227 * @vsi_handle: software VSI handle
2229 * Assumption: the caller has acquired the lock to the profile list
2230 * and the software VSI handle has been validated
2232 static enum ice_status
2233 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2234 struct ice_flow_prof *prof, u16 vsi_handle)
2236 enum ice_status status = ICE_SUCCESS;
2238 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2239 status = ice_rem_prof_id_flow(hw, blk,
2240 ice_get_hw_vsi_num(hw,
2244 ice_clear_bit(vsi_handle, prof->vsis);
2246 ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2254 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2255 * @hw: pointer to the HW struct
2256 * @blk: classification stage
2257 * @dir: flow direction
2258 * @prof_id: unique ID to identify this flow profile
2259 * @segs: array of one or more packet segments that describe the flow
2260 * @segs_cnt: number of packet segments provided
2261 * @acts: array of default actions
2262 * @acts_cnt: number of default actions
2263 * @prof: stores the returned flow profile added
2266 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2267 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2268 struct ice_flow_action *acts, u8 acts_cnt,
2269 struct ice_flow_prof **prof)
2271 enum ice_status status;
2273 if (segs_cnt > ICE_FLOW_SEG_MAX)
2274 return ICE_ERR_MAX_LIMIT;
2277 return ICE_ERR_PARAM;
2280 return ICE_ERR_BAD_PTR;
2282 status = ice_flow_val_hdrs(segs, segs_cnt);
2286 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2288 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2289 acts, acts_cnt, prof);
2291 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2293 ice_release_lock(&hw->fl_profs_locks[blk]);
2299 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2300 * @hw: pointer to the HW struct
2301 * @blk: the block for which the flow profile is to be removed
2302 * @prof_id: unique ID of the flow profile to be removed
2305 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2307 struct ice_flow_prof *prof;
2308 enum ice_status status;
2310 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2312 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2314 status = ICE_ERR_DOES_NOT_EXIST;
2318 /* prof becomes invalid after the call */
2319 status = ice_flow_rem_prof_sync(hw, blk, prof);
2322 ice_release_lock(&hw->fl_profs_locks[blk]);
2328 * ice_flow_find_entry - look for a flow entry using its unique ID
2329 * @hw: pointer to the HW struct
2330 * @blk: classification stage
2331 * @entry_id: unique ID to identify this flow entry
2333 * This function looks for the flow entry with the specified unique ID in all
2334 * flow profiles of the specified classification stage. If the entry is found,
2335 * and it returns the handle to the flow entry. Otherwise, it returns
2336 * ICE_FLOW_ENTRY_ID_INVAL.
2338 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2340 struct ice_flow_entry *found = NULL;
2341 struct ice_flow_prof *p;
2343 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2345 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2346 struct ice_flow_entry *e;
2348 ice_acquire_lock(&p->entries_lock);
2349 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2350 if (e->id == entry_id) {
2354 ice_release_lock(&p->entries_lock);
2360 ice_release_lock(&hw->fl_profs_locks[blk]);
2362 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2366 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2367 * @hw: pointer to the hardware structure
2368 * @acts: array of actions to be performed on a match
2369 * @acts_cnt: number of actions
2370 * @cnt_alloc: indicates if an ACL counter has been allocated.
/* Validate an ACL rule's action list (supported types, no duplicates)
 * and allocate an ACL counter when a counter-type action is present;
 * *cnt_alloc reports whether a counter was allocated.
 * NOTE(review): this listing is missing interleaved lines (braces, blank
 * lines, 'continue'/'*cnt_alloc' statements); visible tokens kept verbatim.
 */
2372 static enum ice_status
2373 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2374 u8 acts_cnt, bool *cnt_alloc)
2376 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2379 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2382 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2383 return ICE_ERR_OUT_OF_RANGE;
/* First pass: reject unsupported and duplicated action types */
2385 for (i = 0; i < acts_cnt; i++) {
2386 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2387 acts[i].type != ICE_FLOW_ACT_DROP &&
2388 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2389 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2392 /* If the caller want to add two actions of the same type, then
2393 * it is considered invalid configuration.
2395 if (ice_test_and_set_bit(acts[i].type, dup_check))
2396 return ICE_ERR_PARAM;
2399 /* Checks if ACL counters are needed. */
2400 for (i = 0; i < acts_cnt; i++) {
2401 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2402 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2403 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2404 struct ice_acl_cntrs cntrs = { 0 };
2405 enum ice_status status;
2408 cntrs.bank = 0; /* Only bank0 for the moment */
2410 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2411 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
/* else: single-width counter */
2413 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2415 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2418 /* Counter index within the bank */
2419 acts[i].data.acl_act.value =
2420 CPU_TO_LE16(cntrs.first_cntr);
2429 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2430 * @fld: number of the given field
2431 * @info: info about field
2432 * @range_buf: range checker configuration buffer
2433 * @data: pointer to a data buffer containing flow entry's match values/masks
2434 * @range: Input/output param indicating which range checkers are being used
2437 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2438 struct ice_aqc_acl_profile_ranges *range_buf,
2439 u8 *data, u8 *range)
2443 /* If not specified, default mask is all bits in field */
2444 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2445 BIT(ice_flds_info[fld].size) - 1 :
2446 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2448 /* If the mask is 0, then we don't need to worry about this input
2449 * range checker value.
2453 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2455 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2456 u8 range_idx = info->entry.val;
2458 range_buf->checker_cfg[range_idx].low_boundary =
2459 CPU_TO_BE16(new_low);
2460 range_buf->checker_cfg[range_idx].high_boundary =
2461 CPU_TO_BE16(new_high);
2462 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2464 /* Indicate which range checker is being used */
2465 *range |= BIT(range_idx);
2470 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2471 * @fld: number of the given field
2472 * @info: info about the field
2473 * @buf: buffer containing the entry
2474 * @dontcare: buffer containing don't care mask for entry
2475 * @data: pointer to a data buffer containing flow entry's match values/masks
/* Copy one (possibly bit-displaced) field's match value and mask from the
 * user data buffer into the ACL entry key ('buf') and don't-care mask
 * ('dontcare'), then mark the unused leading/trailing bits of the field's
 * first/last bytes as don't care.
 * NOTE(review): bit-carry logic between iterations is order-sensitive and
 * this listing is missing interleaved lines; visible tokens kept verbatim.
 */
2478 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2479 u8 *dontcare, u8 *data)
2481 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2482 bool use_mask = false;
2485 src = info->src.val;
2486 mask = info->src.mask;
2487 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
/* disp = bit offset of the field inside its first byte */
2488 disp = info->xtrct.disp % BITS_PER_BYTE;
2490 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2493 for (k = 0; k < info->entry.last; k++, dst++) {
2494 /* Add overflow bits from previous byte */
2495 buf[dst] = (tmp_s & 0xff00) >> 8;
2497 /* If mask is not valid, tmp_m is always zero, so just setting
2498 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2499 * overflow bits of mask from prev byte
2501 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2503 /* If there is displacement, last byte will only contain
2504 * displaced data, but there is no more data to read from user
2505 * buffer, so skip so as not to potentially read beyond end of
2508 if (!disp || k < info->entry.last - 1) {
2509 /* Store shifted data to use in next byte */
2510 tmp_s = data[src++] << disp;
2512 /* Add current (shifted) byte */
2513 buf[dst] |= tmp_s & 0xff;
2515 /* Handle mask if valid */
/* dontcare holds the INVERTED mask: 1 = bit ignored by HW */
2517 tmp_m = (~data[mask++] & 0xff) << disp;
2518 dontcare[dst] |= tmp_m & 0xff;
2523 /* Fill in don't care bits at beginning of field */
2525 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2526 for (k = 0; k < disp; k++)
2527 dontcare[dst] |= BIT(k);
/* end_disp = first unused bit position in the field's last byte */
2530 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2532 /* Fill in don't care bits at end of field */
2534 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2535 info->entry.last - 1;
2536 for (k = end_disp; k < BITS_PER_BYTE; k++)
2537 dontcare[dst] |= BIT(k);
2542 * ice_flow_acl_frmt_entry - Format ACL entry
2543 * @hw: pointer to the hardware structure
2544 * @prof: pointer to flow profile
2545 * @e: pointer to the flow entry
2546 * @data: pointer to a data buffer containing flow entry's match values/masks
2547 * @acts: array of actions to be performed on a match
2548 * @acts_cnt: number of actions
2550 * Formats the key (and key_inverse) to be matched from the data passed in,
2551 * along with data from the flow profile. This key/key_inverse pair makes up
2552 * the 'entry' for an ACL flow entry.
/* Build the ACL entry for a flow: validates/allocates the actions, formats
 * the key and don't-care buffers from all segments' fields and raws, stamps
 * the profile ID, direction flag, and range-checker usage, then produces the
 * final key/key_inverse pair via ice_set_key().
 * NOTE(review): this listing is missing interleaved lines (error-path gotos,
 * braces, some statements); visible tokens are kept verbatim.
 */
2554 static enum ice_status
2555 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2556 struct ice_flow_entry *e, u8 *data,
2557 struct ice_flow_action *acts, u8 acts_cnt)
2559 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2560 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2561 enum ice_status status;
2566 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2570 /* Format the result action */
2572 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2576 status = ICE_ERR_NO_MEMORY;
/* Keep a persistent copy of the actions on the entry */
2578 e->acts = (struct ice_flow_action *)
2579 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2580 ICE_NONDMA_TO_NONDMA);
2584 e->acts_cnt = acts_cnt;
2586 /* Format the matching data */
2587 buf_sz = prof->cfg.scen->width;
2588 buf = (u8 *)ice_malloc(hw, buf_sz);
2592 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2596 /* 'key' buffer will store both key and key_inverse, so must be twice
2599 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2603 range_buf = (struct ice_aqc_acl_profile_ranges *)
2604 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2608 /* Set don't care mask to all 1's to start, will zero out used bytes */
2609 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
/* Fill key/dontcare from every matched field in every segment */
2611 for (i = 0; i < prof->segs_cnt; i++) {
2612 struct ice_flow_seg_info *seg = &prof->segs[i];
2615 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2616 ICE_FLOW_FIELD_IDX_MAX) {
2617 struct ice_flow_fld_info *info = &seg->fields[j];
2619 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2620 ice_flow_acl_frmt_entry_range(j, info,
2624 ice_flow_acl_frmt_entry_fld(j, info, buf,
/* Raw fields are byte-aligned: straight copy, inverted mask */
2628 for (j = 0; j < seg->raws_cnt; j++) {
2629 struct ice_flow_fld_info *info = &seg->raws[j].info;
2630 u16 dst, src, mask, k;
2631 bool use_mask = false;
2633 src = info->src.val;
2634 dst = info->entry.val -
2635 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2636 mask = info->src.mask;
2638 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2641 for (k = 0; k < info->entry.last; k++, dst++) {
2642 buf[dst] = data[src++];
2644 dontcare[dst] = ~data[mask++];
/* Profile ID byte always participates in the match */
2651 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2652 dontcare[prof->cfg.scen->pid_idx] = 0;
2654 /* Format the buffer for direction flags */
2655 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2657 if (prof->dir == ICE_FLOW_RX)
2658 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2661 buf[prof->cfg.scen->rng_chk_idx] = range;
2662 /* Mark any unused range checkers as don't care */
2663 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2664 e->range_buf = range_buf;
/* else-path: no range checkers used, range_buf not kept */
2666 ice_free(hw, range_buf);
2669 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2675 e->entry_sz = buf_sz * 2;
/* Cleanup: temporary buffers are always freed ... */
2682 ice_free(hw, dontcare);
/* ... and on error the entry's ownership of range_buf/acts is undone */
2687 if (status && range_buf) {
2688 ice_free(hw, range_buf);
2689 e->range_buf = NULL;
2692 if (status && e->acts) {
2693 ice_free(hw, e->acts);
2698 if (status && cnt_alloc)
2699 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2705 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2706 * the compared data.
2707 * @prof: pointer to flow profile
2708 * @e: pointer to the comparing flow entry
2709 * @do_chg_action: decide if we want to change the ACL action
2710 * @do_add_entry: decide if we want to add the new ACL entry
2711 * @do_rem_entry: decide if we want to remove the current ACL entry
2713 * Find an ACL scenario entry that matches the compared data. In the same time,
2714 * this function also figure out:
2715 * a/ If we want to change the ACL action
2716 * b/ If we want to add the new ACL entry
2717 * c/ If we want to remove the current ACL entry
/* Search the profile's entry list for one whose match data equals e's, and
 * decide through the three out-params whether the caller should add the new
 * entry, remove the existing one, and/or reprogram the action.
 * NOTE(review): this listing is missing interleaved lines (continue/break
 * statements, return_entry assignments); visible tokens kept verbatim.
 */
2719 static struct ice_flow_entry *
2720 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2721 struct ice_flow_entry *e, bool *do_chg_action,
2722 bool *do_add_entry, bool *do_rem_entry)
2724 struct ice_flow_entry *p, *return_entry = NULL;
2728 * a/ There exists an entry with same matching data, but different
2729 * priority, then we remove this existing ACL entry. Then, we
2730 * will add the new entry to the ACL scenario.
2731 * b/ There exists an entry with same matching data, priority, and
2732 * result action, then we do nothing
2733 * c/ There exists an entry with same matching data, priority, but
2734 * different, action, then do only change the action's entry.
2735 * d/ Else, we add this new entry to the ACL scenario.
2737 *do_chg_action = false;
2738 *do_add_entry = true;
2739 *do_rem_entry = false;
2740 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
/* Different match data: keep scanning */
2741 if (memcmp(p->entry, e->entry, p->entry_sz))
2744 /* From this point, we have the same matching_data. */
2745 *do_add_entry = false;
2748 if (p->priority != e->priority) {
2749 /* matching data && !priority */
2750 *do_add_entry = true;
2751 *do_rem_entry = true;
2755 /* From this point, we will have matching_data && priority */
2756 if (p->acts_cnt != e->acts_cnt)
2757 *do_chg_action = true;
/* Every existing action must also appear in e's action list,
 * otherwise the action set differs and must be reprogrammed.
 */
2758 for (i = 0; i < p->acts_cnt; i++) {
2759 bool found_not_match = false;
2761 for (j = 0; j < e->acts_cnt; j++)
2762 if (memcmp(&p->acts[i], &e->acts[j],
2763 sizeof(struct ice_flow_action))) {
2764 found_not_match = true;
2768 if (found_not_match) {
2769 *do_chg_action = true;
2774 /* (do_chg_action = true) means :
2775 * matching_data && priority && !result_action
2776 * (do_chg_action = false) means :
2777 * matching_data && priority && result_action
2782 return return_entry;
2786 * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
2789 static enum ice_acl_entry_prio
2790 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
2792 enum ice_acl_entry_prio acl_prio;
2795 case ICE_FLOW_PRIO_LOW:
2796 acl_prio = ICE_ACL_PRIO_LOW;
2798 case ICE_FLOW_PRIO_NORMAL:
2799 acl_prio = ICE_ACL_PRIO_NORMAL;
2801 case ICE_FLOW_PRIO_HIGH:
2802 acl_prio = ICE_ACL_PRIO_HIGH;
2805 acl_prio = ICE_ACL_PRIO_NORMAL;
2813 * ice_flow_acl_union_rng_chk - Perform union operation between two
2814 * range-range checker buffers
2815 * @dst_buf: pointer to destination range checker buffer
2816 * @src_buf: pointer to source range checker buffer
2818 * For this function, we do the union between dst_buf and src_buf
2819 * range checker buffer, and we will save the result back to dst_buf
2821 static enum ice_status
2822 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2823 struct ice_aqc_acl_profile_ranges *src_buf)
2827 if (!dst_buf || !src_buf)
2828 return ICE_ERR_BAD_PTR;
2830 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2831 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2832 bool will_populate = false;
2834 in_data = &src_buf->checker_cfg[i];
2839 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2840 cfg_data = &dst_buf->checker_cfg[j];
2842 if (!cfg_data->mask ||
2843 !memcmp(cfg_data, in_data,
2844 sizeof(struct ice_acl_rng_data))) {
2845 will_populate = true;
2850 if (will_populate) {
2851 ice_memcpy(cfg_data, in_data,
2852 sizeof(struct ice_acl_rng_data),
2853 ICE_NONDMA_TO_NONDMA);
2855 /* No available slot left to program range checker */
2856 return ICE_ERR_MAX_LIMIT;
2864 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2865 * @hw: pointer to the hardware structure
2866 * @prof: pointer to flow profile
2867 * @entry: double pointer to the flow entry
2869 * For this function, we will look at the current added entries in the
2870 * corresponding ACL scenario. Then, we will perform matching logic to
2871 * see if we want to add/modify/do nothing with this new entry.
/* Insert (or reconcile) a new entry into the profile's ACL scenario:
 * union and reprogram the range checkers if needed, then — depending on
 * whether an identical/conflicting entry already exists — remove the old
 * entry, add the new one, and/or reprogram the action memory.  On the
 * "reuse existing" path the new entry is freed and *entry is redirected to
 * the existing one.  Caller holds the profile's entries lock.
 * NOTE(review): this listing is missing interleaved lines (guards, braces,
 * labels); visible tokens are kept verbatim.
 */
2873 static enum ice_status
2874 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2875 struct ice_flow_entry **entry)
2877 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2878 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2879 struct ice_acl_act_entry *acts = NULL;
2880 struct ice_flow_entry *exist;
2881 enum ice_status status = ICE_SUCCESS;
2882 struct ice_flow_entry *e;
2885 if (!entry || !(*entry) || !prof)
2886 return ICE_ERR_BAD_PTR;
2890 do_chg_rng_chk = false;
/* Range-checker handling only when the new entry carries range data */
2894 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2899 /* Query the current range-checker value in FW */
2900 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2904 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2905 sizeof(struct ice_aqc_acl_profile_ranges),
2906 ICE_NONDMA_TO_NONDMA);
2908 /* Generate the new range-checker value */
2909 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2913 /* Reconfigure the range check if the buffer is changed. */
2914 do_chg_rng_chk = false;
2915 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2916 sizeof(struct ice_aqc_acl_profile_ranges))) {
2917 status = ice_prog_acl_prof_ranges(hw, prof_id,
2918 &cfg_rng_buf, NULL);
2922 do_chg_rng_chk = true;
2926 /* Figure out if we want to (change the ACL action) and/or
2927 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2929 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2930 &do_add_entry, &do_rem_entry);
/* do_rem_entry path: drop the stale entry before re-adding */
2932 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2937 /* Prepare the result action buffer */
2938 acts = (struct ice_acl_act_entry *)
2939 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2941 return ICE_ERR_NO_MEMORY;
2943 for (i = 0; i < e->acts_cnt; i++)
2944 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2945 sizeof(struct ice_acl_act_entry),
2946 ICE_NONDMA_TO_NONDMA);
/* do_add_entry path: program the new scenario entry and track it */
2949 enum ice_acl_entry_prio prio;
/* key buffer holds key then key_inverse, each entry_sz/2 bytes */
2953 keys = (u8 *)e->entry;
2954 inverts = keys + (e->entry_sz / 2);
2955 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
2957 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
2958 inverts, acts, e->acts_cnt,
2963 e->scen_entry_idx = entry_idx;
2964 LIST_ADD(&e->l_entry, &prof->entries);
2966 if (do_chg_action) {
2967 /* For the action memory info, update the SW's copy of
2968 * exist entry with e's action memory info
2970 ice_free(hw, exist->acts);
2971 exist->acts_cnt = e->acts_cnt;
2972 exist->acts = (struct ice_flow_action *)
2973 ice_calloc(hw, exist->acts_cnt,
2974 sizeof(struct ice_flow_action));
2976 status = ICE_ERR_NO_MEMORY;
2980 ice_memcpy(exist->acts, e->acts,
2981 sizeof(struct ice_flow_action) * e->acts_cnt,
2982 ICE_NONDMA_TO_NONDMA);
2984 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2986 exist->scen_entry_idx);
2991 if (do_chg_rng_chk) {
2992 /* In this case, we want to update the range checker
2993 * information of the exist entry
2995 status = ice_flow_acl_union_rng_chk(exist->range_buf,
3001 /* As we don't add the new entry to our SW DB, deallocate its
3002 * memories, and return the exist entry to the caller
3004 ice_dealloc_flow_entry(hw, e);
3014 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3015 * @hw: pointer to the hardware structure
3016 * @prof: pointer to flow profile
3017 * @e: double pointer to the flow entry
3019 static enum ice_status
3020 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3021 struct ice_flow_entry **e)
3023 enum ice_status status;
3025 ice_acquire_lock(&prof->entries_lock);
3026 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3027 ice_release_lock(&prof->entries_lock);
3033 * ice_flow_add_entry - Add a flow entry
3034 * @hw: pointer to the HW struct
3035 * @blk: classification stage
3036 * @prof_id: ID of the profile to add a new flow entry to
3037 * @entry_id: unique ID to identify this flow entry
3038 * @vsi_handle: software VSI handle for the flow entry
3039 * @prio: priority of the flow entry
3040 * @data: pointer to a data buffer containing flow entry's match values/masks
3041 * @acts: arrays of actions to be performed on a match
3042 * @acts_cnt: number of actions
3043 * @entry_h: pointer to buffer that receives the new flow entry's handle
/* Public entry-add path: validate arguments, locate the profile, associate
 * the VSI with it, allocate and (per block type) format the entry, link it
 * into the profile's entry list, and return its handle via *entry_h.
 * NOTE(review): this listing is missing interleaved lines (switch labels,
 * braces, gotos); visible tokens are kept verbatim.
 */
3046 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3047 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3048 void *data, struct ice_flow_action *acts, u8 acts_cnt,
3051 struct ice_flow_entry *e = NULL;
3052 struct ice_flow_prof *prof;
3053 enum ice_status status = ICE_SUCCESS;
3055 /* ACL entries must indicate an action */
3056 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3057 return ICE_ERR_PARAM;
3059 /* No flow entry data is expected for RSS */
3060 if (!entry_h || (!data && blk != ICE_BLK_RSS))
3061 return ICE_ERR_BAD_PTR;
3063 if (!ice_is_vsi_valid(hw, vsi_handle))
3064 return ICE_ERR_PARAM;
3066 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3068 prof = ice_flow_find_prof_id(hw, blk, prof_id);
3070 status = ICE_ERR_DOES_NOT_EXIST;
3072 /* Allocate memory for the entry being added and associate
3073 * the VSI to the found flow profile
3075 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3077 status = ICE_ERR_NO_MEMORY;
3079 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
/* Profile-list lock is dropped before block-specific formatting */
3082 ice_release_lock(&hw->fl_profs_locks[blk]);
3087 e->vsi_handle = vsi_handle;
3096 /* ACL will handle the entry management */
3097 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3102 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
/* Unsupported block types fall through to not-implemented */
3108 status = ICE_ERR_NOT_IMPL;
3112 if (blk != ICE_BLK_ACL) {
3113 /* ACL will handle the entry management */
3114 ice_acquire_lock(&prof->entries_lock);
3115 LIST_ADD(&e->l_entry, &prof->entries);
3116 ice_release_lock(&prof->entries_lock);
3119 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
/* Error path: free any partially-built entry */
3124 ice_free(hw, e->entry);
3132 * ice_flow_rem_entry - Remove a flow entry
3133 * @hw: pointer to the HW struct
3134 * @blk: classification stage
3135 * @entry_h: handle to the flow entry to be removed
/*
 * Returns ICE_ERR_PARAM for an invalid handle, otherwise the status of
 * ice_flow_rem_entry_sync().
 */
3137 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3140 struct ice_flow_entry *entry;
3141 struct ice_flow_prof *prof;
3142 enum ice_status status = ICE_SUCCESS;
3144 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3145 return ICE_ERR_PARAM;
/* Decode the opaque handle back into an entry pointer. */
3147 entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
3149 /* Retain the pointer to the flow profile as the entry will be freed */
3153 ice_acquire_lock(&prof->entries_lock);
3154 status = ice_flow_rem_entry_sync(hw, blk, entry);
3155 ice_release_lock(&prof->entries_lock);
3162 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3163 * @seg: packet segment the field being set belongs to
3164 * @fld: field to be set
3165 * @field_type: type of the field
3166 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3167 * entry's input buffer
3168 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3170 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3171 * entry's input buffer
3173 * This helper function stores information of a field being matched, including
3174 * the type of the field and the locations of the value to match, the mask, and
3175 * the upper-bound value in the start of the input buffer for a flow entry.
3176 * This function should only be used for fixed-size data structures.
3178 * This function also opportunistically determines the protocol headers to be
3179 * present based on the fields being set. Some fields cannot be used alone to
3180 * determine the protocol headers present. Sometimes, fields for particular
3181 * protocol headers are not matched. In those cases, the protocol headers
3182 * must be explicitly set.
3185 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3186 enum ice_flow_fld_match_type field_type, u16 val_loc,
3187 u16 mask_loc, u16 last_loc)
/* Single bit identifying this field in the segment's match/range bitmaps. */
3189 u64 bit = BIT_ULL(fld);
3192 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
/* Record type and the three input-buffer byte offsets for this field. */
3195 seg->fields[fld].type = field_type;
3196 seg->fields[fld].src.val = val_loc;
3197 seg->fields[fld].src.mask = mask_loc;
3198 seg->fields[fld].src.last = last_loc;
/* Implicitly mark the protocol header this field belongs to as present. */
3200 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3204 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3205 * @seg: packet segment the field being set belongs to
3206 * @fld: field to be set
3207 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3208 * entry's input buffer
3209 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3211 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3212 * entry's input buffer
3213 * @range: indicate if field being matched is to be in a range
3215 * This function specifies the locations, in the form of byte offsets from the
3216 * start of the input buffer for a flow entry, from where the value to match,
3217 * the mask value, and upper value can be extracted. These locations are then
3218 * stored in the flow profile. When adding a flow entry associated with the
3219 * flow profile, these locations will be used to quickly extract the values and
3220 * create the content of a match entry. This function should only be used for
3221 * fixed-size data structures.
3224 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3225 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Map the boolean to the internal field-match type and delegate. */
3227 enum ice_flow_fld_match_type t = range ?
3228 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3230 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3234 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3235 * @seg: packet segment the field being set belongs to
3236 * @fld: field to be set
3237 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3238 * entry's input buffer
3239 * @pref_loc: location of prefix value from entry's input buffer
3240 * @pref_sz: size of the location holding the prefix value
3242 * This function specifies the locations, in the form of byte offsets from the
3243 * start of the input buffer for a flow entry, from where the value to match
3244 * and the IPv4 prefix value can be extracted. These locations are then stored
3245 * in the flow profile. When adding flow entries to the associated flow profile,
3246 * these locations can be used to quickly extract the values to create the
3247 * content of a match entry. This function should only be used for fixed-size
3251 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3252 u16 val_loc, u16 pref_loc, u8 pref_sz)
3254 /* For this type of field, the "mask" location is for the prefix value's
3255 * location and the "last" location is for the size of the location of
/* i.e. mask_loc and last_loc slots are repurposed for PREFIX-type fields. */
3258 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3259 pref_loc, (u16)pref_sz);
3263 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3264 * @seg: packet segment the field being set belongs to
3265 * @off: offset of the raw field from the beginning of the segment in bytes
3266 * @len: length of the raw pattern to be matched
3267 * @val_loc: location of the value to match from entry's input buffer
3268 * @mask_loc: location of mask value from entry's input buffer
3270 * This function specifies the offset of the raw field to be match from the
3271 * beginning of the specified packet segment, and the locations, in the form of
3272 * byte offsets from the start of the input buffer for a flow entry, from where
3273 * the value to match and the mask value to be extracted. These locations are
3274 * then stored in the flow profile. When adding flow entries to the associated
3275 * flow profile, these locations can be used to quickly extract the values to
3276 * create the content of a match entry. This function should only be used for
3277 * fixed-size data structures.
3280 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3281 u16 val_loc, u16 mask_loc)
/* Only record the raw field while there is room in the fixed-size array. */
3283 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3284 seg->raws[seg->raws_cnt].off = off;
3285 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3286 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3287 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3288 /* The "last" field is used to store the length of the field */
3289 seg->raws[seg->raws_cnt].info.src.last = len;
3292 /* Overflows of "raws" will be handled as an error condition later in
3293 * the flow when this information is processed.
3299 * ice_flow_rem_vsi_prof - remove vsi from flow profile
3300 * @hw: pointer to the hardware structure
3301 * @blk: classification stage
3302 * @vsi_handle: software VSI handle
3303 * @prof_id: unique ID to identify this flow profile
3305 * This function removes the flow entries associated to the input
3306 * vsi handle and disassociates the vsi from the flow profile.
3308 enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3311 struct ice_flow_prof *prof = NULL;
3312 enum ice_status status = ICE_SUCCESS;
3314 if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3315 return ICE_ERR_PARAM;
3317 /* find flow profile pointer with input package block and profile id */
/* BUGFIX: look up the profile in the caller-supplied block, not a
 * hard-coded ICE_BLK_FD; the function validates @blk and uses it for
 * entry removal and disassociation below, so the lookup must match.
 */
3318 prof = ice_flow_find_prof_id(hw, blk, prof_id);
3320 ice_debug(hw, ICE_DBG_PKG,
3321 "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3322 return ICE_ERR_DOES_NOT_EXIST;
3325 /* Remove all remaining flow entries before removing the flow profile */
3326 if (!LIST_EMPTY(&prof->entries)) {
3327 struct ice_flow_entry *e, *t;
3329 ice_acquire_lock(&prof->entries_lock);
3330 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
/* Only remove entries that belong to the VSI being detached. */
3332 if (e->vsi_handle != vsi_handle)
3335 status = ice_flow_rem_entry_sync(hw, blk, e);
3339 ice_release_lock(&prof->entries_lock);
3344 /* disassociate the flow profile from sw vsi handle */
3345 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3347 ice_debug(hw, ICE_DBG_PKG,
3348 "ice_flow_disassoc_prof() failed with status=%d\n",
/* Header bitmaps grouped by protocol layer; used by
 * ice_flow_set_rss_seg_info() to validate the requested RSS headers.
 */
3353 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3354 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3356 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3357 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3359 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3360 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
/* Union of all headers valid in an RSS segment. */
3362 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3363 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3364 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3365 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3368 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3369 * @segs: pointer to the flow field segment(s)
3370 * @seg_cnt: segment count
3371 * @cfg: configure parameters
3373 * Helper function to extract fields from hash bitmap and use flow
3374 * header value to set flow field segment for further use in flow
3375 * profile entry or removal.
/*
 * Returns ICE_ERR_PARAM if the resulting inner segment contains an
 * unsupported header or more than one L3 or L4 protocol.
 */
3377 static enum ice_status
3378 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3379 const struct ice_rss_hash_cfg *cfg)
3381 struct ice_flow_seg_info *seg;
3385 /* set inner most segment */
3386 seg = &segs[seg_cnt - 1];
/* Translate each bit of the hash-field bitmap into a field location. */
3388 ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3389 ICE_FLOW_FIELD_IDX_MAX)
3390 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3391 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3392 ICE_FLOW_FLD_OFF_INVAL, false);
3394 ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3396 /* set outer most header */
3397 if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3398 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3399 ICE_FLOW_SEG_HDR_IPV_OTHER;
3400 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3401 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3402 ICE_FLOW_SEG_HDR_IPV_OTHER;
/* Reject headers outside the supported RSS set. */
3404 if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3405 ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
3406 return ICE_ERR_PARAM;
/* At most one L3 header may be selected (power-of-two check). */
3408 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3409 if (val && !ice_is_pow2(val))
/* Likewise, at most one L4 header. */
3412 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3413 if (val && !ice_is_pow2(val))
3420 * ice_rem_vsi_rss_list - remove VSI from RSS list
3421 * @hw: pointer to the hardware structure
3422 * @vsi_handle: software VSI handle
3424 * Remove the VSI from all RSS configurations in the list.
3426 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3428 struct ice_rss_cfg *r, *tmp;
3430 if (LIST_EMPTY(&hw->rss_list_head))
3433 ice_acquire_lock(&hw->rss_locks);
3434 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3435 ice_rss_cfg, l_entry)
/* Clear this VSI's bit; free the config once no VSI references it. */
3436 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3437 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3438 LIST_DEL(&r->l_entry);
3441 ice_release_lock(&hw->rss_locks);
3445 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3446 * @hw: pointer to the hardware structure
3447 * @vsi_handle: software VSI handle
3449 * This function will iterate through all flow profiles and disassociate
3450 * the VSI from that profile. If the flow profile has no VSIs it will
/* ... be removed (original doc line absent from this extract). */
3453 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3455 const enum ice_block blk = ICE_BLK_RSS;
3456 struct ice_flow_prof *p, *t;
3457 enum ice_status status = ICE_SUCCESS;
3459 if (!ice_is_vsi_valid(hw, vsi_handle))
3460 return ICE_ERR_PARAM;
3462 if (LIST_EMPTY(&hw->fl_profs[blk]))
3465 ice_acquire_lock(&hw->rss_locks);
/* Safe iteration: profiles may be removed from the list inside the loop. */
3466 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3468 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3469 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
/* Drop the profile entirely once its last VSI is gone. */
3473 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3474 status = ice_flow_rem_prof(hw, blk, p->id);
3479 ice_release_lock(&hw->rss_locks);
3485 * ice_get_rss_hdr_type - get a RSS profile's header type
3486 * @prof: RSS flow profile
/*
 * Classifies a profile by its segment layout: a single segment means
 * outer headers only; two segments mean tunneled (inner headers), with
 * the outer-segment L3 header distinguishing the _W_OUTER_IPV4/IPV6
 * variants.  Defaults to ICE_RSS_ANY_HEADERS when neither case matches.
 */
3488 static enum ice_rss_cfg_hdr_type
3489 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3491 enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3493 if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3494 hdr_type = ICE_RSS_OUTER_HEADERS;
3495 } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3496 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3497 hdr_type = ICE_RSS_INNER_HEADERS;
3498 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3499 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3500 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3501 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3508 * ice_rem_rss_list - remove RSS configuration from list
3509 * @hw: pointer to the hardware structure
3510 * @vsi_handle: software VSI handle
3511 * @prof: pointer to flow profile
3513 * Assumption: lock has already been acquired for RSS list
3516 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3518 enum ice_rss_cfg_hdr_type hdr_type;
3519 struct ice_rss_cfg *r, *tmp;
3521 /* Search for RSS hash fields associated to the VSI that match the
3522 * hash configurations associated to the flow profile. If found
3523 * remove from the RSS entry list of the VSI context and delete entry.
3525 hdr_type = ice_get_rss_hdr_type(prof);
3526 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3527 ice_rss_cfg, l_entry)
/* Match on the profile's innermost segment: fields, headers, hdr type. */
3528 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3529 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3530 r->hash.hdr_type == hdr_type) {
3531 ice_clear_bit(vsi_handle, r->vsis);
/* Free the list node once no VSI references this configuration. */
3532 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3533 LIST_DEL(&r->l_entry);
3541 * ice_add_rss_list - add RSS configuration to list
3542 * @hw: pointer to the hardware structure
3543 * @vsi_handle: software VSI handle
3544 * @prof: pointer to flow profile
3546 * Assumption: lock has already been acquired for RSS list
/* Returns ICE_ERR_NO_MEMORY if a new list node cannot be allocated. */
3548 static enum ice_status
3549 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3551 enum ice_rss_cfg_hdr_type hdr_type;
3552 struct ice_rss_cfg *r, *rss_cfg;
3554 hdr_type = ice_get_rss_hdr_type(prof);
/* Reuse an existing matching configuration: just add this VSI's bit. */
3555 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3556 ice_rss_cfg, l_entry)
3557 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3558 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3559 r->hash.hdr_type == hdr_type) {
3560 ice_set_bit(vsi_handle, r->vsis);
/* No match found: allocate and populate a fresh configuration node. */
3564 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3566 return ICE_ERR_NO_MEMORY;
3568 rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3569 rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3570 rss_cfg->hash.hdr_type = hdr_type;
3571 rss_cfg->hash.symm = prof->cfg.symm;
3572 ice_set_bit(vsi_handle, rss_cfg->vsis);
3574 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* Bit layout of a generated 64-bit RSS flow profile ID (see comment below). */
3579 #define ICE_FLOW_PROF_HASH_S 0
3580 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3581 #define ICE_FLOW_PROF_HDR_S 32
3582 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3583 #define ICE_FLOW_PROF_ENCAP_S 62
3584 #define ICE_FLOW_PROF_ENCAP_M (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3586 /* Flow profile ID format:
3587 * [0:31] - Packet match fields
3588 * [32:61] - Protocol header
3589 * [62:63] - Encapsulation flag:
3592 * 2 for tunneled with outer ipv4
3593 * 3 for tunneled with outer ipv6
/* NOTE(review): encap values 0/1 are documented on lines absent from this extract. */
3595 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3596 ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3597 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3598 (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
/*
 * Program one byte of a GLQF_HSYMM register: word @src is marked to be
 * XORed with word @dst (0x80 flags the entry as valid/enabled) for
 * symmetric RSS on profile @prof_id.  Read-modify-write of one byte lane.
 */
3601 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3603 u32 s = ((src % 4) << 3); /* byte shift */
3604 u32 v = dst | 0x80; /* value to program */
3605 u8 i = src / 4; /* register index */
3608 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3609 reg = (reg & ~(0xff << s)) | (v << s);
3610 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
/*
 * Configure a symmetric XOR between @len consecutive field-vector words
 * starting at @src and @dst.  Both directions are programmed so that
 * src^dst == dst^src yields the same hash for swapped flows.
 */
3614 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3617 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3620 for (i = 0; i < len; i++) {
3621 ice_rss_config_xor_word(hw, prof_id,
3622 /* Yes, field vector in GLQF_HSYMM and
3623 * GLQF_HINSET is inversed!
3625 fv_last_word - (src + i),
3626 fv_last_word - (dst + i));
3627 ice_rss_config_xor_word(hw, prof_id,
3628 fv_last_word - (dst + i),
3629 fv_last_word - (src + i));
/*
 * Program symmetric-hash XOR configuration for an RSS profile.  All six
 * GLQF_HSYMM registers are first cleared; if the profile requests
 * symmetric hashing, src/dst pairs (IPv4, IPv6, TCP, UDP, SCTP ports)
 * present in the innermost segment's extraction info are XOR-paired.
 */
3634 ice_rss_update_symm(struct ice_hw *hw,
3635 struct ice_flow_prof *prof)
3637 struct ice_prof_map *map;
/* Resolve the SW profile ID to the HW profile index under the map lock. */
3640 ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3641 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3643 prof_id = map->prof_id;
3644 ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3647 /* clear to default */
3648 for (m = 0; m < 6; m++)
3649 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3650 if (prof->cfg.symm) {
3651 struct ice_flow_seg_info *seg =
3652 &prof->segs[prof->segs_cnt - 1];
3654 struct ice_flow_seg_xtrct *ipv4_src =
3655 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3656 struct ice_flow_seg_xtrct *ipv4_dst =
3657 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3658 struct ice_flow_seg_xtrct *ipv6_src =
3659 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3660 struct ice_flow_seg_xtrct *ipv6_dst =
3661 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3663 struct ice_flow_seg_xtrct *tcp_src =
3664 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3665 struct ice_flow_seg_xtrct *tcp_dst =
3666 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3668 struct ice_flow_seg_xtrct *udp_src =
3669 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3670 struct ice_flow_seg_xtrct *udp_dst =
3671 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3673 struct ice_flow_seg_xtrct *sctp_src =
3674 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3675 struct ice_flow_seg_xtrct *sctp_dst =
3676 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
/* Pair each src/dst only when both sides were actually extracted
 * (non-zero prot_id).  Lengths: 2 words for IPv4, 8 for IPv6, 1 for ports.
 */
3679 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3680 ice_rss_config_xor(hw, prof_id,
3681 ipv4_src->idx, ipv4_dst->idx, 2);
3684 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3685 ice_rss_config_xor(hw, prof_id,
3686 ipv6_src->idx, ipv6_dst->idx, 8);
3689 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3690 ice_rss_config_xor(hw, prof_id,
3691 tcp_src->idx, tcp_dst->idx, 1);
3694 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3695 ice_rss_config_xor(hw, prof_id,
3696 udp_src->idx, udp_dst->idx, 1);
3699 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3700 ice_rss_config_xor(hw, prof_id,
3701 sctp_src->idx, sctp_dst->idx, 1);
3706 * ice_add_rss_cfg_sync - add an RSS configuration
3707 * @hw: pointer to the hardware structure
3708 * @vsi_handle: software VSI handle
3709 * @cfg: configure parameters
3711 * Assumption: lock has already been acquired for RSS list
3713 static enum ice_status
3714 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3715 const struct ice_rss_hash_cfg *cfg)
3717 const enum ice_block blk = ICE_BLK_RSS;
3718 struct ice_flow_prof *prof = NULL;
3719 struct ice_flow_seg_info *segs;
3720 enum ice_status status;
/* One segment for outer-only; max (two) when inner headers are involved. */
3723 segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3724 ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3726 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3729 return ICE_ERR_NO_MEMORY;
3731 /* Construct the packet segment info from the hashed fields */
3732 status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3736 /* Search for a flow profile that has matching headers, hash fields
3737 * and has the input VSI associated to it. If found, no further
3738 * operations required and exit.
3740 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3742 ICE_FLOW_FIND_PROF_CHK_FLDS |
3743 ICE_FLOW_FIND_PROF_CHK_VSI)
/* Exact match: only the symmetric flag may need updating. */
3745 if (prof->cfg.symm == cfg->symm)
3747 prof->cfg.symm = cfg->symm;
3751 /* Check if a flow profile exists with the same protocol headers and
3752 * associated with the input VSI. If so disassociate the VSI from
3753 * this profile. The VSI will be added to a new profile created with
3754 * the protocol header and new hash field configuration.
3756 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3757 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3759 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3761 ice_rem_rss_list(hw, vsi_handle, prof);
3765 /* Remove profile if it has no VSIs associated */
3766 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3767 status = ice_flow_rem_prof(hw, blk, prof->id);
3773 /* Search for a profile that has same match fields only. If this
3774 * exists then associate the VSI to this profile.
3776 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3778 ICE_FLOW_FIND_PROF_CHK_FLDS);
3780 if (prof->cfg.symm == cfg->symm) {
3781 status = ice_flow_assoc_prof(hw, blk, prof,
3784 status = ice_add_rss_list(hw, vsi_handle,
3787 /* if a profile exist but with different symmetric
3788 * requirement, just return error.
3790 status = ICE_ERR_NOT_SUPPORTED;
3795 /* Create a new flow profile with generated profile and packet
3796 * segment information.
3798 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3799 ICE_FLOW_GEN_PROFID(cfg->hash_flds,
3800 segs[segs_cnt - 1].hdrs,
3802 segs, segs_cnt, NULL, 0, &prof);
3806 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3807 /* If association to a new flow profile failed then this profile can
/* ... be removed — it has no users (continuation lines absent here). */
3811 ice_flow_rem_prof(hw, blk, prof->id);
3815 status = ice_add_rss_list(hw, vsi_handle, prof);
3817 prof->cfg.symm = cfg->symm;
/* Push the symmetric-hash XOR configuration to hardware. */
3819 ice_rss_update_symm(hw, prof);
3827 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3828 * @hw: pointer to the hardware structure
3829 * @vsi_handle: software VSI handle
3830 * @cfg: configure parameters
3832 * This function will generate a flow profile based on fields associated with
3833 * the input fields to hash on, the flow type and use the VSI number to add
3834 * a flow entry to the profile.
3837 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3838 const struct ice_rss_hash_cfg *cfg)
3840 struct ice_rss_hash_cfg local_cfg;
3841 enum ice_status status;
3843 if (!ice_is_vsi_valid(hw, vsi_handle) ||
3844 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3845 cfg->hash_flds == ICE_HASH_INVALID)
3846 return ICE_ERR_PARAM;
/* Specific header type: single locked call with the caller's config. */
3849 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3850 ice_acquire_lock(&hw->rss_locks);
3851 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3852 ice_release_lock(&hw->rss_locks);
/* ICE_RSS_ANY_HEADERS: apply the config for both outer and inner headers. */
3854 ice_acquire_lock(&hw->rss_locks);
3855 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3856 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3858 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3859 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3862 ice_release_lock(&hw->rss_locks);
3869 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3870 * @hw: pointer to the hardware structure
3871 * @vsi_handle: software VSI handle
3872 * @cfg: configure parameters
3874 * Assumption: lock has already been acquired for RSS list
3876 static enum ice_status
3877 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3878 const struct ice_rss_hash_cfg *cfg)
3880 const enum ice_block blk = ICE_BLK_RSS;
3881 struct ice_flow_seg_info *segs;
3882 struct ice_flow_prof *prof;
3883 enum ice_status status;
/* Segment count mirrors ice_add_rss_cfg_sync() so lookups match. */
3886 segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3887 ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3888 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3891 return ICE_ERR_NO_MEMORY;
3893 /* Construct the packet segment info from the hashed fields */
3894 status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3898 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3900 ICE_FLOW_FIND_PROF_CHK_FLDS);
3902 status = ICE_ERR_DOES_NOT_EXIST;
3906 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3910 /* Remove RSS configuration from VSI context before deleting
/* ... the flow profile (continuation line absent from this extract). */
3913 ice_rem_rss_list(hw, vsi_handle, prof);
/* Remove the profile itself once no VSI references it. */
3915 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3916 status = ice_flow_rem_prof(hw, blk, prof->id);
3924 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3925 * @hw: pointer to the hardware structure
3926 * @vsi_handle: software VSI handle
3927 * @cfg: configure parameters
3929 * This function will lookup the flow profile based on the input
3930 * hash field bitmap, iterate through the profile entry list of
3931 * that profile and find entry associated with input VSI to be
3932 * removed. Calls are made to underlying flow apis which will in
3933 * turn build or update buffers for RSS XLT1 section.
3936 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3937 const struct ice_rss_hash_cfg *cfg)
3939 struct ice_rss_hash_cfg local_cfg;
3940 enum ice_status status;
3942 if (!ice_is_vsi_valid(hw, vsi_handle) ||
3943 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3944 cfg->hash_flds == ICE_HASH_INVALID)
3945 return ICE_ERR_PARAM;
3947 ice_acquire_lock(&hw->rss_locks);
/* Specific header type: single removal; ANY: remove both outer and inner. */
3949 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3950 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3952 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3953 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3956 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3957 status = ice_rem_rss_cfg_sync(hw, vsi_handle,
3961 ice_release_lock(&hw->rss_locks);
3967 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3968 * @hw: pointer to the hardware structure
3969 * @vsi_handle: software VSI handle
/*
 * Re-applies every saved RSS configuration that references @vsi_handle,
 * e.g. after a reset.  Returns ICE_ERR_PARAM for an invalid VSI,
 * otherwise the status of the replayed ice_add_rss_cfg_sync() calls.
 */
3971 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3973 enum ice_status status = ICE_SUCCESS;
3974 struct ice_rss_cfg *r;
3976 if (!ice_is_vsi_valid(hw, vsi_handle))
3977 return ICE_ERR_PARAM;
3979 ice_acquire_lock(&hw->rss_locks);
3980 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3981 ice_rss_cfg, l_entry) {
3982 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3983 status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
3988 ice_release_lock(&hw->rss_locks);
3994 * ice_get_rss_cfg - returns hashed fields for the given header types
3995 * @hw: pointer to the hardware structure
3996 * @vsi_handle: software VSI handle
3997 * @hdrs: protocol header type
3999 * This function will return the match fields of the first instance of flow
4000 * profile having the given header types and containing input VSI
4002 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4004 u64 rss_hash = ICE_HASH_INVALID;
4005 struct ice_rss_cfg *r;
4007 /* verify if the protocol header is non zero and VSI is valid */
4008 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4009 return ICE_HASH_INVALID;
4011 ice_acquire_lock(&hw->rss_locks);
4012 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4013 ice_rss_cfg, l_entry)
4014 if (ice_is_bit_set(r->vsis, vsi_handle) &&
4015 r->hash.addl_hdrs == hdrs) {
4016 rss_hash = r->hash.hash_flds;
4019 ice_release_lock(&hw->rss_locks);