1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_common.h"
8 /* Size of known protocol header fields */
9 #define ICE_FLOW_FLD_SZ_ETH_TYPE 2
10 #define ICE_FLOW_FLD_SZ_VLAN 2
11 #define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
12 #define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
13 #define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
14 #define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
15 #define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
16 #define ICE_FLOW_FLD_SZ_IP_DSCP 1
17 #define ICE_FLOW_FLD_SZ_IP_TTL 1
18 #define ICE_FLOW_FLD_SZ_IP_PROT 1
19 #define ICE_FLOW_FLD_SZ_PORT 2
20 #define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
21 #define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
22 #define ICE_FLOW_FLD_SZ_ICMP_CODE 1
23 #define ICE_FLOW_FLD_SZ_ARP_OPER 2
24 #define ICE_FLOW_FLD_SZ_GRE_KEYID 4
25 #define ICE_FLOW_FLD_SZ_GTP_TEID 4
26 #define ICE_FLOW_FLD_SZ_GTP_QFI 2
27 #define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2
28 #define ICE_FLOW_FLD_SZ_PFCP_SEID 8
29 #define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID 4
30 #define ICE_FLOW_FLD_SZ_ESP_SPI 4
31 #define ICE_FLOW_FLD_SZ_AH_SPI 4
32 #define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4
33 #define ICE_FLOW_FLD_SZ_VXLAN_VNI 4
35 /* Describe properties of a protocol header field */
36 struct ice_flow_field_info {
/* Protocol header (ICE_FLOW_SEG_HDR_*) this field is extracted from;
 * ICE_FLOW_SEG_HDR_NONE entries in ice_flds_info share an extracted
 * word with a neighboring field and rely on 'mask' instead.
 */
37 enum ice_flow_seg_hdr hdr;
38 s16 off; /* Offset from start of a protocol header, in bits */
39 u16 size; /* Size of fields in bits */
/* Non-zero only for fields that occupy part of a 16-bit extracted
 * word (e.g. IPv4 TTL/protocol, GTP-U QFI); selects the bits of
 * interest within that word.
 */
40 u16 mask; /* 16-bit mask for field */
/* Build an ice_flow_field_info initializer: callers specify byte
 * offsets/sizes, which are stored internally in bits.
 */
43 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
45 .off = (_offset_bytes) * BITS_PER_BYTE, \
46 .size = (_size_bytes) * BITS_PER_BYTE, \
/* As ICE_FLOW_FLD_INFO, plus a 16-bit mask selecting the bits of
 * interest within the extracted word.
 */
50 #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
52 .off = (_offset_bytes) * BITS_PER_BYTE, \
53 .size = (_size_bytes) * BITS_PER_BYTE, \
57 /* Table containing properties of supported protocol header fields */
59 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
61 /* ICE_FLOW_FIELD_IDX_ETH_DA */
62 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
63 /* ICE_FLOW_FIELD_IDX_ETH_SA */
64 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
/* VLAN offsets 12/14 appear to be relative to the start of the MAC
 * header rather than the tag itself -- NOTE(review): inferred from
 * the values; confirm against the extraction-sequence code.
 */
65 /* ICE_FLOW_FIELD_IDX_S_VLAN */
66 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
67 /* ICE_FLOW_FIELD_IDX_C_VLAN */
68 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
69 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
70 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
72 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
73 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
75 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
76 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
/* TTL/hop-limit and protocol/next-header share one 16-bit extracted
 * word (IPv4 at byte offset 8, IPv6 at byte offset 6), so the four
 * entries below use ICE_FLOW_SEG_HDR_NONE and a byte-selecting mask.
 * NOTE(review): the actual header is presumably resolved at runtime
 * from the segment's L3 type -- confirm in the field-extraction code.
 */
78 /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
79 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
80 ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
81 /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
82 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
83 ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
84 /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
85 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
86 ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
87 /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
88 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
89 ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
90 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
91 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
92 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
93 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
94 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
95 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
96 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
97 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
/* The PRE32/PRE48/PRE64 entries reuse the SA/DA offsets (8/24) and
 * differ only in size: they match a leading prefix of the address.
 */
98 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
99 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
100 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
101 /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
102 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
103 ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
104 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
105 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
106 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
107 /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
108 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
109 ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
110 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
111 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
112 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
113 /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
114 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
115 ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
117 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
118 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
119 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
120 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
121 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
122 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
123 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
124 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
125 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
126 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
127 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
128 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
129 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
130 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
/* ARP offsets assume Ethernet/IPv4 ARP (hlen 6, plen 4): oper at 6,
 * sender MAC at 8, sender IP at 14, target MAC at 18, target IP at 24.
 */
132 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
133 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
134 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
135 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
136 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
137 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
138 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
139 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
140 /* ICE_FLOW_FIELD_IDX_ARP_OP */
141 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
143 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
144 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
145 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
146 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
148 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
149 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
151 /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
152 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
153 ICE_FLOW_FLD_SZ_GTP_TEID),
154 /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
155 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
156 ICE_FLOW_FLD_SZ_GTP_TEID),
157 /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
158 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
159 ICE_FLOW_FLD_SZ_GTP_TEID),
160 /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
/* QFI is a 6-bit field in the GTP-U PDU session container extension
 * header; the 0x3f00 mask selects those 6 bits within the 16-bit
 * word extracted at byte offset 22.
 */
161 ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
162 ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
163 /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
164 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
165 ICE_FLOW_FLD_SZ_GTP_TEID),
166 /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
167 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
168 ICE_FLOW_FLD_SZ_GTP_TEID),
170 /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
171 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
172 ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
174 /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
175 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
176 ICE_FLOW_FLD_SZ_PFCP_SEID),
178 /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
179 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
180 ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
182 /* ICE_FLOW_FIELD_IDX_ESP_SPI */
183 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
184 ICE_FLOW_FLD_SZ_ESP_SPI),
186 /* ICE_FLOW_FIELD_IDX_AH_SPI */
187 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
188 ICE_FLOW_FLD_SZ_AH_SPI),
190 /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
/* NAT-T ESP: offset 8 presumably skips the 8-byte UDP encapsulation
 * header to reach the SPI -- NOTE(review): confirm.
 */
191 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
192 ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
193 /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
194 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
195 ICE_FLOW_FLD_SZ_VXLAN_VNI),
198 /* Bitmaps indicating relevant packet types for a particular protocol header
200 * Packet types for packets with an Outer/First/Single MAC header
/* Like every ice_ptypes_* table below: a PTYPE bitmap stored as
 * 32 u32 words (32 * 32 = 1024 packet types); bit (ptype % 32) of
 * word (ptype / 32) is set when that packet type carries the header
 * in question. NOTE(review): word/bit layout inferred from the
 * uniform 32-word shape -- confirm against the bitmap helpers.
 */
202 static const u32 ice_ptypes_mac_ofos[] = {
203 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
204 0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
205 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00000307,
206 0x00000000, 0x00000000, 0x00000000, 0x00000000,
207 0x00000000, 0x00000000, 0x00000000, 0x00000000,
208 0x00000000, 0x00000000, 0x00000000, 0x00000000,
209 0x00000000, 0x00000000, 0x00000000, 0x00000000,
210 0x00000000, 0x00000000, 0x00000000, 0x00000000,
213 /* Packet types for packets with an Innermost/Last MAC VLAN header */
214 static const u32 ice_ptypes_macvlan_il[] = {
215 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
216 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
217 0x00000000, 0x00000000, 0x00000000, 0x00000000,
218 0x00000000, 0x00000000, 0x00000000, 0x00000000,
219 0x00000000, 0x00000000, 0x00000000, 0x00000000,
220 0x00000000, 0x00000000, 0x00000000, 0x00000000,
221 0x00000000, 0x00000000, 0x00000000, 0x00000000,
222 0x00000000, 0x00000000, 0x00000000, 0x00000000,
225 /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
226 * include IPV4 other PTYPEs
228 static const u32 ice_ptypes_ipv4_ofos[] = {
229 0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
230 0x00000000, 0x00000155, 0x00000000, 0x00000000,
231 0x00000000, 0x000FC000, 0x000002A0, 0x00000000,
232 0x00000000, 0x00000000, 0x00000000, 0x00000000,
233 0x00000000, 0x00000000, 0x00000000, 0x00000000,
234 0x00000000, 0x00000000, 0x00000000, 0x00000000,
235 0x00000000, 0x00000000, 0x00000000, 0x00000000,
236 0x00000000, 0x00000000, 0x00000000, 0x00000000,
239 /* Packet types for packets with an Outer/First/Single IPv4 header, includes
242 static const u32 ice_ptypes_ipv4_ofos_all[] = {
243 0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
244 0x00000000, 0x00000155, 0x00000000, 0x00000000,
245 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
246 0x00000000, 0x00000000, 0x00000000, 0x00000000,
247 0x00000000, 0x00000000, 0x00000000, 0x00000000,
248 0x00000000, 0x00000000, 0x00000000, 0x00000000,
249 0x00000000, 0x00000000, 0x00000000, 0x00000000,
250 0x00000000, 0x00000000, 0x00000000, 0x00000000,
253 /* Packet types for packets with an Innermost/Last IPv4 header */
254 static const u32 ice_ptypes_ipv4_il[] = {
255 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
256 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
257 0x00000000, 0x00000000, 0x001FF800, 0x00000000,
258 0x00000000, 0x00000000, 0x00000000, 0x00000000,
259 0x00000000, 0x00000000, 0x00000000, 0x00000000,
260 0x00000000, 0x00000000, 0x00000000, 0x00000000,
261 0x00000000, 0x00000000, 0x00000000, 0x00000000,
262 0x00000000, 0x00000000, 0x00000000, 0x00000000,
265 /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
266 * include IPV6 other PTYPEs
268 static const u32 ice_ptypes_ipv6_ofos[] = {
269 0x00000000, 0x00000000, 0x77000000, 0x10002000,
270 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
271 0x00000000, 0x03F00000, 0x00000540, 0x00000000,
272 0x00000000, 0x00000000, 0x00000000, 0x00000000,
273 0x00000000, 0x00000000, 0x00000000, 0x00000000,
274 0x00000000, 0x00000000, 0x00000000, 0x00000000,
275 0x00000000, 0x00000000, 0x00000000, 0x00000000,
276 0x00000000, 0x00000000, 0x00000000, 0x00000000,
279 /* Packet types for packets with an Outer/First/Single IPv6 header, includes
282 static const u32 ice_ptypes_ipv6_ofos_all[] = {
283 0x00000000, 0x00000000, 0x77000000, 0x10002000,
284 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
285 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
286 0x00000000, 0x00000000, 0x00000000, 0x00000000,
287 0x00000000, 0x00000000, 0x00000000, 0x00000000,
288 0x00000000, 0x00000000, 0x00000000, 0x00000000,
289 0x00000000, 0x00000000, 0x00000000, 0x00000000,
290 0x00000000, 0x00000000, 0x00000000, 0x00000000,
293 /* Packet types for packets with an Innermost/Last IPv6 header */
294 static const u32 ice_ptypes_ipv6_il[] = {
295 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
296 0x00000770, 0x00000000, 0x00000000, 0x00000000,
297 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
298 0x00000000, 0x00000000, 0x00000000, 0x00000000,
299 0x00000000, 0x00000000, 0x00000000, 0x00000000,
300 0x00000000, 0x00000000, 0x00000000, 0x00000000,
301 0x00000000, 0x00000000, 0x00000000, 0x00000000,
302 0x00000000, 0x00000000, 0x00000000, 0x00000000,
305 /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
306 static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
307 0x10C00000, 0x04000800, 0x00000000, 0x00000000,
308 0x00000000, 0x00000000, 0x00000000, 0x00000000,
309 0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
310 0x00000000, 0x00000000, 0x00000000, 0x00000000,
311 0x00000000, 0x00000000, 0x00000000, 0x00000000,
312 0x00000000, 0x00000000, 0x00000000, 0x00000000,
313 0x00000000, 0x00000000, 0x00000000, 0x00000000,
314 0x00000000, 0x00000000, 0x00000000, 0x00000000,
317 /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
318 static const u32 ice_ptypes_ipv4_il_no_l4[] = {
319 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
320 0x00000008, 0x00000000, 0x00000000, 0x00000000,
321 0x00000000, 0x00000000, 0x00139800, 0x00000000,
322 0x00000000, 0x00000000, 0x00000000, 0x00000000,
323 0x00000000, 0x00000000, 0x00000000, 0x00000000,
324 0x00000000, 0x00000000, 0x00000000, 0x00000000,
325 0x00000000, 0x00000000, 0x00000000, 0x00000000,
326 0x00000000, 0x00000000, 0x00000000, 0x00000000,
329 /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
330 static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
331 0x00000000, 0x00000000, 0x43000000, 0x10002000,
332 0x00000000, 0x00000000, 0x00000000, 0x00000000,
333 0x00000000, 0x02300000, 0x00000540, 0x00000000,
334 0x00000000, 0x00000000, 0x00000000, 0x00000000,
335 0x00000000, 0x00000000, 0x00000000, 0x00000000,
336 0x00000000, 0x00000000, 0x00000000, 0x00000000,
337 0x00000000, 0x00000000, 0x00000000, 0x00000000,
338 0x00000000, 0x00000000, 0x00000000, 0x00000000,
341 /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
342 static const u32 ice_ptypes_ipv6_il_no_l4[] = {
343 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
344 0x00000430, 0x00000000, 0x00000000, 0x00000000,
345 0x00000000, 0x00000000, 0x4e600000, 0x00000000,
346 0x00000000, 0x00000000, 0x00000000, 0x00000000,
347 0x00000000, 0x00000000, 0x00000000, 0x00000000,
348 0x00000000, 0x00000000, 0x00000000, 0x00000000,
349 0x00000000, 0x00000000, 0x00000000, 0x00000000,
350 0x00000000, 0x00000000, 0x00000000, 0x00000000,
353 /* Packet types for packets with an Outermost/First ARP header */
354 static const u32 ice_ptypes_arp_of[] = {
355 0x00000800, 0x00000000, 0x00000000, 0x00000000,
356 0x00000000, 0x00000000, 0x00000000, 0x00000000,
357 0x00000000, 0x00000000, 0x00000000, 0x00000000,
358 0x00000000, 0x00000000, 0x00000000, 0x00000000,
359 0x00000000, 0x00000000, 0x00000000, 0x00000000,
360 0x00000000, 0x00000000, 0x00000000, 0x00000000,
361 0x00000000, 0x00000000, 0x00000000, 0x00000000,
362 0x00000000, 0x00000000, 0x00000000, 0x00000000,
365 /* UDP Packet types for non-tunneled packets or tunneled
366 * packets with inner UDP.
368 static const u32 ice_ptypes_udp_il[] = {
369 0x81000000, 0x20204040, 0x04000010, 0x80810102,
370 0x00000040, 0x00000000, 0x00000000, 0x00000000,
371 0x00000000, 0x00410000, 0x908427E0, 0x00000007,
372 0x00000000, 0x00000000, 0x00000000, 0x00000000,
373 0x00000000, 0x00000000, 0x00000000, 0x00000000,
374 0x00000000, 0x00000000, 0x00000000, 0x00000000,
375 0x00000000, 0x00000000, 0x00000000, 0x00000000,
376 0x00000000, 0x00000000, 0x00000000, 0x00000000,
379 /* Packet types for packets with an Innermost/Last TCP header */
380 static const u32 ice_ptypes_tcp_il[] = {
381 0x04000000, 0x80810102, 0x10000040, 0x02040408,
382 0x00000102, 0x00000000, 0x00000000, 0x00000000,
383 0x00000000, 0x00820000, 0x21084000, 0x00000000,
384 0x00000000, 0x00000000, 0x00000000, 0x00000000,
385 0x00000000, 0x00000000, 0x00000000, 0x00000000,
386 0x00000000, 0x00000000, 0x00000000, 0x00000000,
387 0x00000000, 0x00000000, 0x00000000, 0x00000000,
388 0x00000000, 0x00000000, 0x00000000, 0x00000000,
391 /* Packet types for packets with an Innermost/Last SCTP header */
392 static const u32 ice_ptypes_sctp_il[] = {
393 0x08000000, 0x01020204, 0x20000081, 0x04080810,
394 0x00000204, 0x00000000, 0x00000000, 0x00000000,
395 0x00000000, 0x01040000, 0x00000000, 0x00000000,
396 0x00000000, 0x00000000, 0x00000000, 0x00000000,
397 0x00000000, 0x00000000, 0x00000000, 0x00000000,
398 0x00000000, 0x00000000, 0x00000000, 0x00000000,
399 0x00000000, 0x00000000, 0x00000000, 0x00000000,
400 0x00000000, 0x00000000, 0x00000000, 0x00000000,
403 /* Packet types for packets with an Outermost/First ICMP header */
404 static const u32 ice_ptypes_icmp_of[] = {
405 0x10000000, 0x00000000, 0x00000000, 0x00000000,
406 0x00000000, 0x00000000, 0x00000000, 0x00000000,
407 0x00000000, 0x00000000, 0x00000000, 0x00000000,
408 0x00000000, 0x00000000, 0x00000000, 0x00000000,
409 0x00000000, 0x00000000, 0x00000000, 0x00000000,
410 0x00000000, 0x00000000, 0x00000000, 0x00000000,
411 0x00000000, 0x00000000, 0x00000000, 0x00000000,
412 0x00000000, 0x00000000, 0x00000000, 0x00000000,
415 /* Packet types for packets with an Innermost/Last ICMP header */
416 static const u32 ice_ptypes_icmp_il[] = {
417 0x00000000, 0x02040408, 0x40000102, 0x08101020,
418 0x00000408, 0x00000000, 0x00000000, 0x00000000,
419 0x00000000, 0x00000000, 0x42108000, 0x00000000,
420 0x00000000, 0x00000000, 0x00000000, 0x00000000,
421 0x00000000, 0x00000000, 0x00000000, 0x00000000,
422 0x00000000, 0x00000000, 0x00000000, 0x00000000,
423 0x00000000, 0x00000000, 0x00000000, 0x00000000,
424 0x00000000, 0x00000000, 0x00000000, 0x00000000,
427 /* Packet types for packets with an Outermost/First GRE header */
428 static const u32 ice_ptypes_gre_of[] = {
429 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
430 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
431 0x00000000, 0x00000000, 0x00000000, 0x00000000,
432 0x00000000, 0x00000000, 0x00000000, 0x00000000,
433 0x00000000, 0x00000000, 0x00000000, 0x00000000,
434 0x00000000, 0x00000000, 0x00000000, 0x00000000,
435 0x00000000, 0x00000000, 0x00000000, 0x00000000,
436 0x00000000, 0x00000000, 0x00000000, 0x00000000,
439 /* Packet types for packets with an Innermost/Last MAC header */
440 static const u32 ice_ptypes_mac_il[] = {
441 0x00000000, 0x20000000, 0x00000000, 0x00000000,
442 0x00000000, 0x00000000, 0x00000000, 0x00000000,
443 0x00000000, 0x00000000, 0x00000000, 0x00000000,
444 0x00000000, 0x00000000, 0x00000000, 0x00000000,
445 0x00000000, 0x00000000, 0x00000000, 0x00000000,
446 0x00000000, 0x00000000, 0x00000000, 0x00000000,
447 0x00000000, 0x00000000, 0x00000000, 0x00000000,
448 0x00000000, 0x00000000, 0x00000000, 0x00000000,
451 /* Packet types for GTPC */
452 static const u32 ice_ptypes_gtpc[] = {
453 0x00000000, 0x00000000, 0x00000000, 0x00000000,
454 0x00000000, 0x00000000, 0x00000000, 0x00000000,
455 0x00000000, 0x00000000, 0x000001E0, 0x00000000,
456 0x00000000, 0x00000000, 0x00000000, 0x00000000,
457 0x00000000, 0x00000000, 0x00000000, 0x00000000,
458 0x00000000, 0x00000000, 0x00000000, 0x00000000,
459 0x00000000, 0x00000000, 0x00000000, 0x00000000,
460 0x00000000, 0x00000000, 0x00000000, 0x00000000,
463 /* Packet types for VXLAN with VNI */
464 static const u32 ice_ptypes_vxlan_vni[] = {
465 0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
466 0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
467 0x00000000, 0x00000000, 0x00000000, 0x00000000,
468 0x00000000, 0x00000000, 0x00000000, 0x00000000,
469 0x00000000, 0x00000000, 0x00000000, 0x00000000,
470 0x00000000, 0x00000000, 0x00000000, 0x00000000,
471 0x00000000, 0x00000000, 0x00000000, 0x00000000,
472 0x00000000, 0x00000000, 0x00000000, 0x00000000,
475 /* Packet types for GTPC with TEID */
476 static const u32 ice_ptypes_gtpc_tid[] = {
477 0x00000000, 0x00000000, 0x00000000, 0x00000000,
478 0x00000000, 0x00000000, 0x00000000, 0x00000000,
479 0x00000000, 0x00000000, 0x00000060, 0x00000000,
480 0x00000000, 0x00000000, 0x00000000, 0x00000000,
481 0x00000000, 0x00000000, 0x00000000, 0x00000000,
482 0x00000000, 0x00000000, 0x00000000, 0x00000000,
483 0x00000000, 0x00000000, 0x00000000, 0x00000000,
484 0x00000000, 0x00000000, 0x00000000, 0x00000000,
487 /* Packet types for GTPU */
488 static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
489 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
490 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
491 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
492 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
493 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
494 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
495 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
496 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
497 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
498 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
499 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
500 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
501 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
502 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
503 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
504 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
505 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
506 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
507 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
508 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
511 static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
512 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
513 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
514 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
515 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
516 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
517 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
518 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
519 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
520 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
521 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
522 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
523 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
524 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
525 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
526 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
527 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
528 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
529 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
530 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
531 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
534 static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
535 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
536 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
537 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
538 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
539 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
540 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
541 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
542 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
543 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
544 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
545 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
546 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
547 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
548 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
549 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
550 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
551 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
552 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
553 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
554 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
557 static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
558 { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
559 { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
560 { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
561 { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
562 { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
563 { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
564 { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
565 { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
566 { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
567 { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
568 { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
569 { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
570 { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
571 { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
572 { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
573 { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
574 { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
575 { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
576 { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
577 { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
580 static const u32 ice_ptypes_gtpu[] = {
581 0x00000000, 0x00000000, 0x00000000, 0x00000000,
582 0x00000000, 0x00000000, 0x00000000, 0x00000000,
583 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
584 0x00000000, 0x00000000, 0x00000000, 0x00000000,
585 0x00000000, 0x00000000, 0x00000000, 0x00000000,
586 0x00000000, 0x00000000, 0x00000000, 0x00000000,
587 0x00000000, 0x00000000, 0x00000000, 0x00000000,
588 0x00000000, 0x00000000, 0x00000000, 0x00000000,
591 /* Packet types for pppoe */
592 static const u32 ice_ptypes_pppoe[] = {
593 0x00000000, 0x00000000, 0x00000000, 0x00000000,
594 0x00000000, 0x00000000, 0x00000000, 0x00000000,
595 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
596 0x00000000, 0x00000000, 0x00000000, 0x00000000,
597 0x00000000, 0x00000000, 0x00000000, 0x00000000,
598 0x00000000, 0x00000000, 0x00000000, 0x00000000,
599 0x00000000, 0x00000000, 0x00000000, 0x00000000,
600 0x00000000, 0x00000000, 0x00000000, 0x00000000,
603 /* Packet types for packets with PFCP NODE header */
604 static const u32 ice_ptypes_pfcp_node[] = {
605 0x00000000, 0x00000000, 0x00000000, 0x00000000,
606 0x00000000, 0x00000000, 0x00000000, 0x00000000,
607 0x00000000, 0x00000000, 0x80000000, 0x00000002,
608 0x00000000, 0x00000000, 0x00000000, 0x00000000,
609 0x00000000, 0x00000000, 0x00000000, 0x00000000,
610 0x00000000, 0x00000000, 0x00000000, 0x00000000,
611 0x00000000, 0x00000000, 0x00000000, 0x00000000,
612 0x00000000, 0x00000000, 0x00000000, 0x00000000,
615 /* Packet types for packets with PFCP SESSION header */
616 static const u32 ice_ptypes_pfcp_session[] = {
617 0x00000000, 0x00000000, 0x00000000, 0x00000000,
618 0x00000000, 0x00000000, 0x00000000, 0x00000000,
619 0x00000000, 0x00000000, 0x00000000, 0x00000005,
620 0x00000000, 0x00000000, 0x00000000, 0x00000000,
621 0x00000000, 0x00000000, 0x00000000, 0x00000000,
622 0x00000000, 0x00000000, 0x00000000, 0x00000000,
623 0x00000000, 0x00000000, 0x00000000, 0x00000000,
624 0x00000000, 0x00000000, 0x00000000, 0x00000000,
627 /* Packet types for l2tpv3 */
628 static const u32 ice_ptypes_l2tpv3[] = {
629 0x00000000, 0x00000000, 0x00000000, 0x00000000,
630 0x00000000, 0x00000000, 0x00000000, 0x00000000,
631 0x00000000, 0x00000000, 0x00000000, 0x00000300,
632 0x00000000, 0x00000000, 0x00000000, 0x00000000,
633 0x00000000, 0x00000000, 0x00000000, 0x00000000,
634 0x00000000, 0x00000000, 0x00000000, 0x00000000,
635 0x00000000, 0x00000000, 0x00000000, 0x00000000,
636 0x00000000, 0x00000000, 0x00000000, 0x00000000,
639 /* Packet types for esp */
640 static const u32 ice_ptypes_esp[] = {
641 0x00000000, 0x00000000, 0x00000000, 0x00000000,
642 0x00000000, 0x00000003, 0x00000000, 0x00000000,
643 0x00000000, 0x00000000, 0x00000000, 0x00000000,
644 0x00000000, 0x00000000, 0x00000000, 0x00000000,
645 0x00000000, 0x00000000, 0x00000000, 0x00000000,
646 0x00000000, 0x00000000, 0x00000000, 0x00000000,
647 0x00000000, 0x00000000, 0x00000000, 0x00000000,
648 0x00000000, 0x00000000, 0x00000000, 0x00000000,
651 /* Packet types for ah */
652 static const u32 ice_ptypes_ah[] = {
653 0x00000000, 0x00000000, 0x00000000, 0x00000000,
654 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
655 0x00000000, 0x00000000, 0x00000000, 0x00000000,
656 0x00000000, 0x00000000, 0x00000000, 0x00000000,
657 0x00000000, 0x00000000, 0x00000000, 0x00000000,
658 0x00000000, 0x00000000, 0x00000000, 0x00000000,
659 0x00000000, 0x00000000, 0x00000000, 0x00000000,
660 0x00000000, 0x00000000, 0x00000000, 0x00000000,
663 /* Packet types for packets with NAT_T ESP header */
664 static const u32 ice_ptypes_nat_t_esp[] = {
665 0x00000000, 0x00000000, 0x00000000, 0x00000000,
666 0x00000000, 0x00000030, 0x00000000, 0x00000000,
667 0x00000000, 0x00000000, 0x00000000, 0x00000000,
668 0x00000000, 0x00000000, 0x00000000, 0x00000000,
669 0x00000000, 0x00000000, 0x00000000, 0x00000000,
670 0x00000000, 0x00000000, 0x00000000, 0x00000000,
671 0x00000000, 0x00000000, 0x00000000, 0x00000000,
672 0x00000000, 0x00000000, 0x00000000, 0x00000000,
675 static const u32 ice_ptypes_mac_non_ip_ofos[] = {
676 0x00000846, 0x00000000, 0x00000000, 0x00000000,
677 0x00000000, 0x00000000, 0x00000000, 0x00000000,
678 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
679 0x00000000, 0x00000000, 0x00000000, 0x00000000,
680 0x00000000, 0x00000000, 0x00000000, 0x00000000,
681 0x00000000, 0x00000000, 0x00000000, 0x00000000,
682 0x00000000, 0x00000000, 0x00000000, 0x00000000,
683 0x00000000, 0x00000000, 0x00000000, 0x00000000,
686 static const u32 ice_ptypes_gtpu_no_ip[] = {
687 0x00000000, 0x00000000, 0x00000000, 0x00000000,
688 0x00000000, 0x00000000, 0x00000000, 0x00000000,
689 0x00000000, 0x00000000, 0x00000600, 0x00000000,
690 0x00000000, 0x00000000, 0x00000000, 0x00000000,
691 0x00000000, 0x00000000, 0x00000000, 0x00000000,
692 0x00000000, 0x00000000, 0x00000000, 0x00000000,
693 0x00000000, 0x00000000, 0x00000000, 0x00000000,
694 0x00000000, 0x00000000, 0x00000000, 0x00000000,
697 /* Manage parameters and info. used during the creation of a flow profile */
698 struct ice_flow_prof_params {
700 u16 entry_length; /* # of bytes formatted entry will require */
/* Flow profile being constructed from these parameters */
702 struct ice_flow_prof *prof;
704 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
705 * This will give us the direction flags.
707 struct ice_fv_word es[ICE_MAX_FV_WORDS];
708 /* attributes can be used to add attributes to a particular PTYPE */
709 const struct ice_ptype_attributes *attr;
/* Per-word masks -- NOTE(review): presumed parallel to es[] given
 * the shared ICE_MAX_FV_WORDS bound; confirm against users.
 */
712 u16 mask[ICE_MAX_FV_WORDS];
/* Bitmap of packet types (PTYPEs) the profile applies to */
713 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
716 #define ICE_FLOW_RSS_HDRS_INNER_MASK \
717 (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
718 ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
719 ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
720 ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
721 ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP)
723 #define ICE_FLOW_SEG_HDRS_L2_MASK \
724 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
725 #define ICE_FLOW_SEG_HDRS_L3_MASK \
726 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
727 ICE_FLOW_SEG_HDR_ARP)
728 #define ICE_FLOW_SEG_HDRS_L4_MASK \
729 (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
730 ICE_FLOW_SEG_HDR_SCTP)
731 /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
732 #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
733 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
736 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
737 * @segs: array of one or more packet segments that describe the flow
738 * @segs_cnt: number of packet segments provided
 *
 * Returns ICE_ERR_PARAM when any segment selects more than one L3 or more
 * than one L4 protocol header.
740 static enum ice_status
741 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
745 for (i = 0; i < segs_cnt; i++) {
746 /* Multiple L3 headers */
/* ice_is_pow2() on the masked bits verifies that at most a single
 * L3 header bit is set for this segment.
 */
747 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
748 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
749 return ICE_ERR_PARAM;
751 /* Multiple L4 headers */
/* Same single-bit check for the L4 header selection */
752 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
753 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
754 return ICE_ERR_PARAM;
760 /* Sizes of fixed known protocol headers without header options */
/* All sizes are in bytes; consumed by ice_flow_calc_seg_sz() below. */
761 #define ICE_FLOW_PROT_HDR_SZ_MAC 14
762 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
763 #define ICE_FLOW_PROT_HDR_SZ_IPV4 20
764 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40
765 #define ICE_FLOW_PROT_HDR_SZ_ARP 28
766 #define ICE_FLOW_PROT_HDR_SZ_ICMP 8
767 #define ICE_FLOW_PROT_HDR_SZ_TCP 20
768 #define ICE_FLOW_PROT_HDR_SZ_UDP 8
769 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12
772 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
773 * @params: information about the flow to be processed
774 * @seg: index of packet segment whose header size is to be determined
 *
 * Returns the accumulated byte size of the fixed (option-less) L2 + L3 + L4
 * headers selected in the segment's hdrs bitmap.
776 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
/* L2: VLAN-tagged MAC or plain MAC */
781 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
782 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
/* L3: at most one of IPv4/IPv6/ARP (enforced by ice_flow_val_hdrs) */
785 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
786 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
787 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
788 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
789 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
790 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
791 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
792 /* A L3 header is required if L4 is specified */
/* L4: at most one of ICMP/TCP/UDP/SCTP */
796 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
797 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
798 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
799 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
800 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
801 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
802 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
803 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
809 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
810 * @params: information about the flow to be processed
812 * This function identifies the packet types associated with the protocol
813 * headers being present in packet segments of the specified flow profile.
 *
 * Starts with every PTYPE enabled (memset 0xff) and ANDs params->ptypes with
 * the PTYPE bitmap of each selected header, leaving only PTYPEs compatible
 * with all headers of all segments. For GTPU variants it also records the
 * PTYPE attribute table (params->attr/attr_cnt).
815 static enum ice_status
816 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
818 struct ice_flow_prof *prof;
821 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
/* Segment 0 is the outermost header set; inner segments (i > 0) use the
 * *_il (inner-layer) PTYPE tables, selected by the !i checks below.
 */
826 for (i = 0; i < params->prof->segs_cnt; i++) {
827 const ice_bitmap_t *src;
830 hdrs = prof->segs[i].hdrs;
832 if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
833 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
834 (const ice_bitmap_t *)ice_ptypes_mac_il;
835 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* MAC+VLAN PTYPEs only exist for inner segments */
839 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
840 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
841 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* ARP is only recognized on the outermost segment */
845 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
846 ice_and_bitmap(params->ptypes, params->ptypes,
847 (const ice_bitmap_t *)ice_ptypes_arp_of,
851 if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
852 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
853 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* L3 selection: the *_OTHER, no-L4 and full variants of the IPv4/IPv6
 * PTYPE tables are mutually exclusive per segment.
 */
856 if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
857 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
859 (const ice_bitmap_t *)ice_ptypes_ipv4_il :
860 (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
861 ice_and_bitmap(params->ptypes, params->ptypes, src,
863 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
864 (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
866 (const ice_bitmap_t *)ice_ptypes_ipv6_il :
867 (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
868 ice_and_bitmap(params->ptypes, params->ptypes, src,
870 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
871 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
872 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
873 (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
874 ice_and_bitmap(params->ptypes, params->ptypes, src,
876 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
877 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
878 (const ice_bitmap_t *)ice_ptypes_ipv4_il;
879 ice_and_bitmap(params->ptypes, params->ptypes, src,
881 } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
882 !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
883 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
884 (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
885 ice_and_bitmap(params->ptypes, params->ptypes, src,
887 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
888 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
889 (const ice_bitmap_t *)ice_ptypes_ipv6_il;
890 ice_and_bitmap(params->ptypes, params->ptypes, src,
894 if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
895 src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
896 ice_and_bitmap(params->ptypes, params->ptypes,
897 src, ICE_FLOW_PTYPE_MAX);
898 } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
899 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
900 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* Strip PPPoE-only PTYPEs when PPPoE was not requested */
903 src = (const ice_bitmap_t *)ice_ptypes_pppoe;
904 ice_andnot_bitmap(params->ptypes, params->ptypes, src,
/* L4 selection */
908 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
909 src = (const ice_bitmap_t *)ice_ptypes_udp_il;
910 ice_and_bitmap(params->ptypes, params->ptypes, src,
912 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
913 ice_and_bitmap(params->ptypes, params->ptypes,
914 (const ice_bitmap_t *)ice_ptypes_tcp_il,
916 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
917 src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
918 ice_and_bitmap(params->ptypes, params->ptypes, src,
/* ICMP and tunnel protocols are mutually exclusive within a segment */
922 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
923 src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
924 (const ice_bitmap_t *)ice_ptypes_icmp_il;
925 ice_and_bitmap(params->ptypes, params->ptypes, src,
927 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
929 src = (const ice_bitmap_t *)ice_ptypes_gre_of;
930 ice_and_bitmap(params->ptypes, params->ptypes,
931 src, ICE_FLOW_PTYPE_MAX);
933 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
934 src = (const ice_bitmap_t *)ice_ptypes_gtpc;
935 ice_and_bitmap(params->ptypes, params->ptypes,
936 src, ICE_FLOW_PTYPE_MAX);
937 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
938 src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
939 ice_and_bitmap(params->ptypes, params->ptypes,
940 src, ICE_FLOW_PTYPE_MAX);
941 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
942 src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
943 ice_and_bitmap(params->ptypes, params->ptypes,
944 src, ICE_FLOW_PTYPE_MAX);
945 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
946 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
947 ice_and_bitmap(params->ptypes, params->ptypes,
948 src, ICE_FLOW_PTYPE_MAX);
950 /* Attributes for GTP packet with downlink */
951 params->attr = ice_attr_gtpu_down;
952 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
953 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
954 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
955 ice_and_bitmap(params->ptypes, params->ptypes,
956 src, ICE_FLOW_PTYPE_MAX);
958 /* Attributes for GTP packet with uplink */
959 params->attr = ice_attr_gtpu_up;
960 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
961 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
962 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
963 ice_and_bitmap(params->ptypes, params->ptypes,
964 src, ICE_FLOW_PTYPE_MAX);
966 /* Attributes for GTP packet with Extension Header */
967 params->attr = ice_attr_gtpu_eh;
968 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
969 } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
970 src = (const ice_bitmap_t *)ice_ptypes_gtpu;
971 ice_and_bitmap(params->ptypes, params->ptypes,
972 src, ICE_FLOW_PTYPE_MAX);
974 /* Attributes for GTP packet without Extension Header */
975 params->attr = ice_attr_gtpu_session;
976 params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
977 } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
978 src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
979 ice_and_bitmap(params->ptypes, params->ptypes,
980 src, ICE_FLOW_PTYPE_MAX);
981 } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
982 src = (const ice_bitmap_t *)ice_ptypes_esp;
983 ice_and_bitmap(params->ptypes, params->ptypes,
984 src, ICE_FLOW_PTYPE_MAX);
985 } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
986 src = (const ice_bitmap_t *)ice_ptypes_ah;
987 ice_and_bitmap(params->ptypes, params->ptypes,
988 src, ICE_FLOW_PTYPE_MAX);
989 } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
990 src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
991 ice_and_bitmap(params->ptypes, params->ptypes,
992 src, ICE_FLOW_PTYPE_MAX);
993 } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
994 src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
995 ice_and_bitmap(params->ptypes, params->ptypes,
996 src, ICE_FLOW_PTYPE_MAX);
/* PFCP: keep node or session PTYPEs when requested; the andnot calls
 * below remove PFCP PTYPEs in the other case.
 * NOTE(review): the branch structure around the andnot calls is not
 * fully visible in this view - confirm against the full source.
 */
999 if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1000 if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1002 (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1005 (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1007 ice_and_bitmap(params->ptypes, params->ptypes,
1008 src, ICE_FLOW_PTYPE_MAX);
1010 src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1011 ice_andnot_bitmap(params->ptypes, params->ptypes,
1012 src, ICE_FLOW_PTYPE_MAX);
1014 src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1015 ice_andnot_bitmap(params->ptypes, params->ptypes,
1016 src, ICE_FLOW_PTYPE_MAX);
1024 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
1025 * @hw: pointer to the HW struct
1026 * @params: information about the flow to be processed
1027 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1029 * This function will allocate an extraction sequence entries for a DWORD size
1030 * chunk of the packet flags.
 *
 * Returns ICE_ERR_MAX_LIMIT when the extraction sequence already uses all
 * field-vector words of the block.
1032 static enum ice_status
1033 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1034 struct ice_flow_prof_params *params,
1035 enum ice_flex_mdid_pkt_flags flags)
1037 u8 fv_words = hw->blk[params->blk].es.fvw;
1040 /* Make sure the number of extraction sequence entries required does not
1041 * exceed the block's capacity.
1043 if (params->es_cnt >= fv_words)
1044 return ICE_ERR_MAX_LIMIT;
1046 /* some blocks require a reversed field vector layout */
1047 if (hw->blk[params->blk].es.reverse)
1048 idx = fv_words - params->es_cnt - 1;
1050 idx = params->es_cnt;
/* The metadata protocol ID with the flags value as the offset selects
 * the packet-flags word for extraction.
 */
1052 params->es[idx].prot_id = ICE_PROT_META_ID;
1053 params->es[idx].off = flags;
1060 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1061 * @hw: pointer to the HW struct
1062 * @params: information about the flow to be processed
1063 * @seg: packet segment index of the field to be extracted
1064 * @fld: ID of field to be extracted
1065 * @match: bitfield of all fields
1067 * This function determines the protocol ID, offset, and size of the given
1068 * field. It then allocates one or more extraction sequence entries for the
1069 * given field, and fill the entries with protocol ID and offset information.
 *
 * Returns ICE_ERR_NOT_IMPL for fields with no protocol mapping and
 * ICE_ERR_MAX_LIMIT when the block's extraction sequence is exhausted.
1071 static enum ice_status
1072 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1073 u8 seg, enum ice_flow_field fld, u64 match)
1075 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1076 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1077 u8 fv_words = hw->blk[params->blk].es.fvw;
1078 struct ice_flow_fld_info *flds;
1079 u16 cnt, ese_bits, i;
1084 flds = params->prof->segs[seg].fields;
/* Map the flow field to its hardware protocol ID; seg == 0 selects the
 * outer/first protocol variant, inner segments the *_IL variant.
 */
1087 case ICE_FLOW_FIELD_IDX_ETH_DA:
1088 case ICE_FLOW_FIELD_IDX_ETH_SA:
1089 case ICE_FLOW_FIELD_IDX_S_VLAN:
1090 case ICE_FLOW_FIELD_IDX_C_VLAN:
1091 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1093 case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1094 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1096 case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1097 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1099 case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1100 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1102 case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1103 case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1104 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1106 /* TTL and PROT share the same extraction seq. entry.
1107 * Each is considered a sibling to the other in terms of sharing
1108 * the same extraction sequence entry.
1110 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1111 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1113 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1115 /* If the sibling field is also included, that field's
1116 * mask needs to be included.
1118 if (match & BIT(sib))
1119 sib_mask = ice_flds_info[sib].mask;
1121 case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1122 case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1123 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1125 /* TTL and PROT share the same extraction seq. entry.
1126 * Each is considered a sibling to the other in terms of sharing
1127 * the same extraction sequence entry.
1129 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1130 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1132 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1134 /* If the sibling field is also included, that field's
1135 * mask needs to be included.
1137 if (match & BIT(sib))
1138 sib_mask = ice_flds_info[sib].mask;
1140 case ICE_FLOW_FIELD_IDX_IPV4_SA:
1141 case ICE_FLOW_FIELD_IDX_IPV4_DA:
1142 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1144 case ICE_FLOW_FIELD_IDX_IPV6_SA:
1145 case ICE_FLOW_FIELD_IDX_IPV6_DA:
1146 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1147 case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1148 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1149 case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1150 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1151 case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1152 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1154 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1155 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1156 case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1157 prot_id = ICE_PROT_TCP_IL;
1159 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1160 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1161 prot_id = ICE_PROT_UDP_IL_OR_S;
1163 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1164 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1165 prot_id = ICE_PROT_SCTP_IL;
1167 case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1168 case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1169 case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1170 case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1171 case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1172 case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1173 case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1174 /* GTP is accessed through UDP OF protocol */
1175 prot_id = ICE_PROT_UDP_OF;
1177 case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1178 prot_id = ICE_PROT_PPPOE;
1180 case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1181 prot_id = ICE_PROT_UDP_IL_OR_S;
1183 case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1184 prot_id = ICE_PROT_L2TPV3;
1186 case ICE_FLOW_FIELD_IDX_ESP_SPI:
1187 prot_id = ICE_PROT_ESP_F;
1189 case ICE_FLOW_FIELD_IDX_AH_SPI:
1190 prot_id = ICE_PROT_ESP_2;
1192 case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1193 prot_id = ICE_PROT_UDP_IL_OR_S;
1195 case ICE_FLOW_FIELD_IDX_ARP_SIP:
1196 case ICE_FLOW_FIELD_IDX_ARP_DIP:
1197 case ICE_FLOW_FIELD_IDX_ARP_SHA:
1198 case ICE_FLOW_FIELD_IDX_ARP_DHA:
1199 case ICE_FLOW_FIELD_IDX_ARP_OP:
1200 prot_id = ICE_PROT_ARP_OF;
1202 case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1203 case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1204 /* ICMP type and code share the same extraction seq. entry */
1205 prot_id = (params->prof->segs[seg].hdrs &
1206 ICE_FLOW_SEG_HDR_IPV4) ?
1207 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1208 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1209 ICE_FLOW_FIELD_IDX_ICMP_CODE :
1210 ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1212 case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1213 prot_id = ICE_PROT_GRE_OF;
1216 return ICE_ERR_NOT_IMPL;
1219 /* Each extraction sequence entry is a word in size, and extracts a
1220 * word-aligned offset from a protocol header.
1222 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
/* Record word-aligned offset, bit displacement within the word, starting
 * extraction index and match mask for this field.
 */
1224 flds[fld].xtrct.prot_id = prot_id;
1225 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1226 ICE_FLOW_FV_EXTRACT_SZ;
1227 flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1228 flds[fld].xtrct.idx = params->es_cnt;
1229 flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1231 /* Adjust the next field-entry index after accommodating the number of
1232 * entries this field consumes
1234 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1235 ice_flds_info[fld].size, ese_bits);
1237 /* Fill in the extraction sequence entries needed for this field */
1238 off = flds[fld].xtrct.off;
1239 mask = flds[fld].xtrct.mask;
1240 for (i = 0; i < cnt; i++) {
1241 /* Only consume an extraction sequence entry if there is no
1242 * sibling field associated with this field or the sibling entry
1243 * already extracts the word shared with this field.
1245 if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1246 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1247 flds[sib].xtrct.off != off) {
1250 /* Make sure the number of extraction sequence required
1251 * does not exceed the block's capability
1253 if (params->es_cnt >= fv_words)
1254 return ICE_ERR_MAX_LIMIT;
1256 /* some blocks require a reversed field vector layout */
1257 if (hw->blk[params->blk].es.reverse)
1258 idx = fv_words - params->es_cnt - 1;
1260 idx = params->es_cnt;
1262 params->es[idx].prot_id = prot_id;
1263 params->es[idx].off = off;
/* Combine this field's mask with the sibling's so a shared
 * word matches both fields.
 */
1264 params->mask[idx] = mask | sib_mask;
1268 off += ICE_FLOW_FV_EXTRACT_SZ;
1275 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1276 * @hw: pointer to the HW struct
1277 * @params: information about the flow to be processed
1278 * @seg: index of packet segment whose raw fields are to be extracted
 *
 * Returns ICE_ERR_MAX_LIMIT if the segment has more raw fields than fit, or
 * the extraction sequence overflows; ICE_ERR_PARAM for unsupported offsets.
1280 static enum ice_status
1281 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
/* Nothing to do when the segment defines no raw-match fields */
1288 if (!params->prof->segs[seg].raws_cnt)
1291 if (params->prof->segs[seg].raws_cnt >
1292 ARRAY_SIZE(params->prof->segs[seg].raws))
1293 return ICE_ERR_MAX_LIMIT;
1295 /* Offsets within the segment headers are not supported */
1296 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1298 return ICE_ERR_PARAM;
1300 fv_words = hw->blk[params->blk].es.fvw;
1302 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1303 struct ice_flow_seg_fld_raw *raw;
1306 raw = &params->prof->segs[seg].raws[i];
1308 /* Storing extraction information */
/* Raw matches are extracted relative to the outer MAC header */
1309 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1310 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1311 ICE_FLOW_FV_EXTRACT_SZ;
1312 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1314 raw->info.xtrct.idx = params->es_cnt;
1316 /* Determine the number of field vector entries this raw field
1319 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1320 (raw->info.src.last * BITS_PER_BYTE),
1321 (ICE_FLOW_FV_EXTRACT_SZ *
1323 off = raw->info.xtrct.off;
1324 for (j = 0; j < cnt; j++) {
1327 /* Make sure the number of extraction sequence required
1328 * does not exceed the block's capability
1330 if (params->es_cnt >= hw->blk[params->blk].es.count ||
1331 params->es_cnt >= ICE_MAX_FV_WORDS)
1332 return ICE_ERR_MAX_LIMIT;
1334 /* some blocks require a reversed field vector layout */
1335 if (hw->blk[params->blk].es.reverse)
1336 idx = fv_words - params->es_cnt - 1;
1338 idx = params->es_cnt;
1340 params->es[idx].prot_id = raw->info.xtrct.prot_id;
1341 params->es[idx].off = off;
1343 off += ICE_FLOW_FV_EXTRACT_SZ;
1351 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1352 * @hw: pointer to the HW struct
1353 * @params: information about the flow to be processed
1355 * This function iterates through all matched fields in the given segments, and
1356 * creates an extraction sequence for the fields.
 *
 * Returns the first error reported by ice_flow_xtract_pkt_flags(),
 * ice_flow_xtract_fld() or ice_flow_xtract_raws().
1358 static enum ice_status
1359 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1360 struct ice_flow_prof_params *params)
1362 enum ice_status status = ICE_SUCCESS;
1365 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1368 if (params->blk == ICE_BLK_ACL) {
1369 status = ice_flow_xtract_pkt_flags(hw, params,
1370 ICE_RX_MDID_PKT_FLAGS_15_0);
1375 for (i = 0; i < params->prof->segs_cnt; i++) {
1376 u64 match = params->prof->segs[i].match;
1377 enum ice_flow_field j;
/* Extract each matched field; clear its bit so the local copy of
 * 'match' shrinks as fields are consumed.
 */
1379 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1380 ICE_FLOW_FIELD_IDX_MAX) {
1381 status = ice_flow_xtract_fld(hw, params, i, j, match);
1384 ice_clear_bit(j, (ice_bitmap_t *)&match);
1387 /* Process raw matching bytes */
1388 status = ice_flow_xtract_raws(hw, params, i);
1397 * ice_flow_sel_acl_scen - returns the specific scenario
1398 * @hw: pointer to the hardware structure
1399 * @params: information about the flow to be processed
1401 * This function will return the specific scenario based on the
1402 * params passed to it
 *
 * Best fit means the narrowest scenario whose effective width still holds
 * params->entry_length. Returns ICE_ERR_DOES_NOT_EXIST when no ACL table
 * or no wide-enough scenario exists.
1404 static enum ice_status
1405 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1407 /* Find the best-fit scenario for the provided match width */
1408 struct ice_acl_scen *cand_scen = NULL, *scen;
1411 return ICE_ERR_DOES_NOT_EXIST;
1413 /* Loop through each scenario and match against the scenario width
1414 * to select the specific scenario
1416 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
/* Candidate must fit the entry; prefer the smallest eff_width */
1417 if (scen->eff_width >= params->entry_length &&
1418 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1421 return ICE_ERR_DOES_NOT_EXIST;
1423 params->prof->cfg.scen = cand_scen;
1429 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1430 * @params: information about the flow to be processed
 *
 * Assigns byte-select indices (entry.val) and lengths (entry.last) to every
 * matched and raw field, computes the total entry length, and validates the
 * result against byte-select and range-checker hardware limits.
 * Returns ICE_ERR_PARAM on any limit violation or bad range definition.
1432 static enum ice_status
1433 ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1435 u16 index, i, range_idx = 0;
1437 index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1439 for (i = 0; i < params->prof->segs_cnt; i++) {
1440 struct ice_flow_seg_info *seg = &params->prof->segs[i];
1443 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1444 ICE_FLOW_FIELD_IDX_MAX) {
1445 struct ice_flow_fld_info *fld = &seg->fields[j];
1447 fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1449 if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1450 fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1452 /* Range checking only supported for single
1455 if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1457 BITS_PER_BYTE * 2) > 1)
1458 return ICE_ERR_PARAM;
1460 /* Ranges must define low and high values */
1461 if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1462 fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1463 return ICE_ERR_PARAM;
/* Range fields consume a range checker, not bytes */
1465 fld->entry.val = range_idx++;
1467 /* Store adjusted byte-length of field for later
1468 * use, taking into account potential
1469 * non-byte-aligned displacement
1471 fld->entry.last = DIVIDE_AND_ROUND_UP
1472 (ice_flds_info[j].size +
1473 (fld->xtrct.disp % BITS_PER_BYTE),
1475 fld->entry.val = index;
1476 index += fld->entry.last;
/* Raw fields consume byte-select slots directly */
1480 for (j = 0; j < seg->raws_cnt; j++) {
1481 struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1483 raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1484 raw->info.entry.val = index;
1485 raw->info.entry.last = raw->info.src.last;
1486 index += raw->info.entry.last;
1490 /* Currently only support using the byte selection base, which only
1491 * allows for an effective entry size of 30 bytes. Reject anything
1494 if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1495 return ICE_ERR_PARAM;
1497 /* Only 8 range checkers per profile, reject anything trying to use
1500 if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1501 return ICE_ERR_PARAM;
1503 /* Store # bytes required for entry for later use */
1504 params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1510 * ice_flow_proc_segs - process all packet segments associated with a profile
1511 * @hw: pointer to the HW struct
1512 * @params: information about the flow to be processed
 *
 * Resolves PTYPEs, builds the extraction sequence, and performs the
 * block-specific post-processing (ACL entry format and scenario selection).
 * Returns ICE_ERR_NOT_IMPL for unsupported blocks.
1514 static enum ice_status
1515 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1517 enum ice_status status;
1519 status = ice_flow_proc_seg_hdrs(params);
1523 status = ice_flow_create_xtrct_seq(hw, params);
1527 switch (params->blk) {
1530 status = ICE_SUCCESS;
/* ACL additionally needs an entry layout and a matching scenario */
1533 status = ice_flow_acl_def_entry_frmt(params);
1536 status = ice_flow_sel_acl_scen(hw, params);
1541 return ICE_ERR_NOT_IMPL;
/* Condition flags for the 'conds' argument of ice_flow_find_prof_conds() */
1547 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
1548 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
1549 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
1552 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1553 * @hw: pointer to the HW struct
1554 * @blk: classification stage
1555 * @dir: flow direction
1556 * @segs: array of one or more packet segments that describe the flow
1557 * @segs_cnt: number of packet segments provided
1558 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1559 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
 *
 * Walks hw->fl_profs[blk] under its lock and returns the first profile whose
 * direction, segment count, headers (and, per 'conds', matched fields and
 * VSI association) all agree; NULL when none matches.
1561 static struct ice_flow_prof *
1562 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1563 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1564 u8 segs_cnt, u16 vsi_handle, u32 conds)
1566 struct ice_flow_prof *p, *prof = NULL;
1568 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1569 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1570 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1571 segs_cnt && segs_cnt == p->segs_cnt) {
1574 /* Check for profile-VSI association if specified */
1575 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1576 ice_is_vsi_valid(hw, vsi_handle) &&
1577 !ice_is_bit_set(p->vsis, vsi_handle))
1580 /* Protocol headers must be checked. Matched fields are
1581 * checked if specified.
1583 for (i = 0; i < segs_cnt; i++)
1584 if (segs[i].hdrs != p->segs[i].hdrs ||
1585 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1586 segs[i].match != p->segs[i].match))
1589 /* A match is found if all segments are matched */
1590 if (i == segs_cnt) {
1595 ice_release_lock(&hw->fl_profs_locks[blk]);
1601 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1602 * @hw: pointer to the HW struct
1603 * @blk: classification stage
1604 * @dir: flow direction
1605 * @segs: array of one or more packet segments that describe the flow
1606 * @segs_cnt: number of packet segments provided
 *
 * Convenience wrapper over ice_flow_find_prof_conds() that also compares
 * matched fields. Returns the profile's ID, or ICE_FLOW_PROF_ID_INVAL when
 * no matching profile exists.
1609 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1610 struct ice_flow_seg_info *segs, u8 segs_cnt)
1612 struct ice_flow_prof *p;
1614 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1615 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1617 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1621 * ice_flow_find_prof_id - Look up a profile with given profile ID
1622 * @hw: pointer to the HW struct
1623 * @blk: classification stage
1624 * @prof_id: unique ID to identify this flow profile
 *
 * NOTE(review): unlike ice_flow_find_prof_conds(), this walk does not take
 * hw->fl_profs_locks[blk]; presumably the caller holds it - confirm.
1626 static struct ice_flow_prof *
1627 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1629 struct ice_flow_prof *p;
1631 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1632 if (p->id == prof_id)
1639 * ice_dealloc_flow_entry - Deallocate flow entry memory
1640 * @hw: pointer to the HW struct
1641 * @entry: flow entry to be removed
 *
 * Frees the entry's owned buffers (formatted entry data, range buffer,
 * actions) and finally the entry itself.
1644 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1650 ice_free(hw, entry->entry);
1652 if (entry->range_buf) {
1653 ice_free(hw, entry->range_buf);
1654 entry->range_buf = NULL;
1658 ice_free(hw, entry->acts);
/* Reset the action count so the entry no longer references freed acts */
1660 entry->acts_cnt = 0;
1663 ice_free(hw, entry);
1667 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1668 * @hw: pointer to the HW struct
1669 * @blk: classification stage
1670 * @prof_id: the profile ID handle
1671 * @hw_prof_id: pointer to variable to receive the HW profile ID
 *
 * Returns ICE_ERR_DOES_NOT_EXIST when the profile map has no entry for
 * @prof_id; otherwise writes the hardware profile ID and returns success.
1674 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1677 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1678 struct ice_prof_map *map;
/* The profile map is protected by the per-block prof_map_lock */
1680 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1681 map = ice_search_prof_id(hw, blk, prof_id);
1683 *hw_prof_id = map->prof_id;
1684 status = ICE_SUCCESS;
1686 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
/* Sentinel scenario number (63) meaning "no scenario configured" for a PF */
1690 #define ICE_ACL_INVALID_SCEN 0x3f
1693 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1694 * @hw: pointer to the hardware structure
1695 * @prof: pointer to flow profile
1696 * @buf: destination buffer function writes partial extraction sequence to
1698 * returns ICE_SUCCESS if no PF is associated to the given profile
1699 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1700 * returns other error code for real error
1702 static enum ice_status
1703 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1704 struct ice_aqc_acl_prof_generic_frmt *buf)
1706 enum ice_status status;
1709 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1713 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1717 /* If all PF's associated scenarios are all 0 or all
1718 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1719 * not been configured yet.
1721 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1722 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1723 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1724 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1727 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1728 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1729 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1730 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1731 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1732 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1733 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1734 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
/* Any mixed/other scenario value means some PF still uses it */
1737 return ICE_ERR_IN_USE;
1741 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1742 * @hw: pointer to the hardware structure
1743 * @acts: array of actions to be performed on a match
1744 * @acts_cnt: number of actions
 *
 * Deallocates the ACL counters referenced by any counter-type actions in
 * the array via the ice_aq_dealloc_acl_cntrs() admin command.
1746 static enum ice_status
1747 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1752 for (i = 0; i < acts_cnt; i++) {
1753 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1754 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1755 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1756 struct ice_acl_cntrs cntrs;
1757 enum ice_status status;
1759 cntrs.bank = 0; /* Only bank0 for the moment */
/* The counter index is carried in the action's value field */
1761 LE16_TO_CPU(acts[i].data.acl_act.value);
1763 LE16_TO_CPU(acts[i].data.acl_act.value);
/* PKT_BYTES counters occupy a dual-type counter slot */
1765 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1766 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1768 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1770 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1779 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1780 * @hw: pointer to the hardware structure
1781 * @prof: pointer to flow profile
1783 * Disassociate the scenario from the profile for the PF of the VSI.
 *
 * Reads the current profile format, marks this PF's scenario slot as
 * ICE_ACL_INVALID_SCEN, and programs the updated format back to hardware.
1785 static enum ice_status
1786 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1788 struct ice_aqc_acl_prof_generic_frmt buf;
1789 enum ice_status status = ICE_SUCCESS;
1792 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1794 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1798 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1802 /* Clear scenario for this PF */
1803 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1804 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
1810 * ice_flow_rem_entry_sync - Remove a flow entry
1811 * @hw: pointer to the HW struct
1812 * @blk: classification stage
1813 * @entry: flow entry to be removed
 *
 * For ACL entries, first removes the hardware scenario entry and releases
 * any ACL counters tied to the entry's actions; then unlinks the entry from
 * its list and frees all of its memory. Assumes the caller serializes access
 * to the entry list.
1815 static enum ice_status
1816 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1817 struct ice_flow_entry *entry)
1820 return ICE_ERR_BAD_PTR;
1822 if (blk == ICE_BLK_ACL) {
1823 enum ice_status status;
/* ACL removal requires a valid profile/scenario back-reference */
1826 return ICE_ERR_BAD_PTR;
1828 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1829 entry->scen_entry_idx);
1833 /* Checks if we need to release an ACL counter. */
1834 if (entry->acts_cnt && entry->acts)
1835 ice_flow_acl_free_act_cntr(hw, entry->acts,
1839 LIST_DEL(&entry->l_entry);
1841 ice_dealloc_flow_entry(hw, entry);
1847 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1848 * @hw: pointer to the HW struct
1849 * @blk: classification stage
1850 * @dir: flow direction
1851 * @prof_id: unique ID to identify this flow profile
1852 * @segs: array of one or more packet segments that describe the flow
1853 * @segs_cnt: number of packet segments provided
1854 * @acts: array of default actions
1855 * @acts_cnt: number of default actions
1856 * @prof: stores the returned flow profile added
1858 * Assumption: the caller has acquired the lock to the profile list
1860 static enum ice_status
1861 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1862 enum ice_flow_dir dir, u64 prof_id,
1863 struct ice_flow_seg_info *segs, u8 segs_cnt,
1864 struct ice_flow_action *acts, u8 acts_cnt,
1865 struct ice_flow_prof **prof)
1867 struct ice_flow_prof_params *params;
1868 enum ice_status status;
1871 if (!prof || (acts_cnt && !acts))
1872 return ICE_ERR_BAD_PTR;
1874 params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
1876 return ICE_ERR_NO_MEMORY;
1878 params->prof = (struct ice_flow_prof *)
1879 ice_malloc(hw, sizeof(*params->prof));
1880 if (!params->prof) {
1881 status = ICE_ERR_NO_MEMORY;
1885 /* initialize extraction sequence to all invalid (0xff) */
1886 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1887 params->es[i].prot_id = ICE_PROT_INVALID;
1888 params->es[i].off = ICE_FV_OFFSET_INVAL;
1892 params->prof->id = prof_id;
1893 params->prof->dir = dir;
1894 params->prof->segs_cnt = segs_cnt;
1896 /* Make a copy of the segments that need to be persistent in the flow
1899 for (i = 0; i < segs_cnt; i++)
1900 ice_memcpy(¶ms->prof->segs[i], &segs[i], sizeof(*segs),
1901 ICE_NONDMA_TO_NONDMA);
1903 /* Make a copy of the actions that need to be persistent in the flow
1907 params->prof->acts = (struct ice_flow_action *)
1908 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1909 ICE_NONDMA_TO_NONDMA);
1911 if (!params->prof->acts) {
1912 status = ICE_ERR_NO_MEMORY;
1917 status = ice_flow_proc_segs(hw, params);
1919 ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
1923 /* Add a HW profile for this flow profile */
1924 status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
1925 params->attr, params->attr_cnt, params->es,
1928 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
1932 INIT_LIST_HEAD(¶ms->prof->entries);
1933 ice_init_lock(¶ms->prof->entries_lock);
1934 *prof = params->prof;
1938 if (params->prof->acts)
1939 ice_free(hw, params->prof->acts);
1940 ice_free(hw, params->prof);
1943 ice_free(hw, params);
1949 * ice_flow_rem_prof_sync - remove a flow profile
1950 * @hw: pointer to the hardware structure
1951 * @blk: classification stage
1952 * @prof: pointer to flow profile to remove
1954 * Assumption: the caller has acquired the lock to the profile list
1956 static enum ice_status
1957 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1958 struct ice_flow_prof *prof)
1960 enum ice_status status;
1962 /* Remove all remaining flow entries before removing the flow profile */
1963 if (!LIST_EMPTY(&prof->entries)) {
1964 struct ice_flow_entry *e, *t;
1966 ice_acquire_lock(&prof->entries_lock);
1968 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1970 status = ice_flow_rem_entry_sync(hw, blk, e);
1975 ice_release_lock(&prof->entries_lock);
1978 if (blk == ICE_BLK_ACL) {
1979 struct ice_aqc_acl_profile_ranges query_rng_buf;
1980 struct ice_aqc_acl_prof_generic_frmt buf;
1983 /* Disassociate the scenario from the profile for the PF */
1984 status = ice_flow_acl_disassoc_scen(hw, prof);
1988 /* Clear the range-checker if the profile ID is no longer
1991 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
1992 if (status && status != ICE_ERR_IN_USE) {
1994 } else if (!status) {
1995 /* Clear the range-checker value for profile ID */
1996 ice_memset(&query_rng_buf, 0,
1997 sizeof(struct ice_aqc_acl_profile_ranges),
2000 status = ice_flow_get_hw_prof(hw, blk, prof->id,
2005 status = ice_prog_acl_prof_ranges(hw, prof_id,
2006 &query_rng_buf, NULL);
2012 /* Remove all hardware profiles associated with this flow profile */
2013 status = ice_rem_prof(hw, blk, prof->id);
2015 LIST_DEL(&prof->l_entry);
2016 ice_destroy_lock(&prof->entries_lock);
2018 ice_free(hw, prof->acts);
2026 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2027 * @buf: Destination buffer function writes partial xtrct sequence to
2028 * @info: Info about field
2031 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2032 struct ice_flow_fld_info *info)
2037 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2038 info->xtrct.disp / BITS_PER_BYTE;
2039 dst = info->entry.val;
2040 for (i = 0; i < info->entry.last; i++)
2041 /* HW stores field vector words in LE, convert words back to BE
2042 * so constructed entries will end up in network order
2044 buf->byte_selection[dst++] = src++ ^ 1;
2048 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2049 * @hw: pointer to the hardware structure
2050 * @prof: pointer to flow profile
2052 static enum ice_status
2053 ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2055 struct ice_aqc_acl_prof_generic_frmt buf;
2056 struct ice_flow_fld_info *info;
2057 enum ice_status status;
2061 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2063 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2067 status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2068 if (status && status != ICE_ERR_IN_USE)
2072 /* Program the profile dependent configuration. This is done
2073 * only once regardless of the number of PFs using that profile
2075 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2077 for (i = 0; i < prof->segs_cnt; i++) {
2078 struct ice_flow_seg_info *seg = &prof->segs[i];
2081 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2082 ICE_FLOW_FIELD_IDX_MAX) {
2083 info = &seg->fields[j];
2085 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2086 buf.word_selection[info->entry.val] =
2089 ice_flow_acl_set_xtrct_seq_fld(&buf,
2093 for (j = 0; j < seg->raws_cnt; j++) {
2094 info = &seg->raws[j].info;
2095 ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2099 ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2100 ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2104 /* Update the current PF */
2105 buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2106 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2112 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2113 * @hw: pointer to the hardware structure
2114 * @blk: classification stage
2115 * @vsi_handle: software VSI handle
2116 * @vsig: target VSI group
2118 * Assumption: the caller has already verified that the VSI to
2119 * be added has the same characteristics as the VSIG and will
2120 * thereby have access to all resources added to that VSIG.
2123 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2126 enum ice_status status;
2128 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2129 return ICE_ERR_PARAM;
2131 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2132 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2134 ice_release_lock(&hw->fl_profs_locks[blk]);
2140 * ice_flow_assoc_prof - associate a VSI with a flow profile
2141 * @hw: pointer to the hardware structure
2142 * @blk: classification stage
2143 * @prof: pointer to flow profile
2144 * @vsi_handle: software VSI handle
2146 * Assumption: the caller has acquired the lock to the profile list
2147 * and the software VSI handle has been validated
2150 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2151 struct ice_flow_prof *prof, u16 vsi_handle)
2153 enum ice_status status = ICE_SUCCESS;
2155 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2156 if (blk == ICE_BLK_ACL) {
2157 status = ice_flow_acl_set_xtrct_seq(hw, prof);
2161 status = ice_add_prof_id_flow(hw, blk,
2162 ice_get_hw_vsi_num(hw,
2166 ice_set_bit(vsi_handle, prof->vsis);
2168 ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2176 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2177 * @hw: pointer to the hardware structure
2178 * @blk: classification stage
2179 * @prof: pointer to flow profile
2180 * @vsi_handle: software VSI handle
2182 * Assumption: the caller has acquired the lock to the profile list
2183 * and the software VSI handle has been validated
2185 static enum ice_status
2186 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2187 struct ice_flow_prof *prof, u16 vsi_handle)
2189 enum ice_status status = ICE_SUCCESS;
2191 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2192 status = ice_rem_prof_id_flow(hw, blk,
2193 ice_get_hw_vsi_num(hw,
2197 ice_clear_bit(vsi_handle, prof->vsis);
2199 ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2207 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2208 * @hw: pointer to the HW struct
2209 * @blk: classification stage
2210 * @dir: flow direction
2211 * @prof_id: unique ID to identify this flow profile
2212 * @segs: array of one or more packet segments that describe the flow
2213 * @segs_cnt: number of packet segments provided
2214 * @acts: array of default actions
2215 * @acts_cnt: number of default actions
2216 * @prof: stores the returned flow profile added
2219 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2220 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2221 struct ice_flow_action *acts, u8 acts_cnt,
2222 struct ice_flow_prof **prof)
2224 enum ice_status status;
2226 if (segs_cnt > ICE_FLOW_SEG_MAX)
2227 return ICE_ERR_MAX_LIMIT;
2230 return ICE_ERR_PARAM;
2233 return ICE_ERR_BAD_PTR;
2235 status = ice_flow_val_hdrs(segs, segs_cnt);
2239 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2241 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2242 acts, acts_cnt, prof);
2244 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2246 ice_release_lock(&hw->fl_profs_locks[blk]);
2252 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2253 * @hw: pointer to the HW struct
2254 * @blk: the block for which the flow profile is to be removed
2255 * @prof_id: unique ID of the flow profile to be removed
2258 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2260 struct ice_flow_prof *prof;
2261 enum ice_status status;
2263 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2265 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2267 status = ICE_ERR_DOES_NOT_EXIST;
2271 /* prof becomes invalid after the call */
2272 status = ice_flow_rem_prof_sync(hw, blk, prof);
2275 ice_release_lock(&hw->fl_profs_locks[blk]);
2281 * ice_flow_find_entry - look for a flow entry using its unique ID
2282 * @hw: pointer to the HW struct
2283 * @blk: classification stage
2284 * @entry_id: unique ID to identify this flow entry
2286 * This function looks for the flow entry with the specified unique ID in all
2287 * flow profiles of the specified classification stage. If the entry is found,
2288 * and it returns the handle to the flow entry. Otherwise, it returns
2289 * ICE_FLOW_ENTRY_ID_INVAL.
2291 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2293 struct ice_flow_entry *found = NULL;
2294 struct ice_flow_prof *p;
2296 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2298 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2299 struct ice_flow_entry *e;
2301 ice_acquire_lock(&p->entries_lock);
2302 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2303 if (e->id == entry_id) {
2307 ice_release_lock(&p->entries_lock);
2313 ice_release_lock(&hw->fl_profs_locks[blk]);
2315 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2319 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2320 * @hw: pointer to the hardware structure
2321 * @acts: array of actions to be performed on a match
2322 * @acts_cnt: number of actions
2323 * @cnt_alloc: indicates if an ACL counter has been allocated.
2325 static enum ice_status
2326 ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2327 u8 acts_cnt, bool *cnt_alloc)
2329 ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2332 ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2335 if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2336 return ICE_ERR_OUT_OF_RANGE;
2338 for (i = 0; i < acts_cnt; i++) {
2339 if (acts[i].type != ICE_FLOW_ACT_NOP &&
2340 acts[i].type != ICE_FLOW_ACT_DROP &&
2341 acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2342 acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2345 /* If the caller want to add two actions of the same type, then
2346 * it is considered invalid configuration.
2348 if (ice_test_and_set_bit(acts[i].type, dup_check))
2349 return ICE_ERR_PARAM;
2352 /* Checks if ACL counters are needed. */
2353 for (i = 0; i < acts_cnt; i++) {
2354 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2355 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2356 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2357 struct ice_acl_cntrs cntrs;
2358 enum ice_status status;
2361 cntrs.bank = 0; /* Only bank0 for the moment */
2363 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2364 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2366 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2368 status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2371 /* Counter index within the bank */
2372 acts[i].data.acl_act.value =
2373 CPU_TO_LE16(cntrs.first_cntr);
2382 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2383 * @fld: number of the given field
2384 * @info: info about field
2385 * @range_buf: range checker configuration buffer
2386 * @data: pointer to a data buffer containing flow entry's match values/masks
2387 * @range: Input/output param indicating which range checkers are being used
2390 ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2391 struct ice_aqc_acl_profile_ranges *range_buf,
2392 u8 *data, u8 *range)
2396 /* If not specified, default mask is all bits in field */
2397 new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2398 BIT(ice_flds_info[fld].size) - 1 :
2399 (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2401 /* If the mask is 0, then we don't need to worry about this input
2402 * range checker value.
2406 (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2408 (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2409 u8 range_idx = info->entry.val;
2411 range_buf->checker_cfg[range_idx].low_boundary =
2412 CPU_TO_BE16(new_low);
2413 range_buf->checker_cfg[range_idx].high_boundary =
2414 CPU_TO_BE16(new_high);
2415 range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2417 /* Indicate which range checker is being used */
2418 *range |= BIT(range_idx);
2423 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2424 * @fld: number of the given field
2425 * @info: info about the field
2426 * @buf: buffer containing the entry
2427 * @dontcare: buffer containing don't care mask for entry
2428 * @data: pointer to a data buffer containing flow entry's match values/masks
2431 ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2432 u8 *dontcare, u8 *data)
2434 u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2435 bool use_mask = false;
2438 src = info->src.val;
2439 mask = info->src.mask;
2440 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2441 disp = info->xtrct.disp % BITS_PER_BYTE;
2443 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2446 for (k = 0; k < info->entry.last; k++, dst++) {
2447 /* Add overflow bits from previous byte */
2448 buf[dst] = (tmp_s & 0xff00) >> 8;
2450 /* If mask is not valid, tmp_m is always zero, so just setting
2451 * dontcare to 0 (no masked bits). If mask is valid, pulls in
2452 * overflow bits of mask from prev byte
2454 dontcare[dst] = (tmp_m & 0xff00) >> 8;
2456 /* If there is displacement, last byte will only contain
2457 * displaced data, but there is no more data to read from user
2458 * buffer, so skip so as not to potentially read beyond end of
2461 if (!disp || k < info->entry.last - 1) {
2462 /* Store shifted data to use in next byte */
2463 tmp_s = data[src++] << disp;
2465 /* Add current (shifted) byte */
2466 buf[dst] |= tmp_s & 0xff;
2468 /* Handle mask if valid */
2470 tmp_m = (~data[mask++] & 0xff) << disp;
2471 dontcare[dst] |= tmp_m & 0xff;
2476 /* Fill in don't care bits at beginning of field */
2478 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2479 for (k = 0; k < disp; k++)
2480 dontcare[dst] |= BIT(k);
2483 end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2485 /* Fill in don't care bits at end of field */
2487 dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2488 info->entry.last - 1;
2489 for (k = end_disp; k < BITS_PER_BYTE; k++)
2490 dontcare[dst] |= BIT(k);
2495 * ice_flow_acl_frmt_entry - Format ACL entry
2496 * @hw: pointer to the hardware structure
2497 * @prof: pointer to flow profile
2498 * @e: pointer to the flow entry
2499 * @data: pointer to a data buffer containing flow entry's match values/masks
2500 * @acts: array of actions to be performed on a match
2501 * @acts_cnt: number of actions
2503 * Formats the key (and key_inverse) to be matched from the data passed in,
2504 * along with data from the flow profile. This key/key_inverse pair makes up
2505 * the 'entry' for an ACL flow entry.
2507 static enum ice_status
2508 ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2509 struct ice_flow_entry *e, u8 *data,
2510 struct ice_flow_action *acts, u8 acts_cnt)
2512 u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2513 struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2514 enum ice_status status;
2519 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2523 /* Format the result action */
2525 status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2529 status = ICE_ERR_NO_MEMORY;
2531 e->acts = (struct ice_flow_action *)
2532 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2533 ICE_NONDMA_TO_NONDMA);
2537 e->acts_cnt = acts_cnt;
2539 /* Format the matching data */
2540 buf_sz = prof->cfg.scen->width;
2541 buf = (u8 *)ice_malloc(hw, buf_sz);
2545 dontcare = (u8 *)ice_malloc(hw, buf_sz);
2549 /* 'key' buffer will store both key and key_inverse, so must be twice
2552 key = (u8 *)ice_malloc(hw, buf_sz * 2);
2556 range_buf = (struct ice_aqc_acl_profile_ranges *)
2557 ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2561 /* Set don't care mask to all 1's to start, will zero out used bytes */
2562 ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2564 for (i = 0; i < prof->segs_cnt; i++) {
2565 struct ice_flow_seg_info *seg = &prof->segs[i];
2568 ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2569 ICE_FLOW_FIELD_IDX_MAX) {
2570 struct ice_flow_fld_info *info = &seg->fields[j];
2572 if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2573 ice_flow_acl_frmt_entry_range(j, info,
2577 ice_flow_acl_frmt_entry_fld(j, info, buf,
2581 for (j = 0; j < seg->raws_cnt; j++) {
2582 struct ice_flow_fld_info *info = &seg->raws[j].info;
2583 u16 dst, src, mask, k;
2584 bool use_mask = false;
2586 src = info->src.val;
2587 dst = info->entry.val -
2588 ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2589 mask = info->src.mask;
2591 if (mask != ICE_FLOW_FLD_OFF_INVAL)
2594 for (k = 0; k < info->entry.last; k++, dst++) {
2595 buf[dst] = data[src++];
2597 dontcare[dst] = ~data[mask++];
2604 buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2605 dontcare[prof->cfg.scen->pid_idx] = 0;
2607 /* Format the buffer for direction flags */
2608 dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2610 if (prof->dir == ICE_FLOW_RX)
2611 buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2614 buf[prof->cfg.scen->rng_chk_idx] = range;
2615 /* Mark any unused range checkers as don't care */
2616 dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2617 e->range_buf = range_buf;
2619 ice_free(hw, range_buf);
2622 status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2628 e->entry_sz = buf_sz * 2;
2635 ice_free(hw, dontcare);
2640 if (status && range_buf) {
2641 ice_free(hw, range_buf);
2642 e->range_buf = NULL;
2645 if (status && e->acts) {
2646 ice_free(hw, e->acts);
2651 if (status && cnt_alloc)
2652 ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2658 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2659 * the compared data.
2660 * @prof: pointer to flow profile
2661 * @e: pointer to the comparing flow entry
2662 * @do_chg_action: decide if we want to change the ACL action
2663 * @do_add_entry: decide if we want to add the new ACL entry
2664 * @do_rem_entry: decide if we want to remove the current ACL entry
2666 * Find an ACL scenario entry that matches the compared data. In the same time,
2667 * this function also figure out:
2668 * a/ If we want to change the ACL action
2669 * b/ If we want to add the new ACL entry
2670 * c/ If we want to remove the current ACL entry
2672 static struct ice_flow_entry *
2673 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2674 struct ice_flow_entry *e, bool *do_chg_action,
2675 bool *do_add_entry, bool *do_rem_entry)
2677 struct ice_flow_entry *p, *return_entry = NULL;
2681 * a/ There exists an entry with same matching data, but different
2682 * priority, then we remove this existing ACL entry. Then, we
2683 * will add the new entry to the ACL scenario.
2684 * b/ There exists an entry with same matching data, priority, and
2685 * result action, then we do nothing
2686 * c/ There exists an entry with same matching data, priority, but
2687 * different, action, then do only change the action's entry.
2688 * d/ Else, we add this new entry to the ACL scenario.
2690 *do_chg_action = false;
2691 *do_add_entry = true;
2692 *do_rem_entry = false;
2693 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2694 if (memcmp(p->entry, e->entry, p->entry_sz))
2697 /* From this point, we have the same matching_data. */
2698 *do_add_entry = false;
2701 if (p->priority != e->priority) {
2702 /* matching data && !priority */
2703 *do_add_entry = true;
2704 *do_rem_entry = true;
2708 /* From this point, we will have matching_data && priority */
2709 if (p->acts_cnt != e->acts_cnt)
2710 *do_chg_action = true;
2711 for (i = 0; i < p->acts_cnt; i++) {
2712 bool found_not_match = false;
2714 for (j = 0; j < e->acts_cnt; j++)
2715 if (memcmp(&p->acts[i], &e->acts[j],
2716 sizeof(struct ice_flow_action))) {
2717 found_not_match = true;
2721 if (found_not_match) {
2722 *do_chg_action = true;
2727 /* (do_chg_action = true) means :
2728 * matching_data && priority && !result_action
2729 * (do_chg_action = false) means :
2730 * matching_data && priority && result_action
2735 return return_entry;
2739 * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
2742 static enum ice_acl_entry_prio
2743 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
2745 enum ice_acl_entry_prio acl_prio;
2748 case ICE_FLOW_PRIO_LOW:
2749 acl_prio = ICE_ACL_PRIO_LOW;
2751 case ICE_FLOW_PRIO_NORMAL:
2752 acl_prio = ICE_ACL_PRIO_NORMAL;
2754 case ICE_FLOW_PRIO_HIGH:
2755 acl_prio = ICE_ACL_PRIO_HIGH;
2758 acl_prio = ICE_ACL_PRIO_NORMAL;
2766 * ice_flow_acl_union_rng_chk - Perform union operation between two
2767 * range-range checker buffers
2768 * @dst_buf: pointer to destination range checker buffer
2769 * @src_buf: pointer to source range checker buffer
2771 * For this function, we do the union between dst_buf and src_buf
2772 * range checker buffer, and we will save the result back to dst_buf
2774 static enum ice_status
2775 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2776 struct ice_aqc_acl_profile_ranges *src_buf)
2780 if (!dst_buf || !src_buf)
2781 return ICE_ERR_BAD_PTR;
2783 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2784 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2785 bool will_populate = false;
2787 in_data = &src_buf->checker_cfg[i];
2792 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2793 cfg_data = &dst_buf->checker_cfg[j];
2795 if (!cfg_data->mask ||
2796 !memcmp(cfg_data, in_data,
2797 sizeof(struct ice_acl_rng_data))) {
2798 will_populate = true;
2803 if (will_populate) {
2804 ice_memcpy(cfg_data, in_data,
2805 sizeof(struct ice_acl_rng_data),
2806 ICE_NONDMA_TO_NONDMA);
2808 /* No available slot left to program range checker */
2809 return ICE_ERR_MAX_LIMIT;
2817 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2818 * @hw: pointer to the hardware structure
2819 * @prof: pointer to flow profile
2820 * @entry: double pointer to the flow entry
2822 * For this function, we will look at the current added entries in the
2823 * corresponding ACL scenario. Then, we will perform matching logic to
2824 * see if we want to add/modify/do nothing with this new entry.
2826 static enum ice_status
2827 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2828 struct ice_flow_entry **entry)
2830 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2831 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2832 struct ice_acl_act_entry *acts = NULL;
2833 struct ice_flow_entry *exist;
2834 enum ice_status status = ICE_SUCCESS;
2835 struct ice_flow_entry *e;
2838 if (!entry || !(*entry) || !prof)
2839 return ICE_ERR_BAD_PTR;
2843 do_chg_rng_chk = false;
2847 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2852 /* Query the current range-checker value in FW */
2853 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2857 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2858 sizeof(struct ice_aqc_acl_profile_ranges),
2859 ICE_NONDMA_TO_NONDMA);
2861 /* Generate the new range-checker value */
2862 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2866 /* Reconfigure the range check if the buffer is changed. */
2867 do_chg_rng_chk = false;
2868 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2869 sizeof(struct ice_aqc_acl_profile_ranges))) {
2870 status = ice_prog_acl_prof_ranges(hw, prof_id,
2871 &cfg_rng_buf, NULL);
2875 do_chg_rng_chk = true;
2879 /* Figure out if we want to (change the ACL action) and/or
2880 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2882 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2883 &do_add_entry, &do_rem_entry);
2885 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2890 /* Prepare the result action buffer */
2891 acts = (struct ice_acl_act_entry *)
2892 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2894 return ICE_ERR_NO_MEMORY;
2896 for (i = 0; i < e->acts_cnt; i++)
2897 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2898 sizeof(struct ice_acl_act_entry),
2899 ICE_NONDMA_TO_NONDMA);
2902 enum ice_acl_entry_prio prio;
2906 keys = (u8 *)e->entry;
2907 inverts = keys + (e->entry_sz / 2);
2908 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
2910 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
2911 inverts, acts, e->acts_cnt,
2916 e->scen_entry_idx = entry_idx;
2917 LIST_ADD(&e->l_entry, &prof->entries);
2919 if (do_chg_action) {
2920 /* For the action memory info, update the SW's copy of
2921 * exist entry with e's action memory info
2923 ice_free(hw, exist->acts);
2924 exist->acts_cnt = e->acts_cnt;
2925 exist->acts = (struct ice_flow_action *)
2926 ice_calloc(hw, exist->acts_cnt,
2927 sizeof(struct ice_flow_action));
2929 status = ICE_ERR_NO_MEMORY;
2933 ice_memcpy(exist->acts, e->acts,
2934 sizeof(struct ice_flow_action) * e->acts_cnt,
2935 ICE_NONDMA_TO_NONDMA);
2937 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2939 exist->scen_entry_idx);
2944 if (do_chg_rng_chk) {
2945 /* In this case, we want to update the range checker
2946 * information of the exist entry
2948 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2954 /* As we don't add the new entry to our SW DB, deallocate its
2955 * memories, and return the exist entry to the caller
2957 ice_dealloc_flow_entry(hw, e);
2967 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2968 * @hw: pointer to the hardware structure
2969 * @prof: pointer to flow profile
2970 * @e: double pointer to the flow entry
2972 static enum ice_status
2973 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2974 struct ice_flow_entry **e)
2976 enum ice_status status;
2978 ice_acquire_lock(&prof->entries_lock);
2979 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2980 ice_release_lock(&prof->entries_lock);
2986 * ice_flow_add_entry - Add a flow entry
2987 * @hw: pointer to the HW struct
2988 * @blk: classification stage
2989 * @prof_id: ID of the profile to add a new flow entry to
2990 * @entry_id: unique ID to identify this flow entry
2991 * @vsi_handle: software VSI handle for the flow entry
2992 * @prio: priority of the flow entry
2993 * @data: pointer to a data buffer containing flow entry's match values/masks
2994 * @acts: arrays of actions to be performed on a match
2995 * @acts_cnt: number of actions
2996 * @entry_h: pointer to buffer that receives the new flow entry's handle
2999 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3000 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3001 void *data, struct ice_flow_action *acts, u8 acts_cnt,
3004 struct ice_flow_entry *e = NULL;
3005 struct ice_flow_prof *prof;
3006 enum ice_status status = ICE_SUCCESS;
3008 /* ACL entries must indicate an action */
3009 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3010 return ICE_ERR_PARAM;
3012 /* No flow entry data is expected for RSS */
3013 if (!entry_h || (!data && blk != ICE_BLK_RSS))
3014 return ICE_ERR_BAD_PTR;
3016 if (!ice_is_vsi_valid(hw, vsi_handle))
3017 return ICE_ERR_PARAM;
3019 ice_acquire_lock(&hw->fl_profs_locks[blk]);
3021 prof = ice_flow_find_prof_id(hw, blk, prof_id);
3023 status = ICE_ERR_DOES_NOT_EXIST;
3025 /* Allocate memory for the entry being added and associate
3026 * the VSI to the found flow profile
3028 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3030 status = ICE_ERR_NO_MEMORY;
3032 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3035 ice_release_lock(&hw->fl_profs_locks[blk]);
3040 e->vsi_handle = vsi_handle;
3049 /* ACL will handle the entry management */
3050 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3055 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3061 status = ICE_ERR_NOT_IMPL;
3065 if (blk != ICE_BLK_ACL) {
3066 /* ACL will handle the entry management */
3067 ice_acquire_lock(&prof->entries_lock);
3068 LIST_ADD(&e->l_entry, &prof->entries);
3069 ice_release_lock(&prof->entries_lock);
3072 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3077 ice_free(hw, e->entry);
3085 * ice_flow_rem_entry - Remove a flow entry
3086 * @hw: pointer to the HW struct
3087 * @blk: classification stage
3088 * @entry_h: handle to the flow entry to be removed
3090 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3093 struct ice_flow_entry *entry;
3094 struct ice_flow_prof *prof;
3095 enum ice_status status = ICE_SUCCESS;
3097 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3098 return ICE_ERR_PARAM;
3100 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
3102 /* Retain the pointer to the flow profile as the entry will be freed */
3106 ice_acquire_lock(&prof->entries_lock);
3107 status = ice_flow_rem_entry_sync(hw, blk, entry);
3108 ice_release_lock(&prof->entries_lock);
3115 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3116 * @seg: packet segment the field being set belongs to
3117 * @fld: field to be set
3118 * @field_type: type of the field
3119 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3120 * entry's input buffer
3121 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3123 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3124 * entry's input buffer
3126 * This helper function stores information of a field being matched, including
3127 * the type of the field and the locations of the value to match, the mask, and
3128 * the upper-bound value in the start of the input buffer for a flow entry.
3129 * This function should only be used for fixed-size data structures.
3131 * This function also opportunistically determines the protocol headers to be
3132 * present based on the fields being set. Some fields cannot be used alone to
3133 * determine the protocol headers present. Sometimes, fields for particular
3134 * protocol headers are not matched. In those cases, the protocol headers
3135 * must be explicitly set.
3138 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3139 enum ice_flow_fld_match_type field_type, u16 val_loc,
3140 u16 mask_loc, u16 last_loc)
/* 'bit' is the per-field flag for this field index.
 * NOTE(review): no visible use of 'bit' in this listing — the lines
 * that consume it (the RANGE branch body, numbers 3143-3147) were
 * dropped by the extraction; confirm against the original source.
 */
3142 u64 bit = BIT_ULL(fld);
3145 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
/* Record the match type plus the value/mask/last byte offsets into the
 * entry's input buffer for later extraction.
 */
3148 seg->fields[fld].type = field_type;
3149 seg->fields[fld].src.val = val_loc;
3150 seg->fields[fld].src.mask = mask_loc;
3151 seg->fields[fld].src.last = last_loc;
/* Opportunistically mark the protocol header this field belongs to. */
3153 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3157 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3158 * @seg: packet segment the field being set belongs to
3159 * @fld: field to be set
3160 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3161 * entry's input buffer
3162 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3164 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3165 * entry's input buffer
3166 * @range: indicate if field being matched is to be in a range
3168 * This function specifies the locations, in the form of byte offsets from the
3169 * start of the input buffer for a flow entry, from where the value to match,
3170 * the mask value, and upper value can be extracted. These locations are then
3171 * stored in the flow profile. When adding a flow entry associated with the
3172 * flow profile, these locations will be used to quickly extract the values and
3173 * create the content of a match entry. This function should only be used for
3174 * fixed-size data structures.
3177 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3178 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Map the boolean 'range' flag onto the internal match-type enum, then
 * delegate to the generic helper.
 */
3180 enum ice_flow_fld_match_type t = range ?
3181 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3183 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3187 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3188 * @seg: packet segment the field being set belongs to
3189 * @fld: field to be set
3190 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3191 * entry's input buffer
3192 * @pref_loc: location of prefix value from entry's input buffer
3193 * @pref_sz: size of the location holding the prefix value
3195 * This function specifies the locations, in the form of byte offsets from the
3196 * start of the input buffer for a flow entry, from where the value to match
3197 * and the IPv4 prefix value can be extracted. These locations are then stored
3198 * in the flow profile. When adding flow entries to the associated flow profile,
3199 * these locations can be used to quickly extract the values to create the
3200 * content of a match entry. This function should only be used for fixed-size
3204 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3205 u16 val_loc, u16 pref_loc, u8 pref_sz)
3207 /* For this type of field, the "mask" location is for the prefix value's
3208 * location and the "last" location is for the size of the location of
3211 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3212 pref_loc, (u16)pref_sz);
3216 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3217 * @seg: packet segment the field being set belongs to
3218 * @off: offset of the raw field from the beginning of the segment in bytes
3219 * @len: length of the raw pattern to be matched
3220 * @val_loc: location of the value to match from entry's input buffer
3221 * @mask_loc: location of mask value from entry's input buffer
3223 * This function specifies the offset of the raw field to be match from the
3224 * beginning of the specified packet segment, and the locations, in the form of
3225 * byte offsets from the start of the input buffer for a flow entry, from where
3226 * the value to match and the mask value to be extracted. These locations are
3227 * then stored in the flow profile. When adding flow entries to the associated
3228 * flow profile, these locations can be used to quickly extract the values to
3229 * create the content of a match entry. This function should only be used for
3230 * fixed-size data structures.
3233 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3234 u16 val_loc, u16 mask_loc)
/* Only record the raw descriptor while there is room in the fixed-size
 * raws[] array; out-of-range writes are avoided by the bound check.
 */
3236 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3237 seg->raws[seg->raws_cnt].off = off;
3238 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3239 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3240 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3241 /* The "last" field is used to store the length of the field */
3242 seg->raws[seg->raws_cnt].info.src.last = len;
3245 /* Overflows of "raws" will be handled as an error condition later in
3246 * the flow when this information is processed.
/* NOTE(review): the raws_cnt increment implied by the overflow comment
 * above is not visible in this listing (line numbers skip past ~3248);
 * verify against the original source.
 */
/* Protocol-header bits an RSS segment may legally carry, grouped by OSI
 * layer; VAL_MASKS is the union used to validate a segment's hdrs field.
 */
3251 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3252 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3254 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3255 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3257 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3258 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3260 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3261 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3262 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3263 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3266 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3267 * @segs: pointer to the flow field segment(s)
3268 * @seg_cnt: segment count
3269 * @cfg: configure parameters
3271 * Helper function to extract fields from hash bitmap and use flow
3272 * header value to set flow field segment for further use in flow
3273 * profile entry or removal.
3275 static enum ice_status
3276 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3277 const struct ice_rss_hash_cfg *cfg)
3279 struct ice_flow_seg_info *seg;
/* NOTE(review): declarations of 'i' and 'val' used below are not
 * visible in this listing (lines ~3280-3281 elided).
 */
3283 /* set inner most segment */
3284 seg = &segs[seg_cnt - 1];
/* Register every hashed field from the bitmap; offsets are all INVAL
 * since RSS only needs field/header identification, not match data.
 */
3286 ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3287 ICE_FLOW_FIELD_IDX_MAX)
3288 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3289 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3290 ICE_FLOW_FLD_OFF_INVAL, false);
3292 ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3294 /* set outer most header */
3295 if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3296 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3297 ICE_FLOW_SEG_HDR_IPV_OTHER;
3298 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3299 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3300 ICE_FLOW_SEG_HDR_IPV_OTHER;
/* Reject any header bit that is neither a valid RSS header, an inner
 * header, nor the IPV_OTHER marker.
 */
3302 if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3303 ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
3304 return ICE_ERR_PARAM;
/* A power-of-2 check enforces at most one L3 and one L4 header per
 * segment (IPv4 xor IPv6; TCP xor UDP xor SCTP).
 * NOTE(review): the error-return lines for these two checks and the
 * final success return (~3308/3312 onward) are elided in this listing.
 */
3306 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3307 if (val && !ice_is_pow2(val))
3310 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3311 if (val && !ice_is_pow2(val))
3318 * ice_rem_vsi_rss_list - remove VSI from RSS list
3319 * @hw: pointer to the hardware structure
3320 * @vsi_handle: software VSI handle
3322 * Remove the VSI from all RSS configurations in the list.
3324 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3326 struct ice_rss_cfg *r, *tmp;
/* Fast path: nothing to do if no RSS configs are tracked. */
3328 if (LIST_EMPTY(&hw->rss_list_head))
/* Walk with the _SAFE iterator because entries may be unlinked during
 * traversal; an entry is deleted once its last VSI bit is cleared.
 * NOTE(review): the ice_free() of the unlinked entry (~line 3337) is
 * not visible in this listing — verify no leak in the original.
 */
3331 ice_acquire_lock(&hw->rss_locks);
3332 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3333 ice_rss_cfg, l_entry)
3334 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3335 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3336 LIST_DEL(&r->l_entry);
3339 ice_release_lock(&hw->rss_locks);
3343 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3344 * @hw: pointer to the hardware structure
3345 * @vsi_handle: software VSI handle
3347 * This function will iterate through all flow profiles and disassociate
3348 * the VSI from that profile. If the flow profile has no VSIs it will
3351 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3353 const enum ice_block blk = ICE_BLK_RSS;
3354 struct ice_flow_prof *p, *t;
3355 enum ice_status status = ICE_SUCCESS;
3357 if (!ice_is_vsi_valid(hw, vsi_handle))
3358 return ICE_ERR_PARAM;
/* No RSS flow profiles at all: nothing to disassociate. */
3360 if (LIST_EMPTY(&hw->fl_profs[blk]))
/* Disassociate the VSI from each profile that references it; remove the
 * profile entirely once its last VSI is gone. _SAFE iteration permits
 * removal mid-walk.
 * NOTE(review): the early-break-on-error lines between these calls
 * (numbers skipped in this listing) are elided — verify error paths.
 */
3363 ice_acquire_lock(&hw->rss_locks);
3364 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3366 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3367 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3371 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3372 status = ice_flow_rem_prof(hw, blk, p->id);
3377 ice_release_lock(&hw->rss_locks);
3383 * ice_get_rss_hdr_type - get a RSS profile's header type
3384 * @prof: RSS flow profile
3386 static enum ice_rss_cfg_hdr_type
3387 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3389 enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
/* One segment means only outer headers are hashed; two segments mean a
 * tunnel, classified further by what the outer segment carries.
 */
3391 if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3392 hdr_type = ICE_RSS_OUTER_HEADERS;
3393 } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
/* These checks are independent ifs, not else-ifs: a later match
 * overrides an earlier one, so an outer IPv6 bit wins over IPv4.
 */
3394 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3395 hdr_type = ICE_RSS_INNER_HEADERS;
3396 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3397 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3398 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3399 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3406 * ice_rem_rss_list - remove RSS configuration from list
3407 * @hw: pointer to the hardware structure
3408 * @vsi_handle: software VSI handle
3409 * @prof: pointer to flow profile
3411 * Assumption: lock has already been acquired for RSS list
3414 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3416 enum ice_rss_cfg_hdr_type hdr_type;
3417 struct ice_rss_cfg *r, *tmp;
3419 /* Search for RSS hash fields associated to the VSI that match the
3420 * hash configurations associated to the flow profile. If found
3421 * remove from the RSS entry list of the VSI context and delete entry.
3423 hdr_type = ice_get_rss_hdr_type(prof);
/* Match on the innermost segment's hash fields + headers + hdr_type;
 * clear this VSI's bit, and unlink the entry when no VSI remains.
 * NOTE(review): the ice_free() of the unlinked entry (~line 3432) is
 * not visible in this listing — verify against the original source.
 */
3424 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3425 ice_rss_cfg, l_entry)
3426 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3427 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3428 r->hash.hdr_type == hdr_type) {
3429 ice_clear_bit(vsi_handle, r->vsis);
3430 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3431 LIST_DEL(&r->l_entry);
3439 * ice_add_rss_list - add RSS configuration to list
3440 * @hw: pointer to the hardware structure
3441 * @vsi_handle: software VSI handle
3442 * @prof: pointer to flow profile
3444 * Assumption: lock has already been acquired for RSS list
3446 static enum ice_status
3447 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3449 enum ice_rss_cfg_hdr_type hdr_type;
3450 struct ice_rss_cfg *r, *rss_cfg;
3452 hdr_type = ice_get_rss_hdr_type(prof);
/* If a matching config already exists, just mark this VSI in it.
 * NOTE(review): the early success return after ice_set_bit (~line
 * 3459) is not visible in this listing.
 */
3453 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3454 ice_rss_cfg, l_entry)
3455 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3456 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3457 r->hash.hdr_type == hdr_type) {
3458 ice_set_bit(vsi_handle, r->vsis);
/* Otherwise allocate a fresh entry describing the profile's innermost
 * segment and append it to the RSS list.
 * NOTE(review): the NULL check guarding the return below (~line 3463)
 * is elided in this listing.
 */
3462 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3464 return ICE_ERR_NO_MEMORY;
3466 rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3467 rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3468 rss_cfg->hash.hdr_type = hdr_type;
3469 rss_cfg->hash.symm = prof->cfg.symm;
3470 ice_set_bit(vsi_handle, rss_cfg->vsis);
3472 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* 64-bit flow-profile ID layout: hash fields in [0:31], protocol-header
 * bits in [32:61], encapsulation selector in [62:63] (see table below).
 */
3477 #define ICE_FLOW_PROF_HASH_S 0
3478 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3479 #define ICE_FLOW_PROF_HDR_S 32
3480 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3481 #define ICE_FLOW_PROF_ENCAP_S 62
3482 #define ICE_FLOW_PROF_ENCAP_M (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3484 /* Flow profile ID format:
3485 * [0:31] - Packet match fields
3486 * [32:61] - Protocol header
3487 * [62:63] - Encapsulation flag:
3490 * 2 for tunneled with outer ipv4
3491 * 3 for tunneled with outer ipv6
3493 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3494 (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3495 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3496 (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M))
3499 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3501 u32 s = ((src % 4) << 3); /* byte shift */
3502 u32 v = dst | 0x80; /* value to program */
3503 u8 i = src / 4; /* register index */
/* Read-modify-write a single byte lane of GLQF_HSYMM: each 32-bit
 * register packs four field-vector entries; bit 7 of the byte (the
 * 0x80 OR'ed above) enables symmetric XOR with entry 'dst'.
 */
3506 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3507 reg = (reg & ~(0xff << s)) | (v << s);
3508 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3512 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3515 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
/* Program the symmetric XOR in both directions (src<->dst) for each of
 * the 'len' consecutive field-vector words, mirroring indices from the
 * end of the vector because the HW ordering is reversed.
 */
3518 for (i = 0; i < len; i++) {
3519 ice_rss_config_xor_word(hw, prof_id,
3520 /* Yes, field vector in GLQF_HSYMM and
3521 * GLQF_HINSET is inversed!
3523 fv_last_word - (src + i),
3524 fv_last_word - (dst + i));
3525 ice_rss_config_xor_word(hw, prof_id,
3526 fv_last_word - (dst + i),
3527 fv_last_word - (src + i));
/* Reprogram GLQF_HSYMM for a profile: clear all symmetric-hash state,
 * then, if the profile requests symmetric RSS, pair up the src/dst
 * extractions (IPv4/IPv6 addresses, TCP/UDP/SCTP ports) so that
 * forward and reverse flows hash identically.
 */
3532 ice_rss_update_symm(struct ice_hw *hw,
3533 struct ice_flow_prof *prof)
3535 struct ice_prof_map *map;
/* Resolve the software profile ID to the HW profile index under the
 * profile-map lock; the lock is dropped before touching registers.
 * NOTE(review): the NULL-check on 'map' (line numbers skip 3540/3543)
 * is not visible in this listing — verify against the original.
 */
3538 ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3539 map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3541 prof_id = map->prof_id;
3542 ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3545 /* clear to default */
3546 for (m = 0; m < 6; m++)
3547 wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3548 if (prof->cfg.symm) {
3549 struct ice_flow_seg_info *seg =
3550 &prof->segs[prof->segs_cnt - 1];
3552 struct ice_flow_seg_xtrct *ipv4_src =
3553 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3554 struct ice_flow_seg_xtrct *ipv4_dst =
3555 &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3556 struct ice_flow_seg_xtrct *ipv6_src =
3557 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3558 struct ice_flow_seg_xtrct *ipv6_dst =
3559 &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3561 struct ice_flow_seg_xtrct *tcp_src =
3562 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3563 struct ice_flow_seg_xtrct *tcp_dst =
3564 &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3566 struct ice_flow_seg_xtrct *udp_src =
3567 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3568 struct ice_flow_seg_xtrct *udp_dst =
3569 &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3571 struct ice_flow_seg_xtrct *sctp_src =
3572 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3573 struct ice_flow_seg_xtrct *sctp_dst =
3574 &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
/* Only XOR a pair when BOTH sides are actually extracted (prot_id set);
 * widths: IPv4 addr = 2 words, IPv6 addr = 8 words, L4 port = 1 word.
 */
3577 if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3578 ice_rss_config_xor(hw, prof_id,
3579 ipv4_src->idx, ipv4_dst->idx, 2);
3582 if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3583 ice_rss_config_xor(hw, prof_id,
3584 ipv6_src->idx, ipv6_dst->idx, 8);
3587 if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3588 ice_rss_config_xor(hw, prof_id,
3589 tcp_src->idx, tcp_dst->idx, 1);
3592 if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3593 ice_rss_config_xor(hw, prof_id,
3594 udp_src->idx, udp_dst->idx, 1);
3597 if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3598 ice_rss_config_xor(hw, prof_id,
3599 sctp_src->idx, sctp_dst->idx, 1);
3604 * ice_add_rss_cfg_sync - add an RSS configuration
3605 * @hw: pointer to the hardware structure
3606 * @vsi_handle: software VSI handle
3607 * @cfg: configure parameters
3609 * Assumption: lock has already been acquired for RSS list
3611 static enum ice_status
3612 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3613 const struct ice_rss_hash_cfg *cfg)
3615 const enum ice_block blk = ICE_BLK_RSS;
3616 struct ice_flow_prof *prof = NULL;
3617 struct ice_flow_seg_info *segs;
3618 enum ice_status status;
/* Outer-only hashing needs one segment; tunneled cases need both an
 * outer and an inner segment.
 */
3621 segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3622 ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
/* NOTE(review): the NULL check on this allocation (~line 3626) and the
 * goto-exit/free cleanup lines throughout this function are elided in
 * this listing — the elided paths presumably free 'segs' on all exits.
 */
3624 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3627 return ICE_ERR_NO_MEMORY;
3629 /* Construct the packet segment info from the hashed fields */
3630 status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3634 /* Don't do RSS for GTPU Outer */
3635 if (segs_cnt == ICE_FLOW_SEG_SINGLE &&
3636 segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3637 status = ICE_SUCCESS;
3641 /* Search for a flow profile that has matching headers, hash fields
3642 * and has the input VSI associated to it. If found, no further
3643 * operations required and exit.
3645 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3647 ICE_FLOW_FIND_PROF_CHK_FLDS |
3648 ICE_FLOW_FIND_PROF_CHK_VSI)
/* Exact match found: only the symmetric flag may need refreshing. */
3650 if (prof->cfg.symm == cfg->symm)
3652 prof->cfg.symm = cfg->symm;
3656 /* Check if a flow profile exists with the same protocol headers and
3657 * associated with the input VSI. If so disassociate the VSI from
3658 * this profile. The VSI will be added to a new profile created with
3659 * the protocol header and new hash field configuration.
3661 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3662 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3664 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3666 ice_rem_rss_list(hw, vsi_handle, prof);
3670 /* Remove profile if it has no VSIs associated */
3671 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3672 status = ice_flow_rem_prof(hw, blk, prof->id);
3678 /* Search for a profile that has same match fields only. If this
3679 * exists then associate the VSI to this profile.
3681 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3683 ICE_FLOW_FIND_PROF_CHK_FLDS);
3685 if (prof->cfg.symm == cfg->symm) {
3686 status = ice_flow_assoc_prof(hw, blk, prof,
3689 status = ice_add_rss_list(hw, vsi_handle,
3692 /* if a profile exist but with different symmetric
3693 * requirement, just return error.
3695 status = ICE_ERR_NOT_SUPPORTED;
3700 /* Create a new flow profile with generated profile and packet
3701 * segment information.
3703 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3704 ICE_FLOW_GEN_PROFID(cfg->hash_flds,
3705 segs[segs_cnt - 1].hdrs,
3707 segs, segs_cnt, NULL, 0, &prof);
3711 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3712 /* If association to a new flow profile failed then this profile can
3716 ice_flow_rem_prof(hw, blk, prof->id);
3720 status = ice_add_rss_list(hw, vsi_handle, prof);
/* Record the symmetric setting on the profile, then push the
 * corresponding GLQF_HSYMM programming to hardware.
 */
3722 prof->cfg.symm = cfg->symm;
3724 ice_rss_update_symm(hw, prof);
3732 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3733 * @hw: pointer to the hardware structure
3734 * @vsi_handle: software VSI handle
3735 * @cfg: configure parameters
3737 * This function will generate a flow profile based on fields associated with
3738 * the input fields to hash on, the flow type and use the VSI number to add
3739 * a flow entry to the profile.
3742 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3743 const struct ice_rss_hash_cfg *cfg)
3745 struct ice_rss_hash_cfg local_cfg;
3746 enum ice_status status;
3748 if (!ice_is_vsi_valid(hw, vsi_handle) ||
3749 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3750 cfg->hash_flds == ICE_HASH_INVALID)
3751 return ICE_ERR_PARAM;
/* NOTE(review): 'local_cfg' is used below before any visible
 * initialization; the copy from *cfg (presumably local_cfg = *cfg at
 * elided line ~3753) is not shown in this listing — verify.
 */
3754 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
/* Caller asked for a specific header type: apply it once. */
3755 ice_acquire_lock(&hw->rss_locks);
3756 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3757 ice_release_lock(&hw->rss_locks);
/* ICE_RSS_ANY_HEADERS: apply to outer headers first, then (on
 * success, per the elided status check) to inner headers as well.
 */
3759 ice_acquire_lock(&hw->rss_locks);
3760 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3761 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3763 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3764 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3767 ice_release_lock(&hw->rss_locks);
3774 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3775 * @hw: pointer to the hardware structure
3776 * @vsi_handle: software VSI handle
3777 * @cfg: configure parameters
3779 * Assumption: lock has already been acquired for RSS list
3781 static enum ice_status
3782 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3783 const struct ice_rss_hash_cfg *cfg)
3785 const enum ice_block blk = ICE_BLK_RSS;
3786 struct ice_flow_seg_info *segs;
3787 struct ice_flow_prof *prof;
3788 enum ice_status status;
3791 segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3792 ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
/* NOTE(review): the allocation NULL check (~line 3795) and the
 * goto-exit/free of 'segs' on the paths below are elided in this
 * listing — verify the cleanup in the original source.
 */
3793 segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3796 return ICE_ERR_NO_MEMORY;
3798 /* Construct the packet segment info from the hashed fields */
3799 status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3803 /* Don't do RSS for GTPU Outer */
3804 if (segs_cnt == ICE_FLOW_SEG_SINGLE &&
3805 segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
3806 status = ICE_SUCCESS;
/* Require an exact headers+fields match before tearing anything down. */
3810 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3812 ICE_FLOW_FIND_PROF_CHK_FLDS);
3814 status = ICE_ERR_DOES_NOT_EXIST;
3818 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3822 /* Remove RSS configuration from VSI context before deleting
3825 ice_rem_rss_list(hw, vsi_handle, prof);
/* Drop the flow profile entirely once no VSI references it. */
3827 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
3828 status = ice_flow_rem_prof(hw, blk, prof->id);
3836 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3837 * @hw: pointer to the hardware structure
3838 * @vsi_handle: software VSI handle
3839 * @cfg: configure parameters
3841 * This function will lookup the flow profile based on the input
3842 * hash field bitmap, iterate through the profile entry list of
3843 * that profile and find entry associated with input VSI to be
3844 * removed. Calls are made to underlying flow apis which will in
3845 * turn build or update buffers for RSS XLT1 section.
3848 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3849 const struct ice_rss_hash_cfg *cfg)
3851 struct ice_rss_hash_cfg local_cfg;
3852 enum ice_status status;
3854 if (!ice_is_vsi_valid(hw, vsi_handle) ||
3855 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3856 cfg->hash_flds == ICE_HASH_INVALID)
3857 return ICE_ERR_PARAM;
/* NOTE(review): as in ice_add_rss_cfg, the copy of *cfg into
 * 'local_cfg' (presumably at elided line ~3860) is not visible here.
 */
3859 ice_acquire_lock(&hw->rss_locks);
3861 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3862 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
/* ICE_RSS_ANY_HEADERS: remove the outer-header config, then (per the
 * elided status check) the inner-header config too.
 */
3864 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3865 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3868 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3869 status = ice_rem_rss_cfg_sync(hw, vsi_handle,
3873 ice_release_lock(&hw->rss_locks);
3879 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3880 * @hw: pointer to the hardware structure
3881 * @vsi_handle: software VSI handle
3883 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3885 enum ice_status status = ICE_SUCCESS;
3886 struct ice_rss_cfg *r;
3888 if (!ice_is_vsi_valid(hw, vsi_handle))
3889 return ICE_ERR_PARAM;
/* Re-apply every remembered RSS config that names this VSI; used after
 * reset to rebuild hardware state from the software list.
 * NOTE(review): the break-on-failure after the sync call (lines
 * ~3896-3898) is elided in this listing.
 */
3891 ice_acquire_lock(&hw->rss_locks);
3892 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3893 ice_rss_cfg, l_entry) {
3894 if (ice_is_bit_set(r->vsis, vsi_handle)) {
3895 status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
3900 ice_release_lock(&hw->rss_locks);
3906 * ice_get_rss_cfg - returns hashed fields for the given header types
3907 * @hw: pointer to the hardware structure
3908 * @vsi_handle: software VSI handle
3909 * @hdrs: protocol header type
3911 * This function will return the match fields of the first instance of flow
3912 * profile having the given header types and containing input VSI
3914 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
3916 u64 rss_hash = ICE_HASH_INVALID;
3917 struct ice_rss_cfg *r;
3919 /* verify if the protocol header is non zero and VSI is valid */
3920 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
3921 return ICE_HASH_INVALID;
3923 ice_acquire_lock(&hw->rss_locks);
3924 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3925 ice_rss_cfg, l_entry)
3926 if (ice_is_bit_set(r->vsis, vsi_handle) &&
3927 r->hash.addl_hdrs == hdrs) {
3928 rss_hash = r->hash.hash_flds;
3931 ice_release_lock(&hw->rss_locks);