1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_common.h"
/* Size of known protocol header fields, expressed in bytes */
#define ICE_FLOW_FLD_SZ_ETH_TYPE 2
#define ICE_FLOW_FLD_SZ_VLAN 2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
#define ICE_FLOW_FLD_SZ_IP_DSCP 1
#define ICE_FLOW_FLD_SZ_IP_TTL 1
#define ICE_FLOW_FLD_SZ_IP_PROT 1
#define ICE_FLOW_FLD_SZ_PORT 2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
#define ICE_FLOW_FLD_SZ_ICMP_CODE 1
#define ICE_FLOW_FLD_SZ_ARP_OPER 2
#define ICE_FLOW_FLD_SZ_GRE_KEYID 4
/* Protocol header fields are extracted at the word boundaries as word-sized
 * values. Specify the displacement value of some non-word-aligned fields needed
 * to compute the offset of words containing the fields in the corresponding
 * protocol headers. Displacement values are expressed in number of bits.
 */
#define ICE_FLOW_FLD_IPV6_TTL_DSCP_DISP (-4)
#define ICE_FLOW_FLD_IPV6_TTL_PROT_DISP ((-2) * BITS_PER_BYTE)
#define ICE_FLOW_FLD_IPV6_TTL_TTL_DISP ((-1) * BITS_PER_BYTE)
32 /* Describe properties of a protocol header field */
33 struct ice_flow_field_info {
34 enum ice_flow_seg_hdr hdr;
35 s16 off; /* Offset from start of a protocol header, in bits */
36 u16 size; /* Size of fields in bits */
37 u16 mask; /* 16-bit mask for field */
/* Initializer for a struct ice_flow_field_info entry with no mask; offsets
 * and sizes are given in bytes and converted to the struct's bit units.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = 0, \
}

/* Same as ICE_FLOW_FLD_INFO but with an explicit 16-bit field mask */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
54 /* Table containing properties of supported protocol header fields */
56 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
58 /* ICE_FLOW_FIELD_IDX_ETH_DA */
59 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
60 /* ICE_FLOW_FIELD_IDX_ETH_SA */
61 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
62 /* ICE_FLOW_FIELD_IDX_S_VLAN */
63 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
64 /* ICE_FLOW_FIELD_IDX_C_VLAN */
65 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
66 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
67 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE),
69 /* ICE_FLOW_FIELD_IDX_IP_DSCP */
70 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 1, 1),
71 /* ICE_FLOW_FIELD_IDX_IP_TTL */
72 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 8, 1),
73 /* ICE_FLOW_FIELD_IDX_IP_PROT */
74 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 9, ICE_FLOW_FLD_SZ_IP_PROT),
75 /* ICE_FLOW_FIELD_IDX_IPV4_SA */
76 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
77 /* ICE_FLOW_FIELD_IDX_IPV4_DA */
78 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
80 /* ICE_FLOW_FIELD_IDX_IPV6_SA */
81 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
82 /* ICE_FLOW_FIELD_IDX_IPV6_DA */
83 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
85 /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
86 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
87 /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
88 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
89 /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
90 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
91 /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
92 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
93 /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
94 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
95 /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
96 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
97 /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
98 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
100 /* ICE_FLOW_FIELD_IDX_ARP_SIP */
101 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
102 /* ICE_FLOW_FIELD_IDX_ARP_DIP */
103 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
104 /* ICE_FLOW_FIELD_IDX_ARP_SHA */
105 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
106 /* ICE_FLOW_FIELD_IDX_ARP_DHA */
107 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
108 /* ICE_FLOW_FIELD_IDX_ARP_OP */
109 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
111 /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
112 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
113 /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
114 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
116 /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
117 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
120 /* Bitmaps indicating relevant packet types for a particular protocol header
122 * Packet types for packets with an Outer/First/Single MAC header
124 static const u32 ice_ptypes_mac_ofos[] = {
125 0xFDC00CC6, 0xBFBF7F7E, 0xF7EFDFDF, 0xFEFDFDFB,
126 0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
127 0x000B0F0F, 0x00000000, 0x00000000, 0x00000000,
128 0x00000000, 0x00000000, 0x00000000, 0x00000000,
129 0x00000000, 0x00000000, 0x00000000, 0x00000000,
130 0x00000000, 0x00000000, 0x00000000, 0x00000000,
131 0x00000000, 0x00000000, 0x00000000, 0x00000000,
132 0x00000000, 0x00000000, 0x00000000, 0x00000000,
135 /* Packet types for packets with an Innermost/Last MAC VLAN header */
136 static const u32 ice_ptypes_macvlan_il[] = {
137 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
138 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
139 0x00000000, 0x00000000, 0x00000000, 0x00000000,
140 0x00000000, 0x00000000, 0x00000000, 0x00000000,
141 0x00000000, 0x00000000, 0x00000000, 0x00000000,
142 0x00000000, 0x00000000, 0x00000000, 0x00000000,
143 0x00000000, 0x00000000, 0x00000000, 0x00000000,
144 0x00000000, 0x00000000, 0x00000000, 0x00000000,
147 /* Packet types for packets with an Outer/First/Single IPv4 header */
148 static const u32 ice_ptypes_ipv4_ofos[] = {
149 0xFDC00000, 0xBFBF7F7E, 0x00EFDFDF, 0x00000000,
150 0x00000000, 0x00000000, 0x00000000, 0x00000000,
151 0x0003000F, 0x00000000, 0x00000000, 0x00000000,
152 0x00000000, 0x00000000, 0x00000000, 0x00000000,
153 0x00000000, 0x00000000, 0x00000000, 0x00000000,
154 0x00000000, 0x00000000, 0x00000000, 0x00000000,
155 0x00000000, 0x00000000, 0x00000000, 0x00000000,
156 0x00000000, 0x00000000, 0x00000000, 0x00000000,
159 /* Packet types for packets with an Innermost/Last IPv4 header */
160 static const u32 ice_ptypes_ipv4_il[] = {
161 0xE0000000, 0xB807700E, 0x8001DC03, 0xE01DC03B,
162 0x0007700E, 0x00000000, 0x00000000, 0x00000000,
163 0x00000000, 0x00000000, 0x00000000, 0x00000000,
164 0x00000000, 0x00000000, 0x00000000, 0x00000000,
165 0x00000000, 0x00000000, 0x00000000, 0x00000000,
166 0x00000000, 0x00000000, 0x00000000, 0x00000000,
167 0x00000000, 0x00000000, 0x00000000, 0x00000000,
168 0x00000000, 0x00000000, 0x00000000, 0x00000000,
171 /* Packet types for packets with an Outer/First/Single IPv6 header */
172 static const u32 ice_ptypes_ipv6_ofos[] = {
173 0x00000000, 0x00000000, 0xF7000000, 0xFEFDFDFB,
174 0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
175 0x00080F00, 0x00000000, 0x00000000, 0x00000000,
176 0x00000000, 0x00000000, 0x00000000, 0x00000000,
177 0x00000000, 0x00000000, 0x00000000, 0x00000000,
178 0x00000000, 0x00000000, 0x00000000, 0x00000000,
179 0x00000000, 0x00000000, 0x00000000, 0x00000000,
180 0x00000000, 0x00000000, 0x00000000, 0x00000000,
183 /* Packet types for packets with an Innermost/Last IPv6 header */
184 static const u32 ice_ptypes_ipv6_il[] = {
185 0x00000000, 0x03B80770, 0x00EE01DC, 0x0EE00000,
186 0x03B80770, 0x00000000, 0x00000000, 0x00000000,
187 0x00000000, 0x00000000, 0x00000000, 0x00000000,
188 0x00000000, 0x00000000, 0x00000000, 0x00000000,
189 0x00000000, 0x00000000, 0x00000000, 0x00000000,
190 0x00000000, 0x00000000, 0x00000000, 0x00000000,
191 0x00000000, 0x00000000, 0x00000000, 0x00000000,
192 0x00000000, 0x00000000, 0x00000000, 0x00000000,
195 /* Packet types for packets with an Outermost/First ARP header */
196 static const u32 ice_ptypes_arp_of[] = {
197 0x00000800, 0x00000000, 0x00000000, 0x00000000,
198 0x00000000, 0x00000000, 0x00000000, 0x00000000,
199 0x00000000, 0x00000000, 0x00000000, 0x00000000,
200 0x00000000, 0x00000000, 0x00000000, 0x00000000,
201 0x00000000, 0x00000000, 0x00000000, 0x00000000,
202 0x00000000, 0x00000000, 0x00000000, 0x00000000,
203 0x00000000, 0x00000000, 0x00000000, 0x00000000,
204 0x00000000, 0x00000000, 0x00000000, 0x00000000,
207 /* UDP Packet types for non-tunneled packets or tunneled
208 * packets with inner UDP.
210 static const u32 ice_ptypes_udp_il[] = {
211 0x81000000, 0x20204040, 0x04081010, 0x80810102,
212 0x00204040, 0x00000000, 0x00000000, 0x00000000,
213 0x00000000, 0x00000000, 0x00000000, 0x00000000,
214 0x00000000, 0x00000000, 0x00000000, 0x00000000,
215 0x00000000, 0x00000000, 0x00000000, 0x00000000,
216 0x00000000, 0x00000000, 0x00000000, 0x00000000,
217 0x00000000, 0x00000000, 0x00000000, 0x00000000,
218 0x00000000, 0x00000000, 0x00000000, 0x00000000,
221 /* Packet types for packets with an Innermost/Last TCP header */
222 static const u32 ice_ptypes_tcp_il[] = {
223 0x04000000, 0x80810102, 0x10204040, 0x42040408,
224 0x00810102, 0x00000000, 0x00000000, 0x00000000,
225 0x00000000, 0x00000000, 0x00000000, 0x00000000,
226 0x00000000, 0x00000000, 0x00000000, 0x00000000,
227 0x00000000, 0x00000000, 0x00000000, 0x00000000,
228 0x00000000, 0x00000000, 0x00000000, 0x00000000,
229 0x00000000, 0x00000000, 0x00000000, 0x00000000,
230 0x00000000, 0x00000000, 0x00000000, 0x00000000,
233 /* Packet types for packets with an Innermost/Last SCTP header */
234 static const u32 ice_ptypes_sctp_il[] = {
235 0x08000000, 0x01020204, 0x20408081, 0x04080810,
236 0x01020204, 0x00000000, 0x00000000, 0x00000000,
237 0x00000000, 0x00000000, 0x00000000, 0x00000000,
238 0x00000000, 0x00000000, 0x00000000, 0x00000000,
239 0x00000000, 0x00000000, 0x00000000, 0x00000000,
240 0x00000000, 0x00000000, 0x00000000, 0x00000000,
241 0x00000000, 0x00000000, 0x00000000, 0x00000000,
242 0x00000000, 0x00000000, 0x00000000, 0x00000000,
245 /* Packet types for packets with an Outermost/First ICMP header */
246 static const u32 ice_ptypes_icmp_of[] = {
247 0x10000000, 0x00000000, 0x00000000, 0x00000000,
248 0x00000000, 0x00000000, 0x00000000, 0x00000000,
249 0x00000000, 0x00000000, 0x00000000, 0x00000000,
250 0x00000000, 0x00000000, 0x00000000, 0x00000000,
251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
254 0x00000000, 0x00000000, 0x00000000, 0x00000000,
257 /* Packet types for packets with an Innermost/Last ICMP header */
258 static const u32 ice_ptypes_icmp_il[] = {
259 0x00000000, 0x02040408, 0x40810102, 0x08101020,
260 0x02040408, 0x00000000, 0x00000000, 0x00000000,
261 0x00000000, 0x00000000, 0x00000000, 0x00000000,
262 0x00000000, 0x00000000, 0x00000000, 0x00000000,
263 0x00000000, 0x00000000, 0x00000000, 0x00000000,
264 0x00000000, 0x00000000, 0x00000000, 0x00000000,
265 0x00000000, 0x00000000, 0x00000000, 0x00000000,
266 0x00000000, 0x00000000, 0x00000000, 0x00000000,
269 /* Packet types for packets with an Outermost/First GRE header */
270 static const u32 ice_ptypes_gre_of[] = {
271 0x00000000, 0xBFBF7800, 0x00EFDFDF, 0xFEFDE000,
272 0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
273 0x00000000, 0x00000000, 0x00000000, 0x00000000,
274 0x00000000, 0x00000000, 0x00000000, 0x00000000,
275 0x00000000, 0x00000000, 0x00000000, 0x00000000,
276 0x00000000, 0x00000000, 0x00000000, 0x00000000,
277 0x00000000, 0x00000000, 0x00000000, 0x00000000,
278 0x00000000, 0x00000000, 0x00000000, 0x00000000,
281 /* Packet types for packets with an Innermost/Last MAC header */
282 static const u32 ice_ptypes_mac_il[] = {
283 0x00000000, 0x00000000, 0x00EFDE00, 0x00000000,
284 0x03BF7800, 0x00000000, 0x00000000, 0x00000000,
285 0x00000000, 0x00000000, 0x00000000, 0x00000000,
286 0x00000000, 0x00000000, 0x00000000, 0x00000000,
287 0x00000000, 0x00000000, 0x00000000, 0x00000000,
288 0x00000000, 0x00000000, 0x00000000, 0x00000000,
289 0x00000000, 0x00000000, 0x00000000, 0x00000000,
290 0x00000000, 0x00000000, 0x00000000, 0x00000000,
293 /* Manage parameters and info. used during the creation of a flow profile */
294 struct ice_flow_prof_params {
296 u16 entry_length; /* # of bytes formatted entry will require */
298 struct ice_flow_prof *prof;
300 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
301 * This will give us the direction flags.
303 struct ice_fv_word es[ICE_MAX_FV_WORDS];
304 u16 mask[ICE_MAX_FV_WORDS];
305 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
309 * ice_is_pow2 - check if integer value is a power of 2
310 * @val: unsigned integer to be validated
312 static bool ice_is_pow2(u64 val)
314 return (val && !(val & (val - 1)));
/* Groupings of supported protocol header flags by OSI layer, used when
 * validating the headers requested for a packet segment.
 */
#define ICE_FLOW_SEG_HDRS_L2_MASK \
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
326 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
327 * @segs: array of one or more packet segments that describe the flow
328 * @segs_cnt: number of packet segments provided
330 static enum ice_status
331 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
333 const u32 masks = (ICE_FLOW_SEG_HDRS_L2_MASK |
334 ICE_FLOW_SEG_HDRS_L3_MASK |
335 ICE_FLOW_SEG_HDRS_L4_MASK);
338 for (i = 0; i < segs_cnt; i++) {
339 /* No header specified */
340 if (!(segs[i].hdrs & masks) || (segs[i].hdrs & ~masks))
341 return ICE_ERR_PARAM;
343 /* Multiple L3 headers */
344 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
345 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
346 return ICE_ERR_PARAM;
348 /* Multiple L4 headers */
349 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
350 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
351 return ICE_ERR_PARAM;
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC 14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4 20
#define ICE_FLOW_PROT_HDR_SZ_IPV6 40
#define ICE_FLOW_PROT_HDR_SZ_ARP 28
#define ICE_FLOW_PROT_HDR_SZ_ICMP 8
#define ICE_FLOW_PROT_HDR_SZ_TCP 20
#define ICE_FLOW_PROT_HDR_SZ_UDP 8
#define ICE_FLOW_PROT_HDR_SZ_SCTP 12
369 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
370 * @params: information about the flow to be processed
371 * @seg: index of packet segment whose header size is to be determined
373 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
378 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
379 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
382 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
383 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
384 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
385 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
386 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
387 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
388 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
389 /* A L3 header is required if L4 is specified */
393 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
394 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
395 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
396 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
397 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
398 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
399 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
400 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers being present in packet segments of the specified flow profile.
 * It starts with all packet types and narrows the set by ANDing in the
 * per-header ptype bitmaps for each requested header.
 */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
	struct ice_flow_prof *prof;
	/* start with all ptypes set; each header below ANDs in its bitmap */
	ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
	for (i = 0; i < params->prof->segs_cnt; i++) {
		const ice_bitmap_t *src;
		hdrs = prof->segs[i].hdrs;
		if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
			/* outer (segment 0) vs. inner MAC ptype bitmap */
			src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
				(const ice_bitmap_t *)ice_ptypes_mac_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
			hdrs &= ~ICE_FLOW_SEG_HDR_ETH;
		/* VLAN ptypes are only tracked for inner segments */
		if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
			src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
			hdrs &= ~ICE_FLOW_SEG_HDR_VLAN;
		/* ARP is only valid as an outermost header */
		if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
			ice_and_bitmap(params->ptypes, params->ptypes,
				       (const ice_bitmap_t *)ice_ptypes_arp_of,
			hdrs &= ~ICE_FLOW_SEG_HDR_ARP;
		if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
				(const ice_bitmap_t *)ice_ptypes_ipv4_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
			hdrs &= ~ICE_FLOW_SEG_HDR_IPV4;
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
				(const ice_bitmap_t *)ice_ptypes_ipv6_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
			hdrs &= ~ICE_FLOW_SEG_HDR_IPV6;
		if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
				(const ice_bitmap_t *)ice_ptypes_icmp_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
			hdrs &= ~ICE_FLOW_SEG_HDR_ICMP;
		} else if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
			src = (const ice_bitmap_t *)ice_ptypes_udp_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
			hdrs &= ~ICE_FLOW_SEG_HDR_UDP;
		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
			ice_and_bitmap(params->ptypes, params->ptypes,
				       (const ice_bitmap_t *)ice_ptypes_tcp_il,
			hdrs &= ~ICE_FLOW_SEG_HDR_TCP;
		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
			src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
			hdrs &= ~ICE_FLOW_SEG_HDR_SCTP;
		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
			/* NOTE(review): GRE bitmap appears position-independent
			 * here — confirm against the header position rules */
			src = (const ice_bitmap_t *)ice_ptypes_gre_of;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
			hdrs &= ~ICE_FLOW_SEG_HDR_GRE;
500 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
501 * @hw: pointer to the HW struct
502 * @params: information about the flow to be processed
503 * @flags: The value of pkt_flags[x:x] in RX/TX MDID metadata.
505 * This function will allocate an extraction sequence entries for a DWORD size
506 * chunk of the packet flags.
508 static enum ice_status
509 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
510 struct ice_flow_prof_params *params,
511 enum ice_flex_mdid_pkt_flags flags)
513 u8 fv_words = hw->blk[params->blk].es.fvw;
516 /* Make sure the number of extraction sequence entries required does not
517 * exceed the block's capacity.
519 if (params->es_cnt >= fv_words)
520 return ICE_ERR_MAX_LIMIT;
522 /* some blocks require a reversed field vector layout */
523 if (hw->blk[params->blk].es.reverse)
524 idx = fv_words - params->es_cnt - 1;
526 idx = params->es_cnt;
528 params->es[idx].prot_id = ICE_PROT_META_ID;
529 params->es[idx].off = flags;
/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 *
 * This function determines the protocol ID, offset, and size of the given
 * field. It then allocates one or more extraction sequence entries for the
 * given field, and fill the entries with protocol ID and offset information.
 */
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
		    u8 seg, enum ice_flow_field fld)
	enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
	u8 fv_words = hw->blk[params->blk].es.fvw;
	struct ice_flow_fld_info *flds;
	u16 cnt, ese_bits, i;
	flds = params->prof->segs[seg].fields;
	/* Map the field to its protocol ID. Outer (seg 0) and inner segments
	 * use different protocol IDs for the same logical field.
	 */
	case ICE_FLOW_FIELD_IDX_ETH_DA:
	case ICE_FLOW_FIELD_IDX_ETH_SA:
	case ICE_FLOW_FIELD_IDX_S_VLAN:
	case ICE_FLOW_FIELD_IDX_C_VLAN:
		prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
	case ICE_FLOW_FIELD_IDX_ETH_TYPE:
		prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
	case ICE_FLOW_FIELD_IDX_IP_DSCP:
		/* IPv6 DSCP straddles a word boundary; apply its displacement */
		if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
			adj = ICE_FLOW_FLD_IPV6_TTL_DSCP_DISP;
	case ICE_FLOW_FIELD_IDX_IP_TTL:
	case ICE_FLOW_FIELD_IDX_IP_PROT:
		/* Some fields are located at different offsets in IPv4 and
		 * IPv6 headers.
		 */
		if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) {
			prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S :
			/* TTL and PROT share the same extraction seq. entry.
			 * Each is considered a sibling to the other in term
			 * sharing the same extraction sequence entry.
			 */
			if (fld == ICE_FLOW_FIELD_IDX_IP_TTL)
				sib = ICE_FLOW_FIELD_IDX_IP_PROT;
			else if (fld == ICE_FLOW_FIELD_IDX_IP_PROT)
				sib = ICE_FLOW_FIELD_IDX_IP_TTL;
		} else if (params->prof->segs[seg].hdrs &
			   ICE_FLOW_SEG_HDR_IPV6) {
			prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S :
			/* IPv6 hop limit / next header need bit displacement */
			if (fld == ICE_FLOW_FIELD_IDX_IP_TTL)
				adj = ICE_FLOW_FLD_IPV6_TTL_TTL_DISP;
			else if (fld == ICE_FLOW_FIELD_IDX_IP_PROT)
				adj = ICE_FLOW_FLD_IPV6_TTL_PROT_DISP;
	case ICE_FLOW_FIELD_IDX_IPV4_SA:
	case ICE_FLOW_FIELD_IDX_IPV4_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
	case ICE_FLOW_FIELD_IDX_IPV6_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
		prot_id = ICE_PROT_TCP_IL;
	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
		prot_id = seg == 0 ? ICE_PROT_UDP_IL_OR_S : ICE_PROT_UDP_OF;
	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
		prot_id = ICE_PROT_SCTP_IL;
	case ICE_FLOW_FIELD_IDX_ARP_SIP:
	case ICE_FLOW_FIELD_IDX_ARP_DIP:
	case ICE_FLOW_FIELD_IDX_ARP_SHA:
	case ICE_FLOW_FIELD_IDX_ARP_DHA:
	case ICE_FLOW_FIELD_IDX_ARP_OP:
		prot_id = ICE_PROT_ARP_OF;
	case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
	case ICE_FLOW_FIELD_IDX_ICMP_CODE:
		/* ICMP type and code share the same extraction seq. entry */
		prot_id = (params->prof->segs[seg].hdrs &
			   ICE_FLOW_SEG_HDR_IPV4) ?
			ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
		sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
			ICE_FLOW_FIELD_IDX_ICMP_CODE :
			ICE_FLOW_FIELD_IDX_ICMP_TYPE;
	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
		prot_id = ICE_PROT_GRE_OF;
		/* unsupported field IDs fall through to the error return */
		return ICE_ERR_NOT_IMPL;
	/* Each extraction sequence entry is a word in size, and extracts a
	 * word-aligned offset from a protocol header.
	 */
	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
	flds[fld].xtrct.prot_id = prot_id;
	/* word-aligned byte offset of the first word containing the field */
	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
		ICE_FLOW_FV_EXTRACT_SZ;
	/* bit displacement of the field within that word (adj may be negative) */
	flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
	flds[fld].xtrct.idx = params->es_cnt;
	/* Adjust the next field-entry index after accommodating the number of
	 * entries this field consumes
	 */
	cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
				  ice_flds_info[fld].size, ese_bits);
	/* Fill in the extraction sequence entries needed for this field */
	off = flds[fld].xtrct.off;
	mask = ice_flds_info[fld].mask;
	for (i = 0; i < cnt; i++) {
		/* Only consume an extraction sequence entry if there is no
		 * sibling field associated with this field or the sibling entry
		 * already extracts the word shared with this field.
		 */
		if (sib == ICE_FLOW_FIELD_IDX_MAX ||
		    flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
		    flds[sib].xtrct.off != off) {
			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= fv_words)
				return ICE_ERR_MAX_LIMIT;
			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
				idx = params->es_cnt;
			params->es[idx].prot_id = prot_id;
			params->es[idx].off = off;
			params->mask[idx] = mask;
		off += ICE_FLOW_FV_EXTRACT_SZ;
701 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
702 * @hw: pointer to the HW struct
703 * @params: information about the flow to be processed
704 * @seg: index of packet segment whose raw fields are to be be extracted
706 static enum ice_status
707 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
713 if (!params->prof->segs[seg].raws_cnt)
716 if (params->prof->segs[seg].raws_cnt >
717 ARRAY_SIZE(params->prof->segs[seg].raws))
718 return ICE_ERR_MAX_LIMIT;
720 /* Offsets within the segment headers are not supported */
721 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
723 return ICE_ERR_PARAM;
725 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
726 struct ice_flow_seg_fld_raw *raw;
729 raw = ¶ms->prof->segs[seg].raws[i];
731 /* Only support matching raw fields in the payload */
732 if (raw->off < hdrs_sz)
733 return ICE_ERR_PARAM;
735 /* Convert the segment-relative offset into payload-relative
738 off = raw->off - hdrs_sz;
740 /* Storing extraction information */
741 raw->info.xtrct.prot_id = ICE_PROT_PAY;
742 raw->info.xtrct.off = (off / ICE_FLOW_FV_EXTRACT_SZ) *
743 ICE_FLOW_FV_EXTRACT_SZ;
744 raw->info.xtrct.disp = (off % ICE_FLOW_FV_EXTRACT_SZ) *
746 raw->info.xtrct.idx = params->es_cnt;
748 /* Determine the number of field vector entries this raw field
751 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
752 (raw->info.src.last * BITS_PER_BYTE),
753 (ICE_FLOW_FV_EXTRACT_SZ *
755 off = raw->info.xtrct.off;
756 for (j = 0; j < cnt; j++) {
757 /* Make sure the number of extraction sequence required
758 * does not exceed the block's capability
760 if (params->es_cnt >= hw->blk[params->blk].es.count ||
761 params->es_cnt >= ICE_MAX_FV_WORDS)
762 return ICE_ERR_MAX_LIMIT;
764 params->es[params->es_cnt].prot_id = ICE_PROT_PAY;
765 params->es[params->es_cnt].off = off;
767 off += ICE_FLOW_FV_EXTRACT_SZ;
775 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
776 * @hw: pointer to the HW struct
777 * @params: information about the flow to be processed
779 * This function iterates through all matched fields in the given segments, and
780 * creates an extraction sequence for the fields.
782 static enum ice_status
783 ice_flow_create_xtrct_seq(struct ice_hw *hw,
784 struct ice_flow_prof_params *params)
786 enum ice_status status = ICE_SUCCESS;
789 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
792 if (params->blk == ICE_BLK_ACL)
793 ice_flow_xtract_pkt_flags(hw, params,
794 ICE_RX_MDID_PKT_FLAGS_15_0);
796 for (i = 0; i < params->prof->segs_cnt; i++) {
797 u64 match = params->prof->segs[i].match;
800 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
801 const u64 bit = BIT_ULL(j);
804 status = ice_flow_xtract_fld
805 (hw, params, i, (enum ice_flow_field)j);
812 /* Process raw matching bytes */
813 status = ice_flow_xtract_raws(hw, params, i);
822 * ice_flow_proc_segs - process all packet segments associated with a profile
823 * @hw: pointer to the HW struct
824 * @params: information about the flow to be processed
826 static enum ice_status
827 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
829 enum ice_status status;
831 status = ice_flow_proc_seg_hdrs(params);
835 status = ice_flow_create_xtrct_seq(hw, params);
839 switch (params->blk) {
841 /* Only header information is provided for RSS configuration.
842 * No further processing is needed.
844 status = ICE_SUCCESS;
847 status = ICE_SUCCESS;
851 return ICE_ERR_NOT_IMPL;
/* Condition flags for ice_flow_find_prof_conds() */
#define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001 /* also compare matched fields */
#define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002 /* require VSI association */
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004 /* ignore flow direction */
/**
 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
 *
 * NOTE(review): assumes the caller holds the profile list lock for @blk —
 * confirm against callers such as ice_flow_find_prof().
 */
static struct ice_flow_prof *
ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
			 u8 segs_cnt, u16 vsi_handle, u32 conds)
	struct ice_flow_prof *p;
	LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
		/* direction must match unless NOT_CHK_DIR is requested */
		if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
		    segs_cnt && segs_cnt == p->segs_cnt) {
			/* Check for profile-VSI association if specified */
			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
			    ice_is_vsi_valid(hw, vsi_handle) &&
			    !ice_is_bit_set(p->vsis, vsi_handle))
			/* Protocol headers must be checked. Matched fields are
			 * checked if specified.
			 */
			for (i = 0; i < segs_cnt; i++)
				if (segs[i].hdrs != p->segs[i].hdrs ||
				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
				     segs[i].match != p->segs[i].match))
			/* A match is found if all segments are matched */
908 * ice_flow_find_prof - Look up a profile matching headers and matched fields
909 * @hw: pointer to the HW struct
910 * @blk: classification stage
911 * @dir: flow direction
912 * @segs: array of one or more packet segments that describe the flow
913 * @segs_cnt: number of packet segments provided
916 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
917 struct ice_flow_seg_info *segs, u8 segs_cnt)
919 struct ice_flow_prof *p;
921 ice_acquire_lock(&hw->fl_profs_locks[blk]);
922 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
923 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
924 ice_release_lock(&hw->fl_profs_locks[blk]);
926 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
930 * ice_flow_find_prof_id - Look up a profile with given profile ID
931 * @hw: pointer to the HW struct
932 * @blk: classification stage
933 * @prof_id: unique ID to identify this flow profile
935 static struct ice_flow_prof *
936 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
938 struct ice_flow_prof *p;
940 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
941 if (p->id == prof_id)
/**
 * ice_dealloc_flow_entry - Deallocate flow entry memory
 * @hw: pointer to the HW struct
 * @entry: flow entry to be removed
 *
 * NOTE(review): frees the entry's formatted-entry buffer and its actions
 * array; presumably NULL-guards exist on the elided lines — confirm.
 */
ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
	ice_free(hw, entry->entry);
	ice_free(hw, entry->acts);
972 * ice_flow_rem_entry_sync - Remove a flow entry
973 * @hw: pointer to the HW struct
974 * @entry: flow entry to be removed
976 static enum ice_status
977 ice_flow_rem_entry_sync(struct ice_hw *hw, struct ice_flow_entry *entry)
980 return ICE_ERR_BAD_PTR;
982 LIST_DEL(&entry->l_entry);
984 ice_dealloc_flow_entry(hw, entry);
990 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
991 * @hw: pointer to the HW struct
992 * @blk: classification stage
993 * @dir: flow direction
994 * @prof_id: unique ID to identify this flow profile
995 * @segs: array of one or more packet segments that describe the flow
996 * @segs_cnt: number of packet segments provided
997 * @acts: array of default actions
998 * @acts_cnt: number of default actions
999 * @prof: stores the returned flow profile added
1001 * Assumption: the caller has acquired the lock to the profile list
1003 static enum ice_status
1004 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1005 enum ice_flow_dir dir, u64 prof_id,
1006 struct ice_flow_seg_info *segs, u8 segs_cnt,
1007 struct ice_flow_action *acts, u8 acts_cnt,
1008 struct ice_flow_prof **prof)
1010 struct ice_flow_prof_params params;
1011 enum ice_status status = ICE_SUCCESS;
/* Reject a NULL output pointer, or an action count with no actions */
1014 if (!prof || (acts_cnt && !acts))
1015 return ICE_ERR_BAD_PTR;
/* FIX(review): "&params" had been corrupted to "¶ms" (HTML-entity
 * mojibake of "&para;") on this and four later lines; restored.
 */
1017 ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
1018 params.prof = (struct ice_flow_prof *)
1019 ice_malloc(hw, sizeof(*params.prof));
1021 return ICE_ERR_NO_MEMORY;
1023 /* initialize extraction sequence to all invalid (0xff) */
1024 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
1025 params.es[i].prot_id = ICE_PROT_INVALID;
1026 params.es[i].off = ICE_FV_OFFSET_INVAL;
1030 params.prof->id = prof_id;
1031 params.prof->dir = dir;
1032 params.prof->segs_cnt = segs_cnt;
1034 /* Make a copy of the segments that need to be persistent in the flow
1037 for (i = 0; i < segs_cnt; i++)
1038 ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
1039 ICE_NONDMA_TO_NONDMA);
1041 /* Make a copy of the actions that need to be persistent in the flow
1045 params.prof->acts = (struct ice_flow_action *)
1046 ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
1047 ICE_NONDMA_TO_NONDMA);
1049 if (!params.prof->acts) {
1050 status = ICE_ERR_NO_MEMORY;
/* Build the extraction sequence from the packet segment descriptions */
1055 status = ice_flow_proc_segs(hw, &params);
1057 ice_debug(hw, ICE_DBG_FLOW,
1058 "Error processing a flow's packet segments\n");
1062 /* Add a HW profile for this flow profile */
1063 status = ice_add_prof_with_mask(hw, blk, prof_id, (u8 *)params.ptypes,
1064 params.es, params.mask)
1085 * ice_flow_rem_prof_sync - remove a flow profile
1086 * @hw: pointer to the hardware structure
1087 * @blk: classification stage
1088 * @prof: pointer to flow profile to remove
1090 * Assumption: the caller has acquired the lock to the profile list
1092 static enum ice_status
1093 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
1094 struct ice_flow_prof *prof)
1096 enum ice_status status = ICE_SUCCESS;
1098 /* Remove all remaining flow entries before removing the flow profile */
1099 if (!LIST_EMPTY(&prof->entries)) {
1100 struct ice_flow_entry *e, *t;
1102 ice_acquire_lock(&prof->entries_lock);
1104 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
1106 status = ice_flow_rem_entry_sync(hw, e);
1111 ice_release_lock(&prof->entries_lock);
1114 /* Remove all hardware profiles associated with this flow profile */
1115 status = ice_rem_prof(hw, blk, prof->id);
/* Tear down the profile itself: unlink, destroy its lock, and free
 * the action array. The prof pointer is invalid once this completes.
 */
1117 LIST_DEL(&prof->l_entry);
1118 ice_destroy_lock(&prof->entries_lock);
1120 ice_free(hw, prof->acts);
1128 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
1129 * @hw: pointer to the hardware structure
1130 * @blk: classification stage
1131 * @vsi_handle: software VSI handle
1132 * @vsig: target VSI group
1134 * Assumption: the caller has already verified that the VSI to
1135 * be added has the same characteristics as the VSIG and will
1136 * thereby have access to all resources added to that VSIG.
1139 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
1142 enum ice_status status;
/* Validate the software VSI handle and the classification block */
1144 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
1145 return ICE_ERR_PARAM;
/* Map the software handle to a HW VSI number and add it to the VSIG
 * while holding the block's profile-list lock.
 */
1147 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1148 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
1150 ice_release_lock(&hw->fl_profs_locks[blk]);
1156 * ice_flow_assoc_prof - associate a VSI with a flow profile
1157 * @hw: pointer to the hardware structure
1158 * @blk: classification stage
1159 * @prof: pointer to flow profile
1160 * @vsi_handle: software VSI handle
1162 * Assumption: the caller has acquired the lock to the profile list
1163 * and the software VSI handle has been validated
1165 static enum ice_status
1166 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
1167 struct ice_flow_prof *prof, u16 vsi_handle)
1169 enum ice_status status = ICE_SUCCESS;
/* No-op if the VSI is already associated (bit already set) */
1171 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
1172 status = ice_add_prof_id_flow(hw, blk,
1173 ice_get_hw_vsi_num(hw,
/* Track the association in the profile's VSI bitmap on success */
1177 ice_set_bit(vsi_handle, prof->vsis);
1179 ice_debug(hw, ICE_DBG_FLOW,
1180 "HW profile add failed, %d\n",
1188 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
1189 * @hw: pointer to the hardware structure
1190 * @blk: classification stage
1191 * @prof: pointer to flow profile
1192 * @vsi_handle: software VSI handle
1194 * Assumption: the caller has acquired the lock to the profile list
1195 * and the software VSI handle has been validated
1197 static enum ice_status
1198 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
1199 struct ice_flow_prof *prof, u16 vsi_handle)
1201 enum ice_status status = ICE_SUCCESS;
/* Only act if the VSI is currently associated with this profile */
1203 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
1204 status = ice_rem_prof_id_flow(hw, blk,
1205 ice_get_hw_vsi_num(hw,
/* Clear the tracking bit only after the HW removal succeeds */
1209 ice_clear_bit(vsi_handle, prof->vsis);
1211 ice_debug(hw, ICE_DBG_FLOW,
1212 "HW profile remove failed, %d\n",
1220 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
1221 * @hw: pointer to the HW struct
1222 * @blk: classification stage
1223 * @dir: flow direction
1224 * @prof_id: unique ID to identify this flow profile
1225 * @segs: array of one or more packet segments that describe the flow
1226 * @segs_cnt: number of packet segments provided
1227 * @acts: array of default actions
1228 * @acts_cnt: number of default actions
1229 * @prof: stores the returned flow profile added
1232 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1233 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
1234 struct ice_flow_action *acts, u8 acts_cnt,
1235 struct ice_flow_prof **prof)
1237 enum ice_status status;
/* Parameter validation: segment count, then (elided) checks that
 * return ICE_ERR_PARAM / ICE_ERR_BAD_PTR for bad argument combinations.
 */
1239 if (segs_cnt > ICE_FLOW_SEG_MAX)
1240 return ICE_ERR_MAX_LIMIT;
1243 return ICE_ERR_PARAM;
1246 return ICE_ERR_BAD_PTR;
/* Verify the segments describe a supported protocol-header sequence */
1248 status = ice_flow_val_hdrs(segs, segs_cnt);
1252 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1254 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
1255 acts, acts_cnt, prof);
/* On success, publish the new profile on the block's profile list */
1257 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
1259 ice_release_lock(&hw->fl_profs_locks[blk]);
1265 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
1266 * @hw: pointer to the HW struct
1267 * @blk: the block for which the flow profile is to be removed
1268 * @prof_id: unique ID of the flow profile to be removed
1271 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1273 struct ice_flow_prof *prof;
1274 enum ice_status status;
1276 ice_acquire_lock(&hw->fl_profs_locks[blk]);
/* Resolve the ID to a profile object; not finding it is an error */
1278 prof = ice_flow_find_prof_id(hw, blk, prof_id);
1280 status = ICE_ERR_DOES_NOT_EXIST;
1284 /* prof becomes invalid after the call */
1285 status = ice_flow_rem_prof_sync(hw, blk, prof);
1288 ice_release_lock(&hw->fl_profs_locks[blk]);
1294 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1295 * @hw: pointer to the HW struct
1296 * @blk: classification stage
1297 * @prof_id: the profile ID handle
1298 * @hw_prof_id: pointer to variable to receive the HW profile ID
1301 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1304 struct ice_prof_map *map;
/* Translate the software profile ID handle into the HW profile ID
 * via the block's profile map; report DOES_NOT_EXIST when unmapped.
 */
1306 map = ice_search_prof_id(hw, blk, prof_id);
1308 *hw_prof_id = map->prof_id;
1312 return ICE_ERR_DOES_NOT_EXIST;
1316 * ice_flow_find_entry - look for a flow entry using its unique ID
1317 * @hw: pointer to the HW struct
1318 * @blk: classification stage
1319 * @entry_id: unique ID to identify this flow entry
1321 * This function looks for the flow entry with the specified unique ID in all
1322 * flow profiles of the specified classification stage. If the entry is found,
1323 * it returns the handle to the flow entry. Otherwise, it returns
1324 * ICE_FLOW_ENTRY_ID_INVAL.
1326 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
1328 struct ice_flow_entry *found = NULL;
1329 struct ice_flow_prof *p;
1331 ice_acquire_lock(&hw->fl_profs_locks[blk]);
/* Walk every profile in the block; within each, scan its entry list
 * under that profile's entries_lock (nested inside the list lock).
 */
1333 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
1334 struct ice_flow_entry *e;
1336 ice_acquire_lock(&p->entries_lock);
1337 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
1338 if (e->id == entry_id) {
1342 ice_release_lock(&p->entries_lock);
1348 ice_release_lock(&hw->fl_profs_locks[blk]);
/* Convert the entry pointer to an opaque handle for the caller */
1350 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
1354 * ice_flow_add_entry - Add a flow entry
1355 * @hw: pointer to the HW struct
1356 * @blk: classification stage
1357 * @prof_id: ID of the profile to add a new flow entry to
1358 * @entry_id: unique ID to identify this flow entry
1359 * @vsi_handle: software VSI handle for the flow entry
1360 * @prio: priority of the flow entry
1361 * @data: pointer to a data buffer containing flow entry's match values/masks
1362 * @acts: arrays of actions to be performed on a match
1363 * @acts_cnt: number of actions
1364 * @entry_h: pointer to buffer that receives the new flow entry's handle
1367 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1368 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
1369 void *data, struct ice_flow_action *acts, u8 acts_cnt,
1372 struct ice_flow_prof *prof = NULL;
1373 struct ice_flow_entry *e = NULL;
1374 enum ice_status status = ICE_SUCCESS;
/* An action count without an action array is inconsistent input */
1376 if (acts_cnt && !acts)
1377 return ICE_ERR_PARAM;
1379 /* No flow entry data is expected for RSS */
1380 if (!entry_h || (!data && blk != ICE_BLK_RSS))
1381 return ICE_ERR_BAD_PTR;
1383 if (!ice_is_vsi_valid(hw, vsi_handle))
1384 return ICE_ERR_PARAM;
1386 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1388 prof = ice_flow_find_prof_id(hw, blk, prof_id);
1390 status = ICE_ERR_DOES_NOT_EXIST;
1392 /* Allocate memory for the entry being added and associate
1393 * the VSI to the found flow profile
1395 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
1397 status = ICE_ERR_NO_MEMORY;
1399 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1402 ice_release_lock(&hw->fl_profs_locks[blk]);
/* Populate the entry (ID/prio fields elided in this listing) */
1407 e->vsi_handle = vsi_handle;
1413 /* RSS will add only one entry per VSI per profile */
/* Non-RSS blocks are not supported by this path yet */
1420 status = ICE_ERR_NOT_IMPL;
1424 ice_acquire_lock(&prof->entries_lock);
1425 LIST_ADD(&e->l_entry, &prof->entries);
1426 ice_release_lock(&prof->entries_lock);
/* Hand the opaque entry handle back to the caller */
1428 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
/* Error path: free partially-built entry resources */
1433 ice_free(hw, e->entry);
1441 * ice_flow_rem_entry - Remove a flow entry
1442 * @hw: pointer to the HW struct
1443 * @entry_h: handle to the flow entry to be removed
1445 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h)
1447 struct ice_flow_entry *entry;
1448 struct ice_flow_prof *prof;
1449 enum ice_status status;
1451 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
1452 return ICE_ERR_PARAM;
/* Decode the opaque handle back into an entry pointer */
1454 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
1456 /* Retain the pointer to the flow profile as the entry will be freed */
1459 ice_acquire_lock(&prof->entries_lock);
1460 status = ice_flow_rem_entry_sync(hw, entry);
1461 ice_release_lock(&prof->entries_lock);
1467 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
1468 * @seg: packet segment the field being set belongs to
1469 * @fld: field to be set
1470 * @type: type of the field
1471 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
1472 * entry's input buffer
1473 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
1475 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
1476 * entry's input buffer
1478 * This helper function stores information of a field being matched, including
1479 * the type of the field and the locations of the value to match, the mask, and
1480 * the upper-bound value in the start of the input buffer for a flow entry.
1481 * This function should only be used for fixed-size data structures.
1483 * This function also opportunistically determines the protocol headers to be
1484 * present based on the fields being set. Some fields cannot be used alone to
1485 * determine the protocol headers present. Sometimes, fields for particular
1486 * protocol headers are not matched. In those cases, the protocol headers
1487 * must be explicitly set.
1490 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1491 enum ice_flow_fld_match_type type, u16 val_loc,
1492 u16 mask_loc, u16 last_loc)
1494 u64 bit = BIT_ULL(fld);
/* Range matches get extra bookkeeping (body elided in this listing) */
1497 if (type == ICE_FLOW_FLD_TYPE_RANGE)
/* Record where in the entry's input buffer the value/mask/last live */
1500 seg->fields[fld].type = type;
1501 seg->fields[fld].src.val = val_loc;
1502 seg->fields[fld].src.mask = mask_loc;
1503 seg->fields[fld].src.last = last_loc;
/* Opportunistically mark the protocol header this field implies */
1505 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
1509 * ice_flow_set_fld - specifies locations of field from entry's input buffer
1510 * @seg: packet segment the field being set belongs to
1511 * @fld: field to be set
1512 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
1513 * entry's input buffer
1514 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
1516 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
1517 * entry's input buffer
1518 * @range: indicate if field being matched is to be in a range
1520 * This function specifies the locations, in the form of byte offsets from the
1521 * start of the input buffer for a flow entry, from where the value to match,
1522 * the mask value, and upper value can be extracted. These locations are then
1523 * stored in the flow profile. When adding a flow entry associated with the
1524 * flow profile, these locations will be used to quickly extract the values and
1525 * create the content of a match entry. This function should only be used for
1526 * fixed-size data structures.
1529 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1530 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
/* Thin wrapper: select the match type from the range flag and
 * delegate all bookkeeping to ice_flow_set_fld_ext().
 */
1532 enum ice_flow_fld_match_type t = range ?
1533 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
1535 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
1539 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
1540 * @seg: packet segment the field being set belongs to
1541 * @fld: field to be set
1542 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
1543 * entry's input buffer
1544 * @pref_loc: location of prefix value from entry's input buffer
1545 * @pref_sz: size of the location holding the prefix value
1547 * This function specifies the locations, in the form of byte offsets from the
1548 * start of the input buffer for a flow entry, from where the value to match
1549 * and the IPv4 prefix value can be extracted. These locations are then stored
1550 * in the flow profile. When adding flow entries to the associated flow profile,
1551 * these locations can be used to quickly extract the values to create the
1552 * content of a match entry. This function should only be used for fixed-size
1556 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1557 u16 val_loc, u16 pref_loc, u8 pref_sz)
1559 /* For this type of field, the "mask" location is for the prefix value's
1560 * location and the "last" location is for the size of the location of
/* Reuses the generic field-setter with PREFIX semantics: mask_loc
 * carries pref_loc and last_loc carries the prefix size.
 */
1563 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
1564 pref_loc, (u16)pref_sz);
1568 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
1569 * @seg: packet segment the field being set belongs to
1570 * @off: offset of the raw field from the beginning of the segment in bytes
1571 * @len: length of the raw pattern to be matched
1572 * @val_loc: location of the value to match from entry's input buffer
1573 * @mask_loc: location of mask value from entry's input buffer
1575 * This function specifies the offset of the raw field to be match from the
1576 * beginning of the specified packet segment, and the locations, in the form of
1577 * byte offsets from the start of the input buffer for a flow entry, from where
1578 * the value to match and the mask value to be extracted. These locations are
1579 * then stored in the flow profile. When adding flow entries to the associated
1580 * flow profile, these locations can be used to quickly extract the values to
1581 * create the content of a match entry. This function should only be used for
1582 * fixed-size data structures.
1585 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
1586 u16 val_loc, u16 mask_loc)
/* Append a raw-match descriptor if the per-segment table has room */
1588 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
1589 seg->raws[seg->raws_cnt].off = off;
1590 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
1591 seg->raws[seg->raws_cnt].info.src.val = val_loc;
1592 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
1593 /* The "last" field is used to store the length of the field */
1594 seg->raws[seg->raws_cnt].info.src.last = len;
1597 /* Overflows of "raws" will be handled as an error condition later in
1598 * the flow when this information is processed.
/* Header bitmasks used to validate RSS segment configurations:
 * exactly one L3 bit and at most one L4 bit may be selected.
 */
1603 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
1604 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
1606 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
1607 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
1608 ICE_FLOW_SEG_HDR_SCTP)
1610 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
1611 (ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
1612 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
1615 * ice_flow_set_rss_seg_info - setup packet segments for RSS
1616 * @segs: pointer to the flow field segment(s)
1617 * @hash_fields: fields to be hashed on for the segment(s)
1618 * @flow_hdr: protocol header fields within a packet segment
1620 * Helper function to extract fields from hash bitmap and use flow
1621 * header value to set flow field segment for further use in flow
1622 * profile entry or removal.
1624 static enum ice_status
1625 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
1628 u64 val = hash_fields;
/* Register each hash-field bit as a match field with no input-buffer
 * locations (RSS hashes on the field, it does not match a value).
 */
1631 for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
1632 u64 bit = BIT_ULL(i);
1635 ice_flow_set_fld(segs, (enum ice_flow_field)i,
1636 ICE_FLOW_FLD_OFF_INVAL,
1637 ICE_FLOW_FLD_OFF_INVAL,
1638 ICE_FLOW_FLD_OFF_INVAL, false);
1642 ICE_FLOW_SET_HDRS(segs, flow_hdr);
/* Reject headers outside the RSS-supported set */
1644 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
1645 return ICE_ERR_PARAM;
/* Exactly one L3 header must be present (power-of-2 check) */
1647 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
1648 if (!ice_is_pow2(val))
/* At most one L4 header may be present */
1651 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
1652 if (val && !ice_is_pow2(val))
1659 * ice_rem_vsi_rss_list - remove VSI from RSS list
1660 * @hw: pointer to the hardware structure
1661 * @vsi_handle: software VSI handle
1663 * Remove the VSI from all RSS configurations in the list.
1665 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
1667 struct ice_rss_cfg *r, *tmp;
/* Unlocked early-out; the list is re-walked under rss_locks below */
1669 if (LIST_EMPTY(&hw->rss_list_head))
1672 ice_acquire_lock(&hw->rss_locks);
1673 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
1674 ice_rss_cfg, l_entry) {
1675 if (ice_is_bit_set(r->vsis, vsi_handle)) {
1676 ice_clear_bit(vsi_handle, r->vsis);
/* Drop the RSS config entirely once no VSI references it */
1678 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
1679 LIST_DEL(&r->l_entry);
1684 ice_release_lock(&hw->rss_locks);
1688 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
1689 * @hw: pointer to the hardware structure
1690 * @vsi_handle: software VSI handle
1692 * This function will iterate through all flow profiles and disassociate
1693 * the VSI from that profile. If the flow profile has no VSIs it will
1696 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1698 const enum ice_block blk = ICE_BLK_RSS;
1699 struct ice_flow_prof *p, *t;
1700 enum ice_status status = ICE_SUCCESS;
1702 if (!ice_is_vsi_valid(hw, vsi_handle))
1703 return ICE_ERR_PARAM;
/* Unlocked early-out; the list is re-walked under the lock below */
1705 if (LIST_EMPTY(&hw->fl_profs[blk]))
1708 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1709 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
1711 if (ice_is_bit_set(p->vsis, vsi_handle)) {
1712 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
/* Remove the profile once its last VSI is disassociated */
1716 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
1717 status = ice_flow_rem_prof_sync(hw, blk, p);
1723 ice_release_lock(&hw->fl_profs_locks[blk]);
1729 * ice_rem_rss_list - remove RSS configuration from list
1730 * @hw: pointer to the hardware structure
1731 * @vsi_handle: software VSI handle
1732 * @prof: pointer to flow profile
1734 * Assumption: lock has already been acquired for RSS list
1737 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1739 struct ice_rss_cfg *r, *tmp;
1741 /* Search for RSS hash fields associated to the VSI that match the
1742 * hash configurations associated to the flow profile. If found
1743 * remove from the RSS entry list of the VSI context and delete entry.
1745 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
1746 ice_rss_cfg, l_entry) {
/* Match on the profile's last segment: hash fields and headers */
1747 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1748 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1749 ice_clear_bit(vsi_handle, r->vsis);
/* Free the list node when no VSI references it anymore */
1750 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
1751 LIST_DEL(&r->l_entry);
1760 * ice_add_rss_list - add RSS configuration to list
1761 * @hw: pointer to the hardware structure
1762 * @vsi_handle: software VSI handle
1763 * @prof: pointer to flow profile
1765 * Assumption: lock has already been acquired for RSS list
1767 static enum ice_status
1768 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1770 struct ice_rss_cfg *r, *rss_cfg;
/* If an identical configuration already exists, just mark this VSI */
1772 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
1773 ice_rss_cfg, l_entry)
1774 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1775 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1776 ice_set_bit(vsi_handle, r->vsis);
/* Otherwise allocate a new node from the profile's last segment */
1780 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
1782 return ICE_ERR_NO_MEMORY;
1784 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
1785 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
1786 ice_set_bit(vsi_handle, rss_cfg->vsis);
1788 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
/* A 64-bit RSS profile ID packs the 32-bit hash-field value in the
 * low word and the 32-bit packet-header bitmap in the high word.
 */
1793 #define ICE_FLOW_PROF_HASH_S 0
1794 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
1795 #define ICE_FLOW_PROF_HDR_S 32
1796 #define ICE_FLOW_PROF_HDR_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HDR_S)
1798 #define ICE_FLOW_GEN_PROFID(hash, hdr) \
1799 (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
1800 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M))
1803 * ice_add_rss_cfg_sync - add an RSS configuration
1804 * @hw: pointer to the hardware structure
1805 * @vsi_handle: software VSI handle
1806 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
1807 * @addl_hdrs: protocol header fields
1809 * Assumption: lock has already been acquired for RSS list
1811 static enum ice_status
1812 ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1815 const enum ice_block blk = ICE_BLK_RSS;
1816 struct ice_flow_prof *prof = NULL;
1817 struct ice_flow_seg_info *segs;
1818 enum ice_status status = ICE_SUCCESS;
1820 segs = (struct ice_flow_seg_info *)ice_malloc(hw, sizeof(*segs));
1822 return ICE_ERR_NO_MEMORY;
1824 /* Construct the packet segment info from the hashed fields */
1825 status = ice_flow_set_rss_seg_info(segs, hashed_flds, addl_hdrs);
1829 /* Search for a flow profile that has matching headers, hash fields
1830 * and has the input VSI associated to it. If found, no further
1831 * operations required and exit.
1833 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, 1,
1835 ICE_FLOW_FIND_PROF_CHK_FLDS |
1836 ICE_FLOW_FIND_PROF_CHK_VSI);
1840 /* Check if a flow profile exists with the same protocol headers and
1841 * associated with the input VSI. If so disassociate the VSI from
1842 * this profile. The VSI will be added to a new profile created with
1843 * the protocol header and new hash field configuration.
1845 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, 1,
1846 vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
1848 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
1850 ice_rem_rss_list(hw, vsi_handle, prof);
1854 /* Remove profile if it has no VSIs associated */
1855 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
1856 status = ice_flow_rem_prof_sync(hw, blk, prof);
1862 /* Search for a profile that has same match fields only. If this
1863 * exists then associate the VSI to this profile.
1865 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, 1,
1867 ICE_FLOW_FIND_PROF_CHK_FLDS);
1869 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1871 status = ice_add_rss_list(hw, vsi_handle, prof);
1875 /* Create a new flow profile with generated profile and packet
1876 * segment information.
1878 status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
1879 ICE_FLOW_GEN_PROFID(hashed_flds, segs->hdrs),
1880 segs, 1, NULL, 0, &prof);
1884 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
1885 /* If association to a new flow profile failed then this profile can
1889 ice_flow_rem_prof_sync(hw, blk, prof);
/* Record the new configuration so it can be replayed after reset */
1893 status = ice_add_rss_list(hw, vsi_handle, prof);
1901 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
1902 * @hw: pointer to the hardware structure
1903 * @vsi_handle: software VSI handle
1904 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
1905 * @addl_hdrs: protocol header fields
1907 * This function will generate a flow profile based on fields associated with
1908 * the input fields to hash on, the flow type and use the VSI number to add
1909 * a flow entry to the profile.
1912 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1915 enum ice_status status;
1917 if (hashed_flds == ICE_HASH_INVALID ||
1918 !ice_is_vsi_valid(hw, vsi_handle))
1919 return ICE_ERR_PARAM;
/* Public entry point: serialize on rss_locks, then delegate */
1921 ice_acquire_lock(&hw->rss_locks);
1922 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs);
1923 ice_release_lock(&hw->rss_locks);
1929 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
1930 * @hw: pointer to the hardware structure
1931 * @vsi_handle: software VSI handle
1932 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
1933 * @addl_hdrs: Protocol header fields within a packet segment
1935 * Assumption: lock has already been acquired for RSS list
1937 static enum ice_status
1938 ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1941 const enum ice_block blk = ICE_BLK_RSS;
1942 struct ice_flow_seg_info *segs;
1943 struct ice_flow_prof *prof;
1944 enum ice_status status;
1946 segs = (struct ice_flow_seg_info *)ice_malloc(hw, sizeof(*segs));
1948 return ICE_ERR_NO_MEMORY;
1950 /* Construct the packet segment info from the hashed fields */
1951 status = ice_flow_set_rss_seg_info(segs, hashed_flds, addl_hdrs);
/* Find the profile whose match fields equal the requested hash */
1955 prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, 1,
1957 ICE_FLOW_FIND_PROF_CHK_FLDS);
1959 status = ICE_ERR_DOES_NOT_EXIST;
1963 status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
1967 /* Remove RSS configuration from VSI context before deleting
1970 ice_rem_rss_list(hw, vsi_handle, prof);
/* Drop the profile when no VSI remains associated with it */
1972 if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
1973 status = ice_flow_rem_prof_sync(hw, blk, prof);
1980 /* Mapping of AVF hash bit fields to an L3-L4 hash combination.
1981 * As the ice_flow_avf_hdr_field represent individual bit shifts in a hash,
1982 * convert its values to their appropriate flow L3, L4 values.
1984 #define ICE_FLOW_AVF_RSS_IPV4_MASKS \
1985 (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
1986 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
1987 #define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
1988 (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
1989 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
1990 #define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
1991 (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
1992 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
1993 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
1994 #define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
1995 (ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
1996 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
1998 #define ICE_FLOW_AVF_RSS_IPV6_MASKS \
1999 (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
2000 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
2001 #define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
2002 (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
2003 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
2004 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
2005 #define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
2006 (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
2007 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
2008 #define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
2009 (ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
2010 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
/* Cap on distinct RSS configurations derived from one AVF hash bitmap */
2012 #define ICE_FLOW_MAX_CFG 10
2015 * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
2016 * @hw: pointer to the hardware structure
2017 * @vsi_handle: software VSI handle
2018 * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
2020 * This function will take the hash bitmap provided by the AVF driver via a
2021 * message, convert it to ICE-compatible values, and configure RSS flow
2025 ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
2027 enum ice_status status = ICE_SUCCESS;
2030 if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
2031 !ice_is_vsi_valid(hw, vsi_handle))
2032 return ICE_ERR_PARAM;
2034 /* Make sure no unsupported bits are specified */
2035 if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
2036 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
2039 hash_flds = avf_hash;
2041 /* Always create an L3 RSS configuration for any L4 RSS configuration */
2042 if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
2043 hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
2045 if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
2046 hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
2048 /* Create the corresponding RSS configuration for each valid hash bit */
2050 u64 rss_hash = ICE_HASH_INVALID;
/* Each pass converts one AVF hash group (IPv4 first, then IPv6) to
 * an ICE hash value, clearing the consumed bits from hash_flds.
 */
2052 if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
2053 if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
2054 rss_hash = ICE_FLOW_HASH_IPV4;
2055 hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
2056 } else if (hash_flds &
2057 ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
2058 rss_hash = ICE_FLOW_HASH_IPV4 |
2059 ICE_FLOW_HASH_TCP_PORT;
2060 hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
2061 } else if (hash_flds &
2062 ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
2063 rss_hash = ICE_FLOW_HASH_IPV4 |
2064 ICE_FLOW_HASH_UDP_PORT;
2065 hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
2066 } else if (hash_flds &
2067 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
2068 rss_hash = ICE_FLOW_HASH_IPV4 |
2069 ICE_FLOW_HASH_SCTP_PORT;
2071 ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
2073 } else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
2074 if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
2075 rss_hash = ICE_FLOW_HASH_IPV6;
2076 hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
2077 } else if (hash_flds &
2078 ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
2079 rss_hash = ICE_FLOW_HASH_IPV6 |
2080 ICE_FLOW_HASH_TCP_PORT;
2081 hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
2082 } else if (hash_flds &
2083 ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
2084 rss_hash = ICE_FLOW_HASH_IPV6 |
2085 ICE_FLOW_HASH_UDP_PORT;
2086 hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
2087 } else if (hash_flds &
2088 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
2089 rss_hash = ICE_FLOW_HASH_IPV6 |
2090 ICE_FLOW_HASH_SCTP_PORT;
2092 ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
/* A set bit that maps to no known group is a conversion failure */
2096 if (rss_hash == ICE_HASH_INVALID)
2097 return ICE_ERR_OUT_OF_RANGE;
2099 status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
2100 ICE_FLOW_SEG_HDR_NONE);
/* NOTE(review): this excerpt is elided -- the opening comment marker, the
 * "enum ice_status" return-type line, the "u32 addl_hdrs)" parameter line,
 * and the trailing "return status;" with its closing brace are missing
 * here. Confirm against the full file before editing.
 */
2109 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
2110 * @hw: pointer to the hardware structure
2111 * @vsi_handle: software VSI handle
2112 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
2113 * @addl_hdrs: Protocol header fields within a packet segment
2115 * This function will lookup the flow profile based on the input
2116 * hash field bitmap, iterate through the profile entry list of
2117 * that profile and find entry associated with input VSI to be
2118 * removed. Calls are made to underlying flow apis which will in
2119 * turn build or update buffers for RSS XLT1 section.
2122 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
2125 enum ice_status status;
/* Validate inputs before touching any shared RSS state. */
2127 if (hashed_flds == ICE_HASH_INVALID ||
2128 !ice_is_vsi_valid(hw, vsi_handle))
2129 return ICE_ERR_PARAM;
/* rss_locks serializes RSS configuration changes on this hw instance;
 * the real removal work is delegated to ice_rem_rss_cfg_sync().
 */
2131 ice_acquire_lock(&hw->rss_locks);
2132 status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs);
2133 ice_release_lock(&hw->rss_locks);
/* NOTE(review): this excerpt is elided -- the remaining arguments of the
 * ice_add_rss_cfg_sync() call (original lines 2156-2161, presumably
 * including early-exit handling on failure) and the trailing
 * "return status;" with its closing brace are missing here. Confirm
 * against the full file before editing.
 */
2139 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
2140 * @hw: pointer to the hardware structure
2141 * @vsi_handle: software VSI handle
2143 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
2145 enum ice_status status = ICE_SUCCESS;
2146 struct ice_rss_cfg *r;
2148 if (!ice_is_vsi_valid(hw, vsi_handle))
2149 return ICE_ERR_PARAM;
/* Walk the saved RSS configuration list under rss_locks and re-apply
 * every entry whose VSI bitmap contains vsi_handle.
 */
2151 ice_acquire_lock(&hw->rss_locks);
2152 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
2153 ice_rss_cfg, l_entry) {
2154 if (ice_is_bit_set(r->vsis, vsi_handle)) {
2155 status = ice_add_rss_cfg_sync(hw, vsi_handle,
2162 ice_release_lock(&hw->rss_locks);
/* NOTE(review): this excerpt is elided -- the body of the match branch
 * (original lines 2189-2191, presumably recording the entry in rss_cfg
 * and breaking out of the loop) and the function's closing brace are
 * missing here. Confirm against the full file before editing.
 */
2168 * ice_get_rss_cfg - returns hashed fields for the given header types
2169 * @hw: pointer to the hardware structure
2170 * @vsi_handle: software VSI handle
2171 * @hdrs: protocol header type
2173 * This function will return the match fields of the first instance of flow
2174 * profile having the given header types and containing input VSI
2176 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
2178 struct ice_rss_cfg *r, *rss_cfg = NULL;
2180 /* verify if the protocol header is non zero and VSI is valid */
2181 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
2182 return ICE_HASH_INVALID;
/* Scan the saved RSS configuration list under rss_locks for the first
 * entry that both covers vsi_handle and matches the requested headers.
 */
2184 ice_acquire_lock(&hw->rss_locks);
2185 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
2186 ice_rss_cfg, l_entry)
2187 if (ice_is_bit_set(r->vsis, vsi_handle) &&
2188 r->packet_hdr == hdrs) {
2192 ice_release_lock(&hw->rss_locks);
/* No matching entry found means no hashed fields to report. */
2194 return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;