1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
8 #include "ice_flex_type.h"
/* Build a host-order IPv4 netmask with the top "prefix" bits set
 * (e.g. prefix 24 -> 0xffffff00). The previous form always shifted by
 * (32 - prefix), which is undefined behavior in C when prefix == 0
 * (shift count equal to the type width); guard that case and yield an
 * all-zero mask instead.
 */
11 #define ICE_IPV4_MAKE_PREFIX_MASK(prefix) ((prefix) ? ((u32)(~0) << (32 - (prefix))) : 0)
/* Sentinel values for flow profile IDs, entry handles and VSI numbers */
12 #define ICE_FLOW_PROF_ID_INVAL 0xfffffffffffffffful
13 #define ICE_FLOW_PROF_ID_BYPASS 0
14 #define ICE_FLOW_PROF_ID_DEFAULT 1
15 #define ICE_FLOW_ENTRY_HANDLE_INVAL 0
16 #define ICE_FLOW_VSI_INVAL 0xffff
17 #define ICE_FLOW_FLD_OFF_INVAL 0xffff
19 /* Generate flow hash field from flow field type(s) */
/* Each ICE_FLOW_HASH_* value is a 64-bit mask built from
 * BIT_ULL(ICE_FLOW_FIELD_IDX_*) bits, i.e. the field indices double as
 * bit positions in the hash-field mask.
 */
20 #define ICE_FLOW_HASH_ETH \
21 (BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA) | \
22 BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA))
23 #define ICE_FLOW_HASH_IPV4 \
24 (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
25 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
26 #define ICE_FLOW_HASH_IPV6 \
27 (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
28 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
29 #define ICE_FLOW_HASH_TCP_PORT \
30 (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
31 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
32 #define ICE_FLOW_HASH_UDP_PORT \
33 (BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
34 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
35 #define ICE_FLOW_HASH_SCTP_PORT \
36 (BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
37 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))
/* Ready-made L3 + L4 combinations for the common packet types */
39 #define ICE_HASH_INVALID 0
40 #define ICE_HASH_TCP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
41 #define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
42 #define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
43 #define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
44 #define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
45 #define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
/* GTP-C: hash on the control-plane tunnel endpoint ID */
47 #define ICE_FLOW_HASH_GTP_TEID \
48 (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
50 #define ICE_FLOW_HASH_GTP_IPV4_TEID \
51 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
52 #define ICE_FLOW_HASH_GTP_IPV6_TEID \
53 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)
/* GTP-U: hash on the user-plane (IP payload) tunnel endpoint ID */
55 #define ICE_FLOW_HASH_GTP_U_TEID \
56 (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID))
58 #define ICE_FLOW_HASH_GTP_U_IPV4_TEID \
59 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_TEID)
60 #define ICE_FLOW_HASH_GTP_U_IPV6_TEID \
61 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_TEID)
/* GTP-U with extension header: TEID plus the QoS flow identifier (QFI) */
63 #define ICE_FLOW_HASH_GTP_U_EH_TEID \
64 (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID))
66 #define ICE_FLOW_HASH_GTP_U_EH_QFI \
67 (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI))
69 #define ICE_FLOW_HASH_GTP_U_IPV4_EH \
70 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
71 ICE_FLOW_HASH_GTP_U_EH_QFI)
72 #define ICE_FLOW_HASH_GTP_U_IPV6_EH \
73 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
74 ICE_FLOW_HASH_GTP_U_EH_QFI)
/* PPPoE: hash on the session ID, optionally combined with L2 / L4 fields */
76 #define ICE_FLOW_HASH_PPPOE_SESS_ID \
77 (BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))
79 #define ICE_FLOW_HASH_PPPOE_SESS_ID_ETH \
80 (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_PPPOE_SESS_ID)
81 #define ICE_FLOW_HASH_PPPOE_TCP_ID \
82 (ICE_FLOW_HASH_TCP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
83 #define ICE_FLOW_HASH_PPPOE_UDP_ID \
84 (ICE_FLOW_HASH_UDP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
/* PFCP: hash on the session endpoint ID (SEID) */
86 #define ICE_FLOW_HASH_PFCP_SEID \
87 (BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID))
88 #define ICE_FLOW_HASH_PFCP_IPV4_SEID \
89 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_PFCP_SEID)
90 #define ICE_FLOW_HASH_PFCP_IPV6_SEID \
91 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_PFCP_SEID)
/* L2TPv3: hash on the session ID */
93 #define ICE_FLOW_HASH_L2TPV3_SESS_ID \
94 (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID))
95 #define ICE_FLOW_HASH_L2TPV3_IPV4_SESS_ID \
96 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
97 #define ICE_FLOW_HASH_L2TPV3_IPV6_SESS_ID \
98 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
/* ESP: hash on the security parameter index (SPI) */
100 #define ICE_FLOW_HASH_ESP_SPI \
101 (BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI))
102 #define ICE_FLOW_HASH_ESP_IPV4_SPI \
103 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_ESP_SPI)
104 #define ICE_FLOW_HASH_ESP_IPV6_SPI \
105 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_ESP_SPI)
/* AH: hash on the SPI */
107 #define ICE_FLOW_HASH_AH_SPI \
108 (BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI))
109 #define ICE_FLOW_HASH_AH_IPV4_SPI \
110 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_AH_SPI)
111 #define ICE_FLOW_HASH_AH_IPV6_SPI \
112 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_AH_SPI)
/* NAT-traversal ESP (ESP over UDP): hash on the SPI */
114 #define ICE_FLOW_HASH_NAT_T_ESP_SPI \
115 (BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI))
116 #define ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI \
117 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
118 #define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \
119 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
121 /* Protocol header fields within a packet segment. A segment consists of one or
122 * more protocol headers that make up a logical group of protocol headers. Each
123 * logical group of protocol headers encapsulates or is encapsulated using/by
124 * tunneling or encapsulation protocols for network virtualization such as GRE,
127 enum ice_flow_seg_hdr {
/* Each value is a single-bit flag, so headers can be OR'ed together
 * into a u32 bitmask (see ICE_FLOW_SET_HDRS and the seg "hdrs" field).
 */
128 ICE_FLOW_SEG_HDR_NONE = 0x00000000,
129 ICE_FLOW_SEG_HDR_ETH = 0x00000001,
130 ICE_FLOW_SEG_HDR_VLAN = 0x00000002,
131 ICE_FLOW_SEG_HDR_IPV4 = 0x00000004,
132 ICE_FLOW_SEG_HDR_IPV6 = 0x00000008,
133 ICE_FLOW_SEG_HDR_ARP = 0x00000010,
134 ICE_FLOW_SEG_HDR_ICMP = 0x00000020,
135 ICE_FLOW_SEG_HDR_TCP = 0x00000040,
136 ICE_FLOW_SEG_HDR_UDP = 0x00000080,
137 ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
138 ICE_FLOW_SEG_HDR_GRE = 0x00000200,
139 ICE_FLOW_SEG_HDR_GTPC = 0x00000400,
140 ICE_FLOW_SEG_HDR_GTPC_TEID = 0x00000800,
141 ICE_FLOW_SEG_HDR_GTPU_IP = 0x00001000,
142 ICE_FLOW_SEG_HDR_GTPU_EH = 0x00002000,
143 ICE_FLOW_SEG_HDR_GTPU_DWN = 0x00004000,
144 ICE_FLOW_SEG_HDR_GTPU_UP = 0x00008000,
145 ICE_FLOW_SEG_HDR_PPPOE = 0x00010000,
146 ICE_FLOW_SEG_HDR_PFCP_NODE = 0x00020000,
147 ICE_FLOW_SEG_HDR_PFCP_SESSION = 0x00040000,
148 ICE_FLOW_SEG_HDR_L2TPV3 = 0x00080000,
149 ICE_FLOW_SEG_HDR_ESP = 0x00100000,
150 ICE_FLOW_SEG_HDR_AH = 0x00200000,
151 ICE_FLOW_SEG_HDR_NAT_T_ESP = 0x00400000,
/* Ethernet frames carrying a non-IP payload */
152 ICE_FLOW_SEG_HDR_ETH_NON_IP = 0x00800000,
155 /* These segments all have the same PTYPES, but are otherwise distinguished by
156 * the value of the gtp_eh_pdu and gtp_eh_pdu_link flags:
158 * gtp_eh_pdu gtp_eh_pdu_link
159 * ICE_FLOW_SEG_HDR_GTPU_IP 0 0
160 * ICE_FLOW_SEG_HDR_GTPU_EH 1 don't care
161 * ICE_FLOW_SEG_HDR_GTPU_DWN 1 0
162 * ICE_FLOW_SEG_HDR_GTPU_UP 1 1
/* Convenience masks covering every GTP-U variant and both PFCP variants */
164 #define ICE_FLOW_SEG_HDR_GTPU (ICE_FLOW_SEG_HDR_GTPU_IP | \
165 ICE_FLOW_SEG_HDR_GTPU_EH | \
166 ICE_FLOW_SEG_HDR_GTPU_DWN | \
167 ICE_FLOW_SEG_HDR_GTPU_UP)
168 #define ICE_FLOW_SEG_HDR_PFCP (ICE_FLOW_SEG_HDR_PFCP_NODE | \
169 ICE_FLOW_SEG_HDR_PFCP_SESSION)
171 enum ice_flow_field {
/* These indices double as bit positions in the 64-bit hash/match masks
 * (used via BIT_ULL above), hence the 64-entry limit noted below.
 */
/* L2 */
173 ICE_FLOW_FIELD_IDX_ETH_DA,
174 ICE_FLOW_FIELD_IDX_ETH_SA,
175 ICE_FLOW_FIELD_IDX_S_VLAN,
176 ICE_FLOW_FIELD_IDX_C_VLAN,
177 ICE_FLOW_FIELD_IDX_ETH_TYPE,
/* L3 */
179 ICE_FLOW_FIELD_IDX_IPV4_DSCP,
180 ICE_FLOW_FIELD_IDX_IPV6_DSCP,
181 ICE_FLOW_FIELD_IDX_IPV4_TTL,
182 ICE_FLOW_FIELD_IDX_IPV4_PROT,
183 ICE_FLOW_FIELD_IDX_IPV6_TTL,
184 ICE_FLOW_FIELD_IDX_IPV6_PROT,
185 ICE_FLOW_FIELD_IDX_IPV4_SA,
186 ICE_FLOW_FIELD_IDX_IPV4_DA,
187 ICE_FLOW_FIELD_IDX_IPV6_SA,
188 ICE_FLOW_FIELD_IDX_IPV6_DA,
/* L4 */
190 ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
191 ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
192 ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
193 ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
194 ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
195 ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
196 ICE_FLOW_FIELD_IDX_TCP_FLAGS,
/* ARP */
198 ICE_FLOW_FIELD_IDX_ARP_SIP,
199 ICE_FLOW_FIELD_IDX_ARP_DIP,
200 ICE_FLOW_FIELD_IDX_ARP_SHA,
201 ICE_FLOW_FIELD_IDX_ARP_DHA,
202 ICE_FLOW_FIELD_IDX_ARP_OP,
/* ICMP */
204 ICE_FLOW_FIELD_IDX_ICMP_TYPE,
205 ICE_FLOW_FIELD_IDX_ICMP_CODE,
/* GRE */
207 ICE_FLOW_FIELD_IDX_GRE_KEYID,
/* GTP-C */
209 ICE_FLOW_FIELD_IDX_GTPC_TEID,
/* GTP-U */
211 ICE_FLOW_FIELD_IDX_GTPU_IP_TEID,
213 ICE_FLOW_FIELD_IDX_GTPU_EH_TEID,
214 ICE_FLOW_FIELD_IDX_GTPU_EH_QFI,
216 ICE_FLOW_FIELD_IDX_GTPU_UP_TEID,
218 ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID,
/* PPPoE */
220 ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID,
/* PFCP */
222 ICE_FLOW_FIELD_IDX_PFCP_SEID,
/* L2TPv3 */
224 ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID,
/* ESP */
226 ICE_FLOW_FIELD_IDX_ESP_SPI,
/* AH */
228 ICE_FLOW_FIELD_IDX_AH_SPI,
/* NAT-T ESP */
230 ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
231 /* The total number of enums must not exceed 64 */
232 ICE_FLOW_FIELD_IDX_MAX
235 /* Flow headers and fields for AVF support */
/* Values are bit positions in the 64-bit RSS hash-enable (HENA) word
 * exchanged with AVF clients (see ICE_DEFAULT_RSS_HENA below), which is
 * why specific numeric values and reserved gaps are fixed here.
 */
236 enum ice_flow_avf_hdr_field {
237 /* Values 0 - 28 are reserved for future use */
238 ICE_AVF_FLOW_FIELD_INVALID = 0,
239 ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
240 ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
241 ICE_AVF_FLOW_FIELD_IPV4_UDP,
242 ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
243 ICE_AVF_FLOW_FIELD_IPV4_TCP,
244 ICE_AVF_FLOW_FIELD_IPV4_SCTP,
245 ICE_AVF_FLOW_FIELD_IPV4_OTHER,
246 ICE_AVF_FLOW_FIELD_FRAG_IPV4,
247 /* Values 37-38 are reserved */
248 ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
249 ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
250 ICE_AVF_FLOW_FIELD_IPV6_UDP,
251 ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
252 ICE_AVF_FLOW_FIELD_IPV6_TCP,
253 ICE_AVF_FLOW_FIELD_IPV6_SCTP,
254 ICE_AVF_FLOW_FIELD_IPV6_OTHER,
255 ICE_AVF_FLOW_FIELD_FRAG_IPV6,
256 ICE_AVF_FLOW_FIELD_RSVD47,
257 ICE_AVF_FLOW_FIELD_FCOE_OX,
258 ICE_AVF_FLOW_FIELD_FCOE_RX,
259 ICE_AVF_FLOW_FIELD_FCOE_OTHER,
260 /* Values 51-62 are reserved */
261 ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
262 ICE_AVF_FLOW_FIELD_MAX
265 /* Supported RSS offloads This macro is defined to support
266 * VIRTCHNL_OP_GET_RSS_HENA_CAPS ops. PF driver sends the RSS hardware
267 * capabilities to the caller of this ops.
/* 64-bit capability mask: one BIT_ULL per supported
 * ice_flow_avf_hdr_field value.
 */
269 #define ICE_DEFAULT_RSS_HENA ( \
270 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
271 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
272 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
273 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
274 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
275 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
276 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
277 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
278 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
279 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
280 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
281 BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
282 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
283 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
284 BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
285 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
288 ICE_FLOW_DIR_UNDEFINED = 0,
291 ICE_FLOW_TX_RX = ICE_FLOW_RX | ICE_FLOW_TX
294 enum ice_flow_priority {
296 ICE_FLOW_PRIO_NORMAL,
/* Flow engine sizing limits */
300 #define ICE_FLOW_SEG_MAX 2
301 #define ICE_FLOW_SEG_RAW_FLD_MAX 2
302 #define ICE_FLOW_PROFILE_MAX 1024
303 #define ICE_FLOW_SW_FIELD_VECTOR_MAX 48
304 #define ICE_FLOW_ACL_FIELD_VECTOR_MAX 32
305 #define ICE_FLOW_FV_EXTRACT_SZ 2
/* OR the ice_flow_seg_hdr flag(s) in "val" into a segment's hdrs bitmask */
307 #define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))
/* Describes where a matched field's data is extracted from in the
 * field vector (FV) used by the hardware.
 */
309 struct ice_flow_seg_xtrct {
310 u8 prot_id; /* Protocol ID of extracted header field */
311 u16 off; /* Starting offset of the field in header in bytes */
312 u8 idx; /* Index of FV entry used */
313 u8 disp; /* Displacement of field in bits fr. FV entry's start */
314 u16 mask; /* Mask for field */
/* How the match data supplied for a field is interpreted */
317 enum ice_flow_fld_match_type {
318 ICE_FLOW_FLD_TYPE_REG, /* Value, mask */
319 ICE_FLOW_FLD_TYPE_RANGE, /* Value, mask, last (upper bound) */
320 ICE_FLOW_FLD_TYPE_PREFIX, /* IP address, prefix, size of prefix */
321 ICE_FLOW_FLD_TYPE_SIZE, /* Value, mask, size of match */
/* Offsets locating one field's value/mask/last data within a buffer */
324 struct ice_flow_fld_loc {
325 /* Describe offsets of field information relative to the beginning of
326 * input buffer provided when adding flow entries.
328 u16 val; /* Offset where the value is located */
329 u16 mask; /* Offset where the mask/prefix value is located */
330 u16 last; /* Length or offset where the upper value is located */
/* Per-field match description: match type, where to read the data from
 * the caller's input buffer, where to place it in the entry buffer, and
 * the hardware extraction info.
 */
333 struct ice_flow_fld_info {
334 enum ice_flow_fld_match_type type;
335 /* Location where to retrieve data from an input buffer */
336 struct ice_flow_fld_loc src;
337 /* Location where to put the data into the final entry buffer */
338 struct ice_flow_fld_loc entry;
339 struct ice_flow_seg_xtrct xtrct;
/* A raw (offset-based rather than protocol-field-based) match field */
342 struct ice_flow_seg_fld_raw {
343 struct ice_flow_fld_info info;
344 u16 off; /* Offset from the start of the segment */
/* Describes one packet segment: which headers are present and which
 * fields within them are matched (by index or as raw offsets).
 */
347 struct ice_flow_seg_info {
348 u32 hdrs; /* Bitmask indicating protocol headers present */
349 u64 match; /* Bitmask indicating header fields to be matched */
350 u64 range; /* Bitmask indicating header fields matched as ranges */
/* Per-field match info, indexed by enum ice_flow_field */
352 struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
354 u8 raws_cnt; /* Number of raw fields to be matched */
355 struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
358 /* This structure describes a flow entry, and is tracked only in this file */
359 struct ice_flow_entry {
360 struct LIST_ENTRY_TYPE l_entry;
/* Flow profile this entry is associated with (profiles keep a list of
 * their entries -- see struct ice_flow_prof).
 */
363 struct ice_flow_prof *prof;
/* Actions applied to packets matching this entry */
365 struct ice_flow_action *acts;
366 /* Flow entry's content */
368 /* Range buffer (For ACL only) */
369 struct ice_aqc_acl_profile_ranges *range_buf;
370 enum ice_flow_priority priority;
373 /* Entry index in the ACL's scenario */
375 #define ICE_FLOW_ACL_MAX_NUM_ACT 2
/* Convert a flow entry pointer to/from the opaque handle handed to
 * callers. The HNDL argument is now parenthesized so an expression
 * argument is cast as a whole, not just its first operand.
 * NOTE(review): "unsigned long" is 32 bits on LLP64 platforms and would
 * truncate a 64-bit pointer there; uintptr_t would be safer -- confirm
 * the supported target ABIs before changing the type.
 */
379 #define ICE_FLOW_ENTRY_HNDL(e) ((unsigned long)(e))
380 #define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h))
/* A flow profile groups the segment descriptions, the VSIs using them,
 * and the entries/actions created against them.
 */
382 struct ice_flow_prof {
383 struct LIST_ENTRY_TYPE l_entry;
386 enum ice_flow_dir dir;
390 /* Keep track of flow entries associated with this flow profile */
391 struct ice_lock entries_lock;
392 struct LIST_HEAD_TYPE entries;
/* Packet segment descriptions for this profile (up to ICE_FLOW_SEG_MAX) */
394 struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];
396 /* software VSI handles referenced by this flow profile */
397 ice_declare_bitmap(vsis, ICE_MAX_VSI);
400 /* struct sw_recipe */
401 struct ice_acl_scen *scen;
404 /* Symmetric Hash for RSS */
408 /* Default actions */
409 struct ice_flow_action *acts;
413 struct LIST_ENTRY_TYPE l_entry;
414 /* bitmap of VSIs added to the RSS entry */
415 ice_declare_bitmap(vsis, ICE_MAX_VSI);
/* Kinds of actions that can be attached to a flow entry or profile */
421 enum ice_flow_action_type {
425 ICE_FLOW_ACT_CNTR_PKT,
426 ICE_FLOW_ACT_FWD_VSI,
427 ICE_FLOW_ACT_FWD_VSI_LIST, /* Should be abstracted away */
428 ICE_FLOW_ACT_FWD_QUEUE, /* Can Queues be abstracted away? */
429 ICE_FLOW_ACT_FWD_QUEUE_GROUP, /* Can Queues be abstracted away? */
433 ICE_FLOW_ACT_CNTR_BYTES,
434 ICE_FLOW_ACT_CNTR_PKT_BYTES,
435 ICE_FLOW_ACT_GENERIC_0,
436 ICE_FLOW_ACT_GENERIC_1,
437 ICE_FLOW_ACT_GENERIC_2,
438 ICE_FLOW_ACT_GENERIC_3,
439 ICE_FLOW_ACT_GENERIC_4,
440 ICE_FLOW_ACT_RPT_FLOW_ID,
441 ICE_FLOW_ACT_BUILD_PROF_IDX,
/* One action instance; acl_act carries the ACL-specific action data */
444 struct ice_flow_action {
445 enum ice_flow_action_type type;
447 struct ice_acl_act_entry acl_act;
/* --- Flow profile / entry management API (implemented in ice_flow.c) --- */
453 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
454 struct ice_flow_seg_info *segs, u8 segs_cnt);
456 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
457 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
458 struct ice_flow_action *acts, u8 acts_cnt,
459 struct ice_flow_prof **prof);
461 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
463 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
466 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
469 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id);
471 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
472 u64 entry_id, u16 vsi, enum ice_flow_priority prio,
473 void *data, struct ice_flow_action *acts, u8 acts_cnt,
476 ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
478 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
479 u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
481 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
482 u16 val_loc, u16 prefix_loc, u8 prefix_sz);
484 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
485 u16 val_loc, u16 mask_loc);
/* --- RSS configuration helpers --- */
486 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
487 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
489 ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
490 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
492 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
493 u32 addl_hdrs, bool symm);
495 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
497 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
498 #endif /* _ICE_FLOW_H_ */