net/ice/base: whitelist register for NVM access
[dpdk.git] / drivers / net / ice / base / ice_flow.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2019
3  */
4
5 #ifndef _ICE_FLOW_H_
6 #define _ICE_FLOW_H_
7
8 #include "ice_flex_type.h"
/* Build a 32-bit IPv4 netmask from a prefix length.
 * NOTE(review): a prefix of 0 shifts a 32-bit value by 32, which is
 * undefined behavior in C — callers presumably pass 1..32; confirm at
 * the call sites in ice_flow.c.
 */
#define ICE_IPV4_MAKE_PREFIX_MASK(prefix) ((u32)(~0) << (32 - (prefix)))
/* Reserved/sentinel flow profile IDs */
#define ICE_FLOW_PROF_ID_INVAL          0xfffffffffffffffful
#define ICE_FLOW_PROF_ID_BYPASS         0
#define ICE_FLOW_PROF_ID_DEFAULT        1
/* Sentinel values for entry handles, VSI numbers and field offsets */
#define ICE_FLOW_ENTRY_HANDLE_INVAL     0
#define ICE_FLOW_VSI_INVAL              0xffff
#define ICE_FLOW_FLD_OFF_INVAL          0xffff
16
/* Generate flow hash field from flow field type(s).
 * Each mask below is a bitmap over enum ice_flow_field indices and selects
 * which packet fields contribute to the RSS hash for that protocol.
 */
#define ICE_FLOW_HASH_ETH       \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA) | \
         BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA))
#define ICE_FLOW_HASH_IPV4      \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
         BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
#define ICE_FLOW_HASH_IPV6      \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
         BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
#define ICE_FLOW_HASH_TCP_PORT  \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
         BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
#define ICE_FLOW_HASH_UDP_PORT  \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
         BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
#define ICE_FLOW_HASH_SCTP_PORT \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
         BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))

/* Common L3+L4 hash combinations */
#define ICE_HASH_INVALID        0
#define ICE_HASH_TCP_IPV4       (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_TCP_IPV6       (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_UDP_IPV4       (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6       (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_SCTP_IPV4      (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_HASH_SCTP_IPV6      (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
44
/* GTP-C tunnel endpoint ID hash field.
 * NOTE(review): despite the generic GTP name this uses the GTPC TEID
 * field index — confirm it is not intended to also cover GTP-U.
 */
#define ICE_FLOW_HASH_GTP_TEID \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))

#define ICE_FLOW_HASH_GTP_IPV4_TEID \
        (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
#define ICE_FLOW_HASH_GTP_IPV6_TEID \
        (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)

/* GTP-U (plain IP payload) TEID hash field and L3 combinations */
#define ICE_FLOW_HASH_GTP_U_TEID \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID))

#define ICE_FLOW_HASH_GTP_U_IPV4_TEID \
        (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_TEID)
#define ICE_FLOW_HASH_GTP_U_IPV6_TEID \
        (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_TEID)

/* GTP-U extension header TEID/QFI hash fields and L3 combinations */
#define ICE_FLOW_HASH_GTP_U_EH_TEID \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID))

#define ICE_FLOW_HASH_GTP_U_EH_QFI \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI))

#define ICE_FLOW_HASH_GTP_U_IPV4_EH \
        (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
         ICE_FLOW_HASH_GTP_U_EH_QFI)
#define ICE_FLOW_HASH_GTP_U_IPV6_EH \
        (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
         ICE_FLOW_HASH_GTP_U_EH_QFI)

/* PPPoE session ID hash field and combinations with L2/L4 fields */
#define ICE_FLOW_HASH_PPPOE_SESS_ID \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))

#define ICE_FLOW_HASH_PPPOE_SESS_ID_ETH \
        (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_PPPOE_SESS_ID)
#define ICE_FLOW_HASH_PPPOE_TCP_ID \
        (ICE_FLOW_HASH_TCP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
#define ICE_FLOW_HASH_PPPOE_UDP_ID \
        (ICE_FLOW_HASH_UDP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
83
84 /* Protocol header fields within a packet segment. A segment consists of one or
85  * more protocol headers that make up a logical group of protocol headers. Each
86  * logical group of protocol headers encapsulates or is encapsulated using/by
87  * tunneling or encapsulation protocols for network virtualization such as GRE,
88  * VxLAN, etc.
89  */
/* Protocol headers present in one flow segment; values are single-bit
 * flags so they can be OR-ed together into a u32 hdrs bitmask (see
 * ICE_FLOW_SET_HDRS). Do not renumber — the values are used as masks.
 */
enum ice_flow_seg_hdr {
        ICE_FLOW_SEG_HDR_NONE           = 0x00000000,
        ICE_FLOW_SEG_HDR_ETH            = 0x00000001,
        ICE_FLOW_SEG_HDR_VLAN           = 0x00000002,
        ICE_FLOW_SEG_HDR_IPV4           = 0x00000004,
        ICE_FLOW_SEG_HDR_IPV6           = 0x00000008,
        ICE_FLOW_SEG_HDR_ARP            = 0x00000010,
        ICE_FLOW_SEG_HDR_ICMP           = 0x00000020,
        ICE_FLOW_SEG_HDR_TCP            = 0x00000040,
        ICE_FLOW_SEG_HDR_UDP            = 0x00000080,
        ICE_FLOW_SEG_HDR_SCTP           = 0x00000100,
        ICE_FLOW_SEG_HDR_GRE            = 0x00000200,
        ICE_FLOW_SEG_HDR_GTPC           = 0x00000400,
        ICE_FLOW_SEG_HDR_GTPC_TEID      = 0x00000800,
        ICE_FLOW_SEG_HDR_GTPU_IP        = 0x00001000,
        ICE_FLOW_SEG_HDR_GTPU_EH        = 0x00002000,
        ICE_FLOW_SEG_HDR_GTPU_DWN       = 0x00004000,
        ICE_FLOW_SEG_HDR_GTPU_UP        = 0x00008000,
        ICE_FLOW_SEG_HDR_PPPOE          = 0x00010000,
};
110
/* These segments all have the same PTYPES, but are otherwise distinguished by
112  * the value of the gtp_eh_pdu and gtp_eh_pdu_link flags:
113  *
114  *                                gtp_eh_pdu     gtp_eh_pdu_link
115  * ICE_FLOW_SEG_HDR_GTPU_IP           0              0
116  * ICE_FLOW_SEG_HDR_GTPU_EH           1              don't care
117  * ICE_FLOW_SEG_HDR_GTPU_DWN          1              0
118  * ICE_FLOW_SEG_HDR_GTPU_UP           1              1
119  */
/* Union of the GTP-U segment header variants listed in the table above.
 * NOTE(review): ICE_FLOW_SEG_HDR_GTPU_EH is not part of this mask —
 * confirm that is intentional.
 */
#define ICE_FLOW_SEG_HDR_GTPU (ICE_FLOW_SEG_HDR_GTPU_IP | \
                               ICE_FLOW_SEG_HDR_GTPU_DWN | \
                               ICE_FLOW_SEG_HDR_GTPU_UP)
123
/* Match/hash field indices. Each enumerator is used as a bit position in
 * 64-bit match/hash bitmasks (via BIT_ULL), so the order must not change
 * and the total count must stay <= 64.
 */
enum ice_flow_field {
        /* L2 */
        ICE_FLOW_FIELD_IDX_ETH_DA,
        ICE_FLOW_FIELD_IDX_ETH_SA,
        ICE_FLOW_FIELD_IDX_S_VLAN,
        ICE_FLOW_FIELD_IDX_C_VLAN,
        ICE_FLOW_FIELD_IDX_ETH_TYPE,
        /* L3 */
        ICE_FLOW_FIELD_IDX_IPV4_DSCP,
        ICE_FLOW_FIELD_IDX_IPV6_DSCP,
        ICE_FLOW_FIELD_IDX_IPV4_TTL,
        ICE_FLOW_FIELD_IDX_IPV4_PROT,
        ICE_FLOW_FIELD_IDX_IPV6_TTL,
        ICE_FLOW_FIELD_IDX_IPV6_PROT,
        ICE_FLOW_FIELD_IDX_IPV4_SA,
        ICE_FLOW_FIELD_IDX_IPV4_DA,
        ICE_FLOW_FIELD_IDX_IPV6_SA,
        ICE_FLOW_FIELD_IDX_IPV6_DA,
        /* L4 */
        ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
        ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
        ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
        ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
        ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
        ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
        ICE_FLOW_FIELD_IDX_TCP_FLAGS,
        /* ARP */
        ICE_FLOW_FIELD_IDX_ARP_SIP,
        ICE_FLOW_FIELD_IDX_ARP_DIP,
        ICE_FLOW_FIELD_IDX_ARP_SHA,
        ICE_FLOW_FIELD_IDX_ARP_DHA,
        ICE_FLOW_FIELD_IDX_ARP_OP,
        /* ICMP */
        ICE_FLOW_FIELD_IDX_ICMP_TYPE,
        ICE_FLOW_FIELD_IDX_ICMP_CODE,
        /* GRE */
        ICE_FLOW_FIELD_IDX_GRE_KEYID,
        /* GTPC_TEID */
        ICE_FLOW_FIELD_IDX_GTPC_TEID,
        /* GTPU_IP */
        ICE_FLOW_FIELD_IDX_GTPU_IP_TEID,
        /* GTPU_EH */
        ICE_FLOW_FIELD_IDX_GTPU_EH_TEID,
        ICE_FLOW_FIELD_IDX_GTPU_EH_QFI,
        /* GTPU_UP */
        ICE_FLOW_FIELD_IDX_GTPU_UP_TEID,
        /* GTPU_DWN */
        ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID,
        /* PPPOE */
        ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID,
        /* The total number of enums must not exceed 64 */
        ICE_FLOW_FIELD_IDX_MAX
};
177
178 /* Flow headers and fields for AVF support */
/* Flow headers and fields for AVF support.
 * These values are bit positions in the 64-bit RSS HENA word shared with
 * AVF VFs over virtchnl, so the explicit numbering is part of the ABI and
 * must not change.
 */
enum ice_flow_avf_hdr_field {
        /* Values 0 - 28 are reserved for future use */
        ICE_AVF_FLOW_FIELD_INVALID              = 0,
        ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP     = 29,
        ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
        ICE_AVF_FLOW_FIELD_IPV4_UDP,
        ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
        ICE_AVF_FLOW_FIELD_IPV4_TCP,
        ICE_AVF_FLOW_FIELD_IPV4_SCTP,
        ICE_AVF_FLOW_FIELD_IPV4_OTHER,
        ICE_AVF_FLOW_FIELD_FRAG_IPV4,
        /* Values 37-38 are reserved */
        ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP     = 39,
        ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
        ICE_AVF_FLOW_FIELD_IPV6_UDP,
        ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
        ICE_AVF_FLOW_FIELD_IPV6_TCP,
        ICE_AVF_FLOW_FIELD_IPV6_SCTP,
        ICE_AVF_FLOW_FIELD_IPV6_OTHER,
        ICE_AVF_FLOW_FIELD_FRAG_IPV6,
        ICE_AVF_FLOW_FIELD_RSVD47,
        ICE_AVF_FLOW_FIELD_FCOE_OX,
        ICE_AVF_FLOW_FIELD_FCOE_RX,
        ICE_AVF_FLOW_FIELD_FCOE_OTHER,
        /* Values 51-62 are reserved */
        ICE_AVF_FLOW_FIELD_L2_PAYLOAD           = 63,
        ICE_AVF_FLOW_FIELD_MAX
};
206
/* Supported RSS offloads. This macro is defined to support
 * VIRTCHNL_OP_GET_RSS_HENA_CAPS ops. The PF driver sends the RSS hardware
 * capabilities to the caller of this ops.
 */
211 #define ICE_DEFAULT_RSS_HENA ( \
212         BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
213         BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
214         BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
215         BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
216         BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
217         BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
218         BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
219         BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
220         BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
221         BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
222         BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
223         BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
224         BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
225         BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
226         BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
227         BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
228
/* Traffic direction a flow profile applies to; TX and RX are single bits
 * so they can be combined into ICE_FLOW_TX_RX.
 */
enum ice_flow_dir {
        ICE_FLOW_DIR_UNDEFINED  = 0,
        ICE_FLOW_TX             = 0x01,
        ICE_FLOW_RX             = 0x02,
        ICE_FLOW_TX_RX          = ICE_FLOW_RX | ICE_FLOW_TX
};
235
/* Relative priority of a flow entry within its profile */
enum ice_flow_priority {
        ICE_FLOW_PRIO_LOW,
        ICE_FLOW_PRIO_NORMAL,
        ICE_FLOW_PRIO_HIGH
};
241
/* Sizing limits for the flow engine */
#define ICE_FLOW_SEG_MAX                2       /* outer + inner segment */
#define ICE_FLOW_SEG_RAW_FLD_MAX        2       /* raw matches per segment */
#define ICE_FLOW_PROFILE_MAX            1024
#define ICE_FLOW_SW_FIELD_VECTOR_MAX    48
#define ICE_FLOW_ACL_FIELD_VECTOR_MAX   32
#define ICE_FLOW_FV_EXTRACT_SZ          2       /* bytes per FV extract word */

/* OR protocol-header flag(s) into a segment's hdrs bitmask */
#define ICE_FLOW_SET_HDRS(seg, val)     ((seg)->hdrs |= (u32)(val))
250
/* Describes how one matched field is extracted by the hardware field
 * vector (FV): which protocol, at what offset, and via which FV entry.
 */
struct ice_flow_seg_xtrct {
        u8 prot_id;     /* Protocol ID of extracted header field */
        u16 off;        /* Starting offset of the field in header in bytes */
        u8 idx;         /* Index of FV entry used */
        u8 disp;        /* Displacement of field in bits from FV entry's start */
        u16 mask;       /* Mask for field */
};
258
/* How a field's value in the input buffer is interpreted when matching */
enum ice_flow_fld_match_type {
        ICE_FLOW_FLD_TYPE_REG,          /* Value, mask */
        ICE_FLOW_FLD_TYPE_RANGE,        /* Value, mask, last (upper bound) */
        ICE_FLOW_FLD_TYPE_PREFIX,       /* IP address, prefix, size of prefix */
        ICE_FLOW_FLD_TYPE_SIZE,         /* Value, mask, size of match */
};
265
/* Byte offsets locating one field's value/mask/upper-bound data */
struct ice_flow_fld_loc {
        /* Describe offsets of field information relative to the beginning of
         * input buffer provided when adding flow entries.
         */
        u16 val;        /* Offset where the value is located */
        u16 mask;       /* Offset where the mask/prefix value is located */
        u16 last;       /* Length or offset where the upper value is located */
};
274
/* Per-field match description: match type, where to read the data from the
 * caller's input buffer, where to place it in the final entry buffer, and
 * how the hardware extracts it.
 */
struct ice_flow_fld_info {
        enum ice_flow_fld_match_type type;
        /* Location where to retrieve data from an input buffer */
        struct ice_flow_fld_loc src;
        /* Location where to put the data into the final entry buffer */
        struct ice_flow_fld_loc entry;
        struct ice_flow_seg_xtrct xtrct;
};
283
/* A raw (offset-based, protocol-agnostic) match within a segment */
struct ice_flow_seg_fld_raw {
        int off;        /* Offset from the start of the segment */
        struct ice_flow_fld_info info;
};
288
/* Full description of one packet segment: which protocol headers are
 * present, which fields are matched (and how), plus any raw matches.
 */
struct ice_flow_seg_info {
        u32 hdrs;       /* Bitmask indicating protocol headers present */
        u64 match;      /* Bitmask indicating header fields to be matched */
        u64 range;      /* Bitmask indicating header fields matched as ranges */

        /* Per-field match info, indexed by enum ice_flow_field */
        struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];

        u8 raws_cnt;    /* Number of raw fields to be matched */
        struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
};
299
/* This structure describes a flow entry, and is tracked only in this file */
struct ice_flow_entry {
        struct LIST_ENTRY_TYPE l_entry; /* link in the owning profile's list */

        u64 id;                         /* caller-supplied entry ID */
        struct ice_flow_prof *prof;     /* profile this entry belongs to */
        /* Action list */
        struct ice_flow_action *acts;
        /* Flow entry's content */
        void *entry;
        enum ice_flow_priority priority;
        u16 vsi_handle;                 /* software VSI handle */
        u16 entry_sz;                   /* size in bytes of *entry */
        u8 acts_cnt;                    /* number of elements in acts */
};
315
/* Convert a flow entry pointer to/from the opaque handle handed to callers.
 * Arguments are parenthesized so that expression arguments (e.g. "p + 1")
 * are converted as a whole instead of having the cast bind to only their
 * first operand.
 */
#define ICE_FLOW_ENTRY_HNDL(e)  ((unsigned long)(e))
#define ICE_FLOW_ENTRY_PTR(h)   ((struct ice_flow_entry *)(h))
318
/* A flow profile: the segment descriptions shared by a set of flow
 * entries, the VSIs it is associated with, and its default actions.
 */
struct ice_flow_prof {
        struct LIST_ENTRY_TYPE l_entry; /* link in the per-block profile list */

        u64 id;
        enum ice_flow_dir dir;
        u8 segs_cnt;                    /* valid elements in segs[] */
        u8 acts_cnt;                    /* valid elements in *acts */

        /* Keep track of flow entries associated with this flow profile */
        struct ice_lock entries_lock;   /* protects the entries list */
        struct LIST_HEAD_TYPE entries;

        struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];

        /* software VSI handles referenced by this flow profile */
        ice_declare_bitmap(vsis, ICE_MAX_VSI);

        /* Per-block configuration; which member is meaningful depends on
         * the block the profile was created for.
         */
        union {
                /* struct sw_recipe */
                /* struct fd */
                u32 data;
                /* Symmetric Hash for RSS */
                bool symm;
        } cfg;

        /* Default actions */
        struct ice_flow_action *acts;
};
347
/* One RSS configuration tracked for replay: the hashed fields/headers and
 * the set of VSIs the configuration was applied to.
 */
struct ice_rss_cfg {
        struct LIST_ENTRY_TYPE l_entry;
        /* bitmap of VSIs added to the RSS entry */
        ice_declare_bitmap(vsis, ICE_MAX_VSI);
        u64 hashed_flds;        /* bitmap over enum ice_flow_field */
        u32 packet_hdr;         /* bitmap over enum ice_flow_seg_hdr */
        bool symm;              /* symmetric hash requested */
};
356
/* Actions that can be attached to a flow profile or entry */
enum ice_flow_action_type {
        ICE_FLOW_ACT_NOP,
        ICE_FLOW_ACT_ALLOW,
        ICE_FLOW_ACT_DROP,
        ICE_FLOW_ACT_CNTR_PKT,
        ICE_FLOW_ACT_FWD_VSI,
        ICE_FLOW_ACT_FWD_VSI_LIST,      /* Should be abstracted away */
        ICE_FLOW_ACT_FWD_QUEUE,         /* Can Queues be abstracted away? */
        ICE_FLOW_ACT_FWD_QUEUE_GROUP,   /* Can Queues be abstracted away? */
        ICE_FLOW_ACT_PUSH,
        ICE_FLOW_ACT_POP,
        ICE_FLOW_ACT_MODIFY,
        ICE_FLOW_ACT_CNTR_BYTES,
        ICE_FLOW_ACT_CNTR_PKT_BYTES,
        ICE_FLOW_ACT_GENERIC_0,
        ICE_FLOW_ACT_GENERIC_1,
        ICE_FLOW_ACT_GENERIC_2,
        ICE_FLOW_ACT_GENERIC_3,
        ICE_FLOW_ACT_GENERIC_4,
        ICE_FLOW_ACT_RPT_FLOW_ID,
        ICE_FLOW_ACT_BUILD_PROF_IDX,
};
379
/* A single flow action; the data union is a placeholder for per-action
 * payloads (only a dummy member is defined at this point).
 */
struct ice_flow_action {
        enum ice_flow_action_type type;
        union {
                u32 dummy;
        } data;
};
386
/* Flow profile management (implemented in ice_flow.c).
 * NOTE(review): semantics below are inferred from names/signatures only —
 * confirm against the implementations.
 */

/* Find an existing profile matching dir/segs; returns its profile ID,
 * presumably ICE_FLOW_PROF_ID_INVAL when none matches.
 */
u64
ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
                   struct ice_flow_seg_info *segs, u8 segs_cnt);
/* Create a profile with the given ID, segments and default actions;
 * returns the new profile through *prof.
 */
enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
                  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
                  struct ice_flow_action *acts, u8 acts_cnt,
                  struct ice_flow_prof **prof);
/* Remove the profile identified by prof_id from the given block */
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
/* Associate a VSI (by software handle) with a VSI group */
enum ice_status
ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
                        u16 vsig);
/* Translate a software profile ID into its hardware profile index */
enum ice_status
ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
                     u8 *hw_prof);

/* Flow entry management */

/* Find an entry by caller-supplied ID; returns its opaque handle */
u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id);
/* Add an entry to an existing profile; the opaque handle is returned
 * through *entry_h (see ICE_FLOW_ENTRY_HNDL).
 */
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
                   u64 entry_id, u16 vsi, enum ice_flow_priority prio,
                   void *data, struct ice_flow_action *acts, u8 acts_cnt,
                   u64 *entry_h);
/* Remove the entry identified by the opaque handle entry_h */
enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h);

/* Segment field configuration helpers */

/* Configure a named field match; *_loc values are offsets into the input
 * buffer (ICE_FLOW_FLD_OFF_INVAL when unused); range selects a ranged match.
 */
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
                 u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
/* Configure an IP-address field matched by prefix length */
void
ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
                        u16 val_loc, u16 prefix_loc, u8 prefix_sz);
/* Add a raw (offset/length) match to a segment */
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
                     u16 val_loc, u16 mask_loc);

/* RSS configuration management */

/* Drop all RSS list entries referencing the given VSI */
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
/* Re-apply previously recorded RSS configurations after a reset */
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
/* Apply an AVF (virtchnl HENA-style) RSS configuration */
enum ice_status
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
/* Remove all RSS configuration from the given VSI */
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
/* Add one RSS configuration (hashed fields + packet headers) to a VSI */
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
                u32 addl_hdrs, bool symm);
/* Remove one RSS configuration from a VSI */
enum ice_status
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
                u32 addl_hdrs);
/* Return the hashed-fields bitmap configured for the given headers */
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
432 #endif /* _ICE_FLOW_H_ */