net/ice: fix RSS for GTPU
drivers/net/ice/base/ice_flow.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */

#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_

#include "ice_flex_type.h"
#include "ice_acl.h"
#define ICE_IPV4_MAKE_PREFIX_MASK(prefix) ((u32)(~0) << (32 - (prefix)))
#define ICE_FLOW_PROF_ID_INVAL          0xfffffffffffffffful
#define ICE_FLOW_PROF_ID_BYPASS         0
#define ICE_FLOW_PROF_ID_DEFAULT        1
#define ICE_FLOW_ENTRY_HANDLE_INVAL     0
#define ICE_FLOW_VSI_INVAL              0xffff
#define ICE_FLOW_FLD_OFF_INVAL          0xffff

/* Generate flow hash field from flow field type(s) */
#define ICE_FLOW_HASH_ETH       \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA) | \
         BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA))
#define ICE_FLOW_HASH_IPV4      \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
         BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
#define ICE_FLOW_HASH_IPV6      \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
         BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
#define ICE_FLOW_HASH_TCP_PORT  \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
         BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
#define ICE_FLOW_HASH_UDP_PORT  \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
         BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
#define ICE_FLOW_HASH_SCTP_PORT \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
         BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))

#define ICE_HASH_INVALID        0
#define ICE_HASH_TCP_IPV4       (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_TCP_IPV6       (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_UDP_IPV4       (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6       (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_SCTP_IPV4      (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_HASH_SCTP_IPV6      (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
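
/* Usage sketch (editorial example, not part of the original header): the
 * ICE_FLOW_HASH_* and ICE_HASH_* helpers above are plain bitmasks of
 * ICE_FLOW_FIELD_IDX_* bits, so a caller composes the set of fields to hash
 * by OR-ing them, e.g. for the hashed_flds argument of ice_add_rss_cfg()
 * declared at the end of this file:
 *
 *      u64 hashed_flds = ICE_HASH_UDP_IPV4;
 *      (equivalent to BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *       BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
 *       BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) |
 *       BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
 */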

#define ICE_FLOW_HASH_GTP_TEID \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))

#define ICE_FLOW_HASH_GTP_IPV4_TEID \
        (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
#define ICE_FLOW_HASH_GTP_IPV6_TEID \
        (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)

#define ICE_FLOW_HASH_GTP_U_TEID \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID))

#define ICE_FLOW_HASH_GTP_U_IPV4_TEID \
        (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_TEID)
#define ICE_FLOW_HASH_GTP_U_IPV6_TEID \
        (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_TEID)

#define ICE_FLOW_HASH_GTP_U_EH_TEID \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID))

#define ICE_FLOW_HASH_GTP_U_EH_QFI \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI))

#define ICE_FLOW_HASH_GTP_U_IPV4_EH \
        (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
         ICE_FLOW_HASH_GTP_U_EH_QFI)
#define ICE_FLOW_HASH_GTP_U_IPV6_EH \
        (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
         ICE_FLOW_HASH_GTP_U_EH_QFI)

#define ICE_FLOW_HASH_PPPOE_SESS_ID \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))

#define ICE_FLOW_HASH_PPPOE_SESS_ID_ETH \
        (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_PPPOE_SESS_ID)
#define ICE_FLOW_HASH_PPPOE_TCP_ID \
        (ICE_FLOW_HASH_TCP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
#define ICE_FLOW_HASH_PPPOE_UDP_ID \
        (ICE_FLOW_HASH_UDP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)

#define ICE_FLOW_HASH_PFCP_SEID \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID))
#define ICE_FLOW_HASH_PFCP_IPV4_SEID \
        (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_PFCP_SEID)
#define ICE_FLOW_HASH_PFCP_IPV6_SEID \
        (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_PFCP_SEID)

#define ICE_FLOW_HASH_L2TPV3_SESS_ID \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID))
#define ICE_FLOW_HASH_L2TPV3_IPV4_SESS_ID \
        (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
#define ICE_FLOW_HASH_L2TPV3_IPV6_SESS_ID \
        (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_L2TPV3_SESS_ID)

#define ICE_FLOW_HASH_ESP_SPI \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI))
#define ICE_FLOW_HASH_ESP_IPV4_SPI \
        (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_ESP_SPI)
#define ICE_FLOW_HASH_ESP_IPV6_SPI \
        (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_ESP_SPI)

#define ICE_FLOW_HASH_AH_SPI \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI))
#define ICE_FLOW_HASH_AH_IPV4_SPI \
        (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_AH_SPI)
#define ICE_FLOW_HASH_AH_IPV6_SPI \
        (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_AH_SPI)

#define ICE_FLOW_HASH_NAT_T_ESP_SPI \
        (BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI))
#define ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI \
        (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
#define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \
        (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI)

/* Protocol header fields within a packet segment. A segment consists of one or
 * more protocol headers that make up a logical group. Each logical group
 * either encapsulates or is encapsulated by tunneling/encapsulation protocols
 * used for network virtualization, such as GRE, VxLAN, etc.
 */
enum ice_flow_seg_hdr {
        ICE_FLOW_SEG_HDR_NONE           = 0x00000000,
        ICE_FLOW_SEG_HDR_ETH            = 0x00000001,
        ICE_FLOW_SEG_HDR_VLAN           = 0x00000002,
        ICE_FLOW_SEG_HDR_IPV4           = 0x00000004,
        ICE_FLOW_SEG_HDR_IPV6           = 0x00000008,
        ICE_FLOW_SEG_HDR_ARP            = 0x00000010,
        ICE_FLOW_SEG_HDR_ICMP           = 0x00000020,
        ICE_FLOW_SEG_HDR_TCP            = 0x00000040,
        ICE_FLOW_SEG_HDR_UDP            = 0x00000080,
        ICE_FLOW_SEG_HDR_SCTP           = 0x00000100,
        ICE_FLOW_SEG_HDR_GRE            = 0x00000200,
        ICE_FLOW_SEG_HDR_GTPC           = 0x00000400,
        ICE_FLOW_SEG_HDR_GTPC_TEID      = 0x00000800,
        ICE_FLOW_SEG_HDR_GTPU_IP        = 0x00001000,
        ICE_FLOW_SEG_HDR_GTPU_EH        = 0x00002000,
        ICE_FLOW_SEG_HDR_GTPU_DWN       = 0x00004000,
        ICE_FLOW_SEG_HDR_GTPU_UP        = 0x00008000,
        ICE_FLOW_SEG_HDR_PPPOE          = 0x00010000,
        ICE_FLOW_SEG_HDR_PFCP_NODE      = 0x00020000,
        ICE_FLOW_SEG_HDR_PFCP_SESSION   = 0x00040000,
        ICE_FLOW_SEG_HDR_L2TPV3         = 0x00080000,
        ICE_FLOW_SEG_HDR_ESP            = 0x00100000,
        ICE_FLOW_SEG_HDR_AH             = 0x00200000,
        ICE_FLOW_SEG_HDR_NAT_T_ESP      = 0x00400000,
};

/* These segments all have the same PTYPES, but are otherwise distinguished by
 * the value of the gtp_eh_pdu and gtp_eh_pdu_link flags:
 *
 *                                gtp_eh_pdu     gtp_eh_pdu_link
 * ICE_FLOW_SEG_HDR_GTPU_IP           0              0
 * ICE_FLOW_SEG_HDR_GTPU_EH           1              don't care
 * ICE_FLOW_SEG_HDR_GTPU_DWN          1              0
 * ICE_FLOW_SEG_HDR_GTPU_UP           1              1
 */
#define ICE_FLOW_SEG_HDR_GTPU (ICE_FLOW_SEG_HDR_GTPU_IP | \
                               ICE_FLOW_SEG_HDR_GTPU_EH | \
                               ICE_FLOW_SEG_HDR_GTPU_DWN | \
                               ICE_FLOW_SEG_HDR_GTPU_UP)
#define ICE_FLOW_SEG_HDR_PFCP (ICE_FLOW_SEG_HDR_PFCP_NODE | \
                               ICE_FLOW_SEG_HDR_PFCP_SESSION)
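
/* Usage sketch (editorial example, not part of the original header): the
 * ICE_FLOW_SEG_HDR_* bits select which packet type a hash or lookup applies
 * to, e.g. as the addl_hdrs argument of ice_add_rss_cfg() declared at the
 * end of this file. For GTP-U with a PDU session extension header over IPv4
 * that could be:
 *
 *      u32 addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4;
 */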

enum ice_flow_field {
        /* L2 */
        ICE_FLOW_FIELD_IDX_ETH_DA,
        ICE_FLOW_FIELD_IDX_ETH_SA,
        ICE_FLOW_FIELD_IDX_S_VLAN,
        ICE_FLOW_FIELD_IDX_C_VLAN,
        ICE_FLOW_FIELD_IDX_ETH_TYPE,
        /* L3 */
        ICE_FLOW_FIELD_IDX_IPV4_DSCP,
        ICE_FLOW_FIELD_IDX_IPV6_DSCP,
        ICE_FLOW_FIELD_IDX_IPV4_TTL,
        ICE_FLOW_FIELD_IDX_IPV4_PROT,
        ICE_FLOW_FIELD_IDX_IPV6_TTL,
        ICE_FLOW_FIELD_IDX_IPV6_PROT,
        ICE_FLOW_FIELD_IDX_IPV4_SA,
        ICE_FLOW_FIELD_IDX_IPV4_DA,
        ICE_FLOW_FIELD_IDX_IPV6_SA,
        ICE_FLOW_FIELD_IDX_IPV6_DA,
        /* L4 */
        ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
        ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
        ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
        ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
        ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
        ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
        ICE_FLOW_FIELD_IDX_TCP_FLAGS,
        /* ARP */
        ICE_FLOW_FIELD_IDX_ARP_SIP,
        ICE_FLOW_FIELD_IDX_ARP_DIP,
        ICE_FLOW_FIELD_IDX_ARP_SHA,
        ICE_FLOW_FIELD_IDX_ARP_DHA,
        ICE_FLOW_FIELD_IDX_ARP_OP,
        /* ICMP */
        ICE_FLOW_FIELD_IDX_ICMP_TYPE,
        ICE_FLOW_FIELD_IDX_ICMP_CODE,
        /* GRE */
        ICE_FLOW_FIELD_IDX_GRE_KEYID,
        /* GTPC_TEID */
        ICE_FLOW_FIELD_IDX_GTPC_TEID,
        /* GTPU_IP */
        ICE_FLOW_FIELD_IDX_GTPU_IP_TEID,
        /* GTPU_EH */
        ICE_FLOW_FIELD_IDX_GTPU_EH_TEID,
        ICE_FLOW_FIELD_IDX_GTPU_EH_QFI,
        /* GTPU_UP */
        ICE_FLOW_FIELD_IDX_GTPU_UP_TEID,
        /* GTPU_DWN */
        ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID,
        /* PPPOE */
        ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID,
        /* PFCP */
        ICE_FLOW_FIELD_IDX_PFCP_SEID,
        /* L2TPV3 */
        ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID,
        /* ESP */
        ICE_FLOW_FIELD_IDX_ESP_SPI,
        /* AH */
        ICE_FLOW_FIELD_IDX_AH_SPI,
        /* NAT_T ESP */
        ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
        /* The total number of enums must not exceed 64 */
        ICE_FLOW_FIELD_IDX_MAX
};
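
/* Editorial sketch (not part of the original header): the field indexes above
 * are used as bit positions in 64-bit masks such as the "match" member of
 * struct ice_flow_seg_info below, which is why the comment caps them at 64.
 * Assuming a C11 toolchain, a build-time guard could be expressed as:
 *
 *      _Static_assert(ICE_FLOW_FIELD_IDX_MAX <= 64,
 *                     "flow field indexes must fit in a 64-bit mask");
 */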

/* Flow headers and fields for AVF support */
enum ice_flow_avf_hdr_field {
        /* Values 0 - 28 are reserved for future use */
        ICE_AVF_FLOW_FIELD_INVALID              = 0,
        ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP     = 29,
        ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
        ICE_AVF_FLOW_FIELD_IPV4_UDP,
        ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
        ICE_AVF_FLOW_FIELD_IPV4_TCP,
        ICE_AVF_FLOW_FIELD_IPV4_SCTP,
        ICE_AVF_FLOW_FIELD_IPV4_OTHER,
        ICE_AVF_FLOW_FIELD_FRAG_IPV4,
        /* Values 37-38 are reserved */
        ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP     = 39,
        ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
        ICE_AVF_FLOW_FIELD_IPV6_UDP,
        ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
        ICE_AVF_FLOW_FIELD_IPV6_TCP,
        ICE_AVF_FLOW_FIELD_IPV6_SCTP,
        ICE_AVF_FLOW_FIELD_IPV6_OTHER,
        ICE_AVF_FLOW_FIELD_FRAG_IPV6,
        ICE_AVF_FLOW_FIELD_RSVD47,
        ICE_AVF_FLOW_FIELD_FCOE_OX,
        ICE_AVF_FLOW_FIELD_FCOE_RX,
        ICE_AVF_FLOW_FIELD_FCOE_OTHER,
        /* Values 51-62 are reserved */
        ICE_AVF_FLOW_FIELD_L2_PAYLOAD           = 63,
        ICE_AVF_FLOW_FIELD_MAX
};

/* Supported RSS offloads. This macro is defined to support the
 * VIRTCHNL_OP_GET_RSS_HENA_CAPS op: the PF driver sends these RSS hardware
 * capabilities to the caller of the op.
 */
#define ICE_DEFAULT_RSS_HENA ( \
        BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
        BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
        BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
        BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
        BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
        BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
        BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
        BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
        BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
        BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
        BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
        BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
        BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
        BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
        BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
        BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
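
/* Usage sketch (editorial example, not part of the original header): a HENA
 * value is a 64-bit mask indexed by enum ice_flow_avf_hdr_field, so checking
 * whether a flow type is enabled is a simple bit test:
 *
 *      u64 hena = ICE_DEFAULT_RSS_HENA;
 *
 *      if (hena & BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
 *              enable_ipv4_tcp_rss();  (hypothetical helper)
 */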

enum ice_flow_dir {
        ICE_FLOW_DIR_UNDEFINED  = 0,
        ICE_FLOW_TX             = 0x01,
        ICE_FLOW_RX             = 0x02,
        ICE_FLOW_TX_RX          = ICE_FLOW_RX | ICE_FLOW_TX
};

enum ice_flow_priority {
        ICE_FLOW_PRIO_LOW,
        ICE_FLOW_PRIO_NORMAL,
        ICE_FLOW_PRIO_HIGH
};

#define ICE_FLOW_SEG_MAX                2
#define ICE_FLOW_SEG_RAW_FLD_MAX        2
#define ICE_FLOW_PROFILE_MAX            1024
#define ICE_FLOW_SW_FIELD_VECTOR_MAX    48
#define ICE_FLOW_ACL_FIELD_VECTOR_MAX   32
#define ICE_FLOW_FV_EXTRACT_SZ          2

#define ICE_FLOW_SET_HDRS(seg, val)     ((seg)->hdrs |= (u32)(val))

struct ice_flow_seg_xtrct {
        u8 prot_id;     /* Protocol ID of extracted header field */
        u16 off;        /* Starting offset of the field in header in bytes */
        u8 idx;         /* Index of FV entry used */
        u8 disp;        /* Displacement of field in bits fr. FV entry's start */
        u16 mask;       /* Mask for field */
};

enum ice_flow_fld_match_type {
        ICE_FLOW_FLD_TYPE_REG,          /* Value, mask */
        ICE_FLOW_FLD_TYPE_RANGE,        /* Value, mask, last (upper bound) */
        ICE_FLOW_FLD_TYPE_PREFIX,       /* IP address, prefix, size of prefix */
        ICE_FLOW_FLD_TYPE_SIZE,         /* Value, mask, size of match */
};

struct ice_flow_fld_loc {
        /* Describe offsets of field information relative to the beginning of
         * the input buffer provided when adding flow entries.
         */
        u16 val;        /* Offset where the value is located */
        u16 mask;       /* Offset where the mask/prefix value is located */
        u16 last;       /* Length or offset where the upper value is located */
};
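
/* Usage sketch (editorial example, not part of the original header): val,
 * mask and last are byte offsets into the caller's input buffer, i.e. the
 * same offsets passed as val_loc/mask_loc/last_loc to ice_flow_set_fld()
 * declared below. The buffer layout here is purely hypothetical:
 *
 *      struct match_buf {
 *              u32 ipv4_sa;
 *              u32 ipv4_sa_mask;
 *      } buf;
 *
 *      ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *                       offsetof(struct match_buf, ipv4_sa),
 *                       offsetof(struct match_buf, ipv4_sa_mask),
 *                       ICE_FLOW_FLD_OFF_INVAL, false);
 */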

struct ice_flow_fld_info {
        enum ice_flow_fld_match_type type;
        /* Location where to retrieve data from an input buffer */
        struct ice_flow_fld_loc src;
        /* Location where to put the data into the final entry buffer */
        struct ice_flow_fld_loc entry;
        struct ice_flow_seg_xtrct xtrct;
};

struct ice_flow_seg_fld_raw {
        struct ice_flow_fld_info info;
        u16 off;        /* Offset from the start of the segment */
};

struct ice_flow_seg_info {
        u32 hdrs;       /* Bitmask indicating protocol headers present */
        u64 match;      /* Bitmask indicating header fields to be matched */
        u64 range;      /* Bitmask indicating header fields matched as ranges */

        struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];

        u8 raws_cnt;    /* Number of raw fields to be matched */
        struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
};
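
/* Usage sketch (editorial example, not part of the original header): a flow
 * profile is described by up to ICE_FLOW_SEG_MAX of these segments. A caller
 * typically zeroes a segment, records the protocol headers it covers and then
 * marks the fields of interest; ICE_FLOW_FLD_OFF_INVAL for all locations
 * means the field is matched/hashed without caller-supplied data, as RSS does:
 *
 *      struct ice_flow_seg_info seg = { 0 };
 *
 *      ICE_FLOW_SET_HDRS(&seg, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP);
 *      ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 *                       ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *                       ICE_FLOW_FLD_OFF_INVAL, false);
 */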

/* This structure describes a flow entry, and is tracked only in this file */
struct ice_flow_entry {
        struct LIST_ENTRY_TYPE l_entry;

        u64 id;
        struct ice_flow_prof *prof;
        /* Action list */
        struct ice_flow_action *acts;
        /* Flow entry's content */
        void *entry;
        /* Range buffer (For ACL only) */
        struct ice_aqc_acl_profile_ranges *range_buf;
        enum ice_flow_priority priority;
        u16 vsi_handle;
        u16 entry_sz;
        /* Entry index in the ACL's scenario */
        u16 scen_entry_idx;
#define ICE_FLOW_ACL_MAX_NUM_ACT        2
        u8 acts_cnt;
};

#define ICE_FLOW_ENTRY_HNDL(e)  ((unsigned long)e)
#define ICE_FLOW_ENTRY_PTR(h)   ((struct ice_flow_entry *)(h))

struct ice_flow_prof {
        struct LIST_ENTRY_TYPE l_entry;

        u64 id;
        enum ice_flow_dir dir;
        u8 segs_cnt;
        u8 acts_cnt;

        /* Keep track of flow entries associated with this flow profile */
        struct ice_lock entries_lock;
        struct LIST_HEAD_TYPE entries;

        struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];

        /* software VSI handles referenced by this flow profile */
        ice_declare_bitmap(vsis, ICE_MAX_VSI);

        union {
                /* struct sw_recipe */
                struct ice_acl_scen *scen;
                /* struct fd */
                u32 data;
                /* Symmetric Hash for RSS */
                bool symm;
        } cfg;

        /* Default actions */
        struct ice_flow_action *acts;
};

struct ice_rss_cfg {
        struct LIST_ENTRY_TYPE l_entry;
        /* bitmap of VSIs added to the RSS entry */
        ice_declare_bitmap(vsis, ICE_MAX_VSI);
        u64 hashed_flds;
        u32 packet_hdr;
        bool symm;
};

enum ice_flow_action_type {
        ICE_FLOW_ACT_NOP,
        ICE_FLOW_ACT_ALLOW,
        ICE_FLOW_ACT_DROP,
        ICE_FLOW_ACT_CNTR_PKT,
        ICE_FLOW_ACT_FWD_VSI,
        ICE_FLOW_ACT_FWD_VSI_LIST,      /* Should be abstracted away */
        ICE_FLOW_ACT_FWD_QUEUE,         /* Can Queues be abstracted away? */
        ICE_FLOW_ACT_FWD_QUEUE_GROUP,   /* Can Queues be abstracted away? */
        ICE_FLOW_ACT_PUSH,
        ICE_FLOW_ACT_POP,
        ICE_FLOW_ACT_MODIFY,
        ICE_FLOW_ACT_CNTR_BYTES,
        ICE_FLOW_ACT_CNTR_PKT_BYTES,
        ICE_FLOW_ACT_GENERIC_0,
        ICE_FLOW_ACT_GENERIC_1,
        ICE_FLOW_ACT_GENERIC_2,
        ICE_FLOW_ACT_GENERIC_3,
        ICE_FLOW_ACT_GENERIC_4,
        ICE_FLOW_ACT_RPT_FLOW_ID,
        ICE_FLOW_ACT_BUILD_PROF_IDX,
};

struct ice_flow_action {
        enum ice_flow_action_type type;
        union {
                struct ice_acl_act_entry acl_act;
                u32 dummy;
        } data;
};

u64
ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
                   struct ice_flow_seg_info *segs, u8 segs_cnt);
enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
                  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
                  struct ice_flow_action *acts, u8 acts_cnt,
                  struct ice_flow_prof **prof);
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
enum ice_status
ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
                        u16 vsig);
enum ice_status
ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
                     u8 *hw_prof);

u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id);
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
                   u64 entry_id, u16 vsi, enum ice_flow_priority prio,
                   void *data, struct ice_flow_action *acts, u8 acts_cnt,
                   u64 *entry_h);
enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
                                   u64 entry_h);
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
                 u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
void
ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
                        u16 val_loc, u16 prefix_loc, u8 prefix_sz);
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
                     u16 val_loc, u16 mask_loc);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
                u32 addl_hdrs, bool symm);
enum ice_status
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
                u32 addl_hdrs);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
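
/* Usage sketch tying this header to the "net/ice: fix RSS for GTPU" change
 * (editorial example, not part of the original header): enabling RSS on the
 * IPv4 addresses of GTP-U traffic carrying a PDU session extension header
 * could look roughly like the call below; error handling is omitted and the
 * symmetric-hash choice is arbitrary:
 *
 *      enum ice_status status;
 *
 *      status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
 *                               ICE_FLOW_SEG_HDR_GTPU_EH |
 *                               ICE_FLOW_SEG_HDR_IPV4, false);
 */
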
#endif /* _ICE_FLOW_H_ */