net/iavf: enable Rx timestamp on flex descriptor
[dpdk.git] / drivers / net / iavf / iavf_fdir.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
15 #include <rte_malloc.h>
16 #include <rte_tailq.h>
17
18 #include "iavf.h"
19 #include "iavf_generic_flow.h"
20 #include "virtchnl.h"
21 #include "iavf_rxtx.h"
22
/* Largest RSS queue region a single FDIR rule may target (power of 2). */
#define IAVF_FDIR_MAX_QREGION_SIZE 128

/* IPv6 Traffic Class field position inside the first vtc_flow 32-bit word. */
#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

/* GTP-U extension header PDU type: downlink vs. uplink. */
#define IAVF_GTPU_EH_DWLINK 0
#define IAVF_GTPU_EH_UPLINK 1

/*
 * Supported input-set bitmaps, one per flow pattern registered in
 * iavf_fdir_pattern[] below.  Each bitmap lists the match fields a rule
 * for that pattern may set; anything outside the mask is rejected.
 */
#define IAVF_FDIR_INSET_ETH (\
	IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_ID)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
	IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT (\
	IAVF_FDIR_INSET_ETH_IPV6 | IAVF_INSET_IPV6_ID)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

/* GTP-U tunnel patterns: outer IP + TEID (and QFI for the EH variants);
 * the _GTPU_IPV4/_IPV6 sets below match the inner (tunnelled) headers.
 */
#define IAVF_FDIR_INSET_IPV4_GTPU (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV4 (\
	IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
	IAVF_INSET_TUN_IPV4_PROTO | IAVF_INSET_TUN_IPV4_TOS | \
	IAVF_INSET_TUN_IPV4_TTL)

#define IAVF_FDIR_INSET_GTPU_IPV4_UDP (\
	IAVF_FDIR_INSET_GTPU_IPV4 | \
	IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV4_TCP (\
	IAVF_FDIR_INSET_GTPU_IPV4 | \
	IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_IPV6_GTPU (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV6 (\
	IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
	IAVF_INSET_TUN_IPV6_NEXT_HDR | IAVF_INSET_TUN_IPV6_TC | \
	IAVF_INSET_TUN_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_GTPU_IPV6_UDP (\
	IAVF_FDIR_INSET_GTPU_IPV6 | \
	IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV6_TCP (\
	IAVF_FDIR_INSET_GTPU_IPV6 | \
	IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

/* Tunnelling / security protocol patterns. */
#define IAVF_FDIR_INSET_L2TPV3OIP (\
	IAVF_L2TPV3OIP_SESSION_ID)

#define IAVF_FDIR_INSET_IPV4_ESP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_ESP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
	IAVF_INSET_AH_SPI)

#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_PFCP (\
	IAVF_INSET_PFCP_S_FIELD)

#define IAVF_FDIR_INSET_ECPRI (\
	IAVF_INSET_ECPRI)

/* GRE tunnel patterns: inner header match fields. */
#define IAVF_FDIR_INSET_GRE_IPV4 (\
	IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
	IAVF_INSET_TUN_IPV4_TOS | IAVF_INSET_TUN_IPV4_PROTO)

#define IAVF_FDIR_INSET_GRE_IPV4_TCP (\
	IAVF_FDIR_INSET_GRE_IPV4 | IAVF_INSET_TUN_TCP_SRC_PORT | \
	IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV4_UDP (\
	IAVF_FDIR_INSET_GRE_IPV4 | IAVF_INSET_TUN_UDP_SRC_PORT | \
	IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV6 (\
	IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
	IAVF_INSET_TUN_IPV6_TC | IAVF_INSET_TUN_IPV6_NEXT_HDR)

#define IAVF_FDIR_INSET_GRE_IPV6_TCP (\
	IAVF_FDIR_INSET_GRE_IPV6 | IAVF_INSET_TUN_TCP_SRC_PORT | \
	IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV6_UDP (\
	IAVF_FDIR_INSET_GRE_IPV6 | IAVF_INSET_TUN_UDP_SRC_PORT | \
	IAVF_INSET_TUN_UDP_DST_PORT)

/* L2TPv2 (over UDP) patterns, optionally with PPP and inner IP/L4. */
#define IAVF_FDIR_INSET_L2TPV2 (\
	IAVF_INSET_SMAC | IAVF_INSET_DMAC | IAVF_INSET_L2TPV2)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV4 (\
	IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_UDP (\
	IAVF_FDIR_INSET_L2TPV2_PPP_IPV4 | IAVF_INSET_TUN_UDP_SRC_PORT | \
	IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_TCP (\
	IAVF_FDIR_INSET_L2TPV2_PPP_IPV4 | IAVF_INSET_TUN_TCP_SRC_PORT | \
	IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV6 (\
	IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_UDP (\
	IAVF_FDIR_INSET_L2TPV2_PPP_IPV6 | IAVF_INSET_TUN_UDP_SRC_PORT | \
	IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_TCP (\
	IAVF_FDIR_INSET_L2TPV2_PPP_IPV6 | IAVF_INSET_TUN_TCP_SRC_PORT | \
	IAVF_INSET_TUN_TCP_DST_PORT)
/* Flow patterns supported by the FDIR engine.  Each entry pairs an
 * rte_flow item sequence with the input-set mask of fields a rule for
 * that pattern may match on (third column is the outer/meta input set,
 * unused here).
 */
static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
	{iavf_pattern_ethertype,                 IAVF_FDIR_INSET_ETH,           IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4,                  IAVF_FDIR_INSET_ETH_IPV4,      IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp,              IAVF_FDIR_INSET_ETH_IPV4_UDP,  IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_tcp,              IAVF_FDIR_INSET_ETH_IPV4_TCP,  IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_sctp,             IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6,                  IAVF_FDIR_INSET_ETH_IPV6,      IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_frag_ext,        IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT,      IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp,              IAVF_FDIR_INSET_ETH_IPV6_UDP,  IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_tcp,              IAVF_FDIR_INSET_ETH_IPV6_TCP,  IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_sctp,             IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu,             IAVF_FDIR_INSET_IPV4_GTPU,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv4,        IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp,    IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,    IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv6,        IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv6_udp,    IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,    IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh,          IAVF_FDIR_INSET_IPV4_GTPU_EH,  IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4,     IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6,     IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu,            IAVF_FDIR_INSET_IPV4_GTPU,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu,            IAVF_FDIR_INSET_IPV4_GTPU,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu,            IAVF_FDIR_INSET_IPV6_GTPU,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu,            IAVF_FDIR_INSET_IPV6_GTPU,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh,                 IAVF_FDIR_INSET_IPV4_GTPU_EH,  IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4,            IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_udp,        IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_tcp,        IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6,            IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_udp,        IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_tcp,        IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh,                 IAVF_FDIR_INSET_IPV4_GTPU_EH,  IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4,            IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_udp,        IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_tcp,        IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6,            IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_udp,        IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_tcp,        IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh,                 IAVF_FDIR_INSET_IPV6_GTPU_EH,  IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4,            IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_udp,        IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_tcp,        IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6,            IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_udp,        IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_tcp,        IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh,                 IAVF_FDIR_INSET_IPV6_GTPU_EH,  IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4,            IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_udp,        IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_tcp,        IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6,            IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_udp,        IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_tcp,        IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gtpu,             IAVF_FDIR_INSET_IPV6_GTPU,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gtpu_eh,          IAVF_FDIR_INSET_IPV6_GTPU_EH,  IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_esp,              IAVF_FDIR_INSET_IPV4_ESP,      IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_esp,              IAVF_FDIR_INSET_IPV6_ESP,      IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_ah,               IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_ah,               IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_esp,          IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_esp,          IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_pfcp,             IAVF_FDIR_INSET_PFCP,          IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_pfcp,             IAVF_FDIR_INSET_PFCP,          IAVF_INSET_NONE},
	{iavf_pattern_eth_ecpri,                 IAVF_FDIR_INSET_ECPRI,         IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_ecpri,            IAVF_FDIR_INSET_ECPRI,         IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4,        IAVF_FDIR_INSET_GRE_IPV4,       IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_tcp,    IAVF_FDIR_INSET_GRE_IPV4_TCP,   IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_udp,    IAVF_FDIR_INSET_GRE_IPV4_UDP,   IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6,        IAVF_FDIR_INSET_GRE_IPV6,       IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_tcp,    IAVF_FDIR_INSET_GRE_IPV6_TCP,   IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_udp,    IAVF_FDIR_INSET_GRE_IPV6_UDP,   IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4,        IAVF_FDIR_INSET_GRE_IPV4,       IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_tcp,    IAVF_FDIR_INSET_GRE_IPV4_TCP,   IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_udp,    IAVF_FDIR_INSET_GRE_IPV4_UDP,   IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6,        IAVF_FDIR_INSET_GRE_IPV6,       IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_tcp,    IAVF_FDIR_INSET_GRE_IPV6_TCP,   IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_udp,    IAVF_FDIR_INSET_GRE_IPV6_UDP,   IAVF_INSET_NONE},

	{iavf_pattern_eth_ipv4_udp_l2tpv2,              IAVF_FDIR_INSET_L2TPV2,                 IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp,          IAVF_FDIR_INSET_L2TPV2,                 IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_l2tpv2,              IAVF_FDIR_INSET_L2TPV2,                 IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_l2tpv2_ppp,          IAVF_FDIR_INSET_L2TPV2,                 IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv4,     IAVF_FDIR_INSET_L2TPV2_PPP_IPV4,        IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv4_udp, IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_UDP,    IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv4_tcp, IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_TCP,    IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv4,     IAVF_FDIR_INSET_L2TPV2_PPP_IPV4,        IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv4_udp, IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_UDP,    IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv4_tcp, IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_TCP,    IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv6,     IAVF_FDIR_INSET_L2TPV2_PPP_IPV6,        IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv6_udp, IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_UDP,    IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv6_tcp, IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_TCP,    IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6,     IAVF_FDIR_INSET_L2TPV2_PPP_IPV6,        IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6_udp, IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_UDP,    IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6_tcp, IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_TCP,    IAVF_INSET_NONE},
};
321
322 static struct iavf_flow_parser iavf_fdir_parser;
323
324 static int
325 iavf_fdir_init(struct iavf_adapter *ad)
326 {
327         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
328         struct iavf_flow_parser *parser;
329
330         if (!vf->vf_res)
331                 return -EINVAL;
332
333         if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
334                 parser = &iavf_fdir_parser;
335         else
336                 return -ENOTSUP;
337
338         return iavf_register_parser(parser, ad);
339 }
340
/* Unregister the FDIR rule parser from this adapter (inverse of init). */
static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
	iavf_unregister_parser(&iavf_fdir_parser, ad);
}
346
347 static int
348 iavf_fdir_create(struct iavf_adapter *ad,
349                 struct rte_flow *flow,
350                 void *meta,
351                 struct rte_flow_error *error)
352 {
353         struct iavf_fdir_conf *filter = meta;
354         struct iavf_fdir_conf *rule;
355         int ret;
356
357         rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
358         if (!rule) {
359                 rte_flow_error_set(error, ENOMEM,
360                                 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
361                                 "Failed to allocate memory for fdir rule");
362                 return -rte_errno;
363         }
364
365         ret = iavf_fdir_add(ad, filter);
366         if (ret) {
367                 rte_flow_error_set(error, -ret,
368                                 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
369                                 "Failed to add filter rule.");
370                 goto free_entry;
371         }
372
373         if (filter->mark_flag == 1)
374                 iavf_fdir_rx_proc_enable(ad, 1);
375
376         rte_memcpy(rule, filter, sizeof(*rule));
377         flow->rule = rule;
378
379         return 0;
380
381 free_entry:
382         rte_free(rule);
383         return -rte_errno;
384 }
385
386 static int
387 iavf_fdir_destroy(struct iavf_adapter *ad,
388                 struct rte_flow *flow,
389                 struct rte_flow_error *error)
390 {
391         struct iavf_fdir_conf *filter;
392         int ret;
393
394         filter = (struct iavf_fdir_conf *)flow->rule;
395
396         ret = iavf_fdir_del(ad, filter);
397         if (ret) {
398                 rte_flow_error_set(error, -ret,
399                                 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
400                                 "Failed to delete filter rule.");
401                 return -rte_errno;
402         }
403
404         if (filter->mark_flag == 1)
405                 iavf_fdir_rx_proc_enable(ad, 0);
406
407         flow->rule = NULL;
408         rte_free(filter);
409
410         return 0;
411 }
412
413 static int
414 iavf_fdir_validation(struct iavf_adapter *ad,
415                 __rte_unused struct rte_flow *flow,
416                 void *meta,
417                 struct rte_flow_error *error)
418 {
419         struct iavf_fdir_conf *filter = meta;
420         int ret;
421
422         ret = iavf_fdir_check(ad, filter);
423         if (ret) {
424                 rte_flow_error_set(error, -ret,
425                                 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
426                                 "Failed to validate filter rule.");
427                 return -rte_errno;
428         }
429
430         return 0;
431 };
432
/* FDIR flow-engine ops registered with the generic iavf flow framework. */
static struct iavf_flow_engine iavf_fdir_engine = {
	.init = iavf_fdir_init,
	.uninit = iavf_fdir_uninit,
	.create = iavf_fdir_create,
	.destroy = iavf_fdir_destroy,
	.validation = iavf_fdir_validation,
	.type = IAVF_FLOW_ENGINE_FDIR,
};
441
442 static int
443 iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
444                         struct rte_flow_error *error,
445                         const struct rte_flow_action *act,
446                         struct virtchnl_filter_action *filter_action)
447 {
448         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
449         const struct rte_flow_action_rss *rss = act->conf;
450         uint32_t i;
451
452         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
453                 rte_flow_error_set(error, EINVAL,
454                                 RTE_FLOW_ERROR_TYPE_ACTION, act,
455                                 "Invalid action.");
456                 return -rte_errno;
457         }
458
459         if (rss->queue_num <= 1) {
460                 rte_flow_error_set(error, EINVAL,
461                                 RTE_FLOW_ERROR_TYPE_ACTION, act,
462                                 "Queue region size can't be 0 or 1.");
463                 return -rte_errno;
464         }
465
466         /* check if queue index for queue region is continuous */
467         for (i = 0; i < rss->queue_num - 1; i++) {
468                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
469                         rte_flow_error_set(error, EINVAL,
470                                         RTE_FLOW_ERROR_TYPE_ACTION, act,
471                                         "Discontinuous queue region");
472                         return -rte_errno;
473                 }
474         }
475
476         if (rss->queue[rss->queue_num - 1] >= ad->dev_data->nb_rx_queues) {
477                 rte_flow_error_set(error, EINVAL,
478                                 RTE_FLOW_ERROR_TYPE_ACTION, act,
479                                 "Invalid queue region indexes.");
480                 return -rte_errno;
481         }
482
483         if (!(rte_is_power_of_2(rss->queue_num) &&
484                 rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
485                 rte_flow_error_set(error, EINVAL,
486                                 RTE_FLOW_ERROR_TYPE_ACTION, act,
487                                 "The region size should be any of the following values:"
488                                 "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
489                                 "of queues do not exceed the VSI allocation.");
490                 return -rte_errno;
491         }
492
493         if (rss->queue_num > vf->max_rss_qregion) {
494                 rte_flow_error_set(error, EINVAL,
495                                 RTE_FLOW_ERROR_TYPE_ACTION, act,
496                                 "The region size cannot be large than the supported max RSS queue region");
497                 return -rte_errno;
498         }
499
500         filter_action->act_conf.queue.index = rss->queue[0];
501         filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;
502
503         return 0;
504 }
505
506 static int
507 iavf_fdir_parse_action(struct iavf_adapter *ad,
508                         const struct rte_flow_action actions[],
509                         struct rte_flow_error *error,
510                         struct iavf_fdir_conf *filter)
511 {
512         const struct rte_flow_action_queue *act_q;
513         const struct rte_flow_action_mark *mark_spec = NULL;
514         uint32_t dest_num = 0;
515         uint32_t mark_num = 0;
516         int ret;
517
518         int number = 0;
519         struct virtchnl_filter_action *filter_action;
520
521         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
522                 switch (actions->type) {
523                 case RTE_FLOW_ACTION_TYPE_VOID:
524                         break;
525
526                 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
527                         dest_num++;
528
529                         filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
530
531                         filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
532
533                         filter->add_fltr.rule_cfg.action_set.count = ++number;
534                         break;
535
536                 case RTE_FLOW_ACTION_TYPE_DROP:
537                         dest_num++;
538
539                         filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
540
541                         filter_action->type = VIRTCHNL_ACTION_DROP;
542
543                         filter->add_fltr.rule_cfg.action_set.count = ++number;
544                         break;
545
546                 case RTE_FLOW_ACTION_TYPE_QUEUE:
547                         dest_num++;
548
549                         act_q = actions->conf;
550                         filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
551
552                         filter_action->type = VIRTCHNL_ACTION_QUEUE;
553                         filter_action->act_conf.queue.index = act_q->index;
554
555                         if (filter_action->act_conf.queue.index >=
556                                 ad->dev_data->nb_rx_queues) {
557                                 rte_flow_error_set(error, EINVAL,
558                                         RTE_FLOW_ERROR_TYPE_ACTION,
559                                         actions, "Invalid queue for FDIR.");
560                                 return -rte_errno;
561                         }
562
563                         filter->add_fltr.rule_cfg.action_set.count = ++number;
564                         break;
565
566                 case RTE_FLOW_ACTION_TYPE_RSS:
567                         dest_num++;
568
569                         filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
570
571                         filter_action->type = VIRTCHNL_ACTION_Q_REGION;
572
573                         ret = iavf_fdir_parse_action_qregion(ad,
574                                                 error, actions, filter_action);
575                         if (ret)
576                                 return ret;
577
578                         filter->add_fltr.rule_cfg.action_set.count = ++number;
579                         break;
580
581                 case RTE_FLOW_ACTION_TYPE_MARK:
582                         mark_num++;
583
584                         filter->mark_flag = 1;
585                         mark_spec = actions->conf;
586                         filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
587
588                         filter_action->type = VIRTCHNL_ACTION_MARK;
589                         filter_action->act_conf.mark_id = mark_spec->id;
590
591                         filter->add_fltr.rule_cfg.action_set.count = ++number;
592                         break;
593
594                 default:
595                         rte_flow_error_set(error, EINVAL,
596                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
597                                         "Invalid action.");
598                         return -rte_errno;
599                 }
600         }
601
602         if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
603                 rte_flow_error_set(error, EINVAL,
604                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
605                         "Action numbers exceed the maximum value");
606                 return -rte_errno;
607         }
608
609         if (dest_num >= 2) {
610                 rte_flow_error_set(error, EINVAL,
611                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
612                         "Unsupported action combination");
613                 return -rte_errno;
614         }
615
616         if (mark_num >= 2) {
617                 rte_flow_error_set(error, EINVAL,
618                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
619                         "Too many mark actions");
620                 return -rte_errno;
621         }
622
623         if (dest_num + mark_num == 0) {
624                 rte_flow_error_set(error, EINVAL,
625                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
626                         "Empty action");
627                 return -rte_errno;
628         }
629
630         /* Mark only is equal to mark + passthru. */
631         if (dest_num == 0) {
632                 filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
633                 filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
634                 filter->add_fltr.rule_cfg.action_set.count = ++number;
635         }
636
637         return 0;
638 }
639
640 static bool
641 iavf_fdir_refine_input_set(const uint64_t input_set,
642                            const uint64_t input_set_mask,
643                            struct iavf_fdir_conf *filter)
644 {
645         struct virtchnl_proto_hdr *hdr, *hdr_last;
646         struct rte_flow_item_ipv4 ipv4_spec;
647         struct rte_flow_item_ipv6 ipv6_spec;
648         int last_layer;
649         uint8_t proto_id;
650
651         if (input_set & ~input_set_mask)
652                 return false;
653         else if (input_set)
654                 return true;
655
656         last_layer = filter->add_fltr.rule_cfg.proto_hdrs.count - 1;
657         /* Last layer of TCP/UDP pattern isn't less than 2. */
658         if (last_layer < 2)
659                 return false;
660         hdr_last = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer];
661         if (hdr_last->type == VIRTCHNL_PROTO_HDR_TCP)
662                 proto_id = 6;
663         else if (hdr_last->type == VIRTCHNL_PROTO_HDR_UDP)
664                 proto_id = 17;
665         else
666                 return false;
667
668         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer - 1];
669         switch (hdr->type) {
670         case VIRTCHNL_PROTO_HDR_IPV4:
671                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
672                 memset(&ipv4_spec, 0, sizeof(ipv4_spec));
673                 ipv4_spec.hdr.next_proto_id = proto_id;
674                 rte_memcpy(hdr->buffer, &ipv4_spec.hdr,
675                            sizeof(ipv4_spec.hdr));
676                 return true;
677         case VIRTCHNL_PROTO_HDR_IPV6:
678                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
679                 memset(&ipv6_spec, 0, sizeof(ipv6_spec));
680                 ipv6_spec.hdr.proto = proto_id;
681                 rte_memcpy(hdr->buffer, &ipv6_spec.hdr,
682                            sizeof(ipv6_spec.hdr));
683                 return true;
684         default:
685                 return false;
686         }
687 }
688
689 static void
690 iavf_fdir_add_fragment_hdr(struct virtchnl_proto_hdrs *hdrs, int layer)
691 {
692         struct virtchnl_proto_hdr *hdr1;
693         struct virtchnl_proto_hdr *hdr2;
694         int i;
695
696         if (layer < 0 || layer > hdrs->count)
697                 return;
698
699         /* shift headers layer */
700         for (i = hdrs->count; i >= layer; i--) {
701                 hdr1 = &hdrs->proto_hdr[i];
702                 hdr2 = &hdrs->proto_hdr[i - 1];
703                 *hdr1 = *hdr2;
704         }
705
706         /* adding dummy fragment header */
707         hdr1 = &hdrs->proto_hdr[layer];
708         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, IPV4_FRAG);
709         hdr1->field_selector = 0;
710         hdrs->count = ++layer;
711 }
712
713 static int
714 iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
715                         const struct rte_flow_item pattern[],
716                         const uint64_t input_set_mask,
717                         struct rte_flow_error *error,
718                         struct iavf_fdir_conf *filter)
719 {
720         struct virtchnl_proto_hdrs *hdrs =
721                         &filter->add_fltr.rule_cfg.proto_hdrs;
722         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
723         const struct rte_flow_item_eth *eth_spec, *eth_mask;
724         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
725         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
726         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec;
727         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_mask;
728         const struct rte_flow_item_udp *udp_spec, *udp_mask;
729         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
730         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
731         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
732         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
733         const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
734         const struct rte_flow_item_esp *esp_spec, *esp_mask;
735         const struct rte_flow_item_ah *ah_spec, *ah_mask;
736         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
737         const struct rte_flow_item_ecpri *ecpri_spec, *ecpri_mask;
738         const struct rte_flow_item_gre *gre_spec, *gre_mask;
739         const struct rte_flow_item_l2tpv2 *l2tpv2_spec, *l2tpv2_mask;
740         const struct rte_flow_item_ppp *ppp_spec, *ppp_mask;
741         const struct rte_flow_item *item = pattern;
742         struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
743         struct rte_ecpri_common_hdr ecpri_common;
744         uint64_t input_set = IAVF_INSET_NONE;
745         enum rte_flow_item_type item_type;
746         enum rte_flow_item_type next_type;
747         uint8_t tun_inner = 0;
748         uint16_t ether_type, flags_version;
749         int layer = 0;
750
751         uint8_t  ipv6_addr_mask[16] = {
752                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
753                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
754         };
755
756         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
757                 item_type = item->type;
758
759                 if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
760                                     item_type ==
761                                     RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
762                         rte_flow_error_set(error, EINVAL,
763                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
764                                            "Not support range");
765                 }
766
767                 switch (item_type) {
768                 case RTE_FLOW_ITEM_TYPE_ETH:
769                         eth_spec = item->spec;
770                         eth_mask = item->mask;
771                         next_type = (item + 1)->type;
772
773                         hdr1 = &hdrs->proto_hdr[layer];
774
775                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);
776
777                         if (next_type == RTE_FLOW_ITEM_TYPE_END &&
778                             (!eth_spec || !eth_mask)) {
779                                 rte_flow_error_set(error, EINVAL,
780                                                 RTE_FLOW_ERROR_TYPE_ITEM,
781                                                 item, "NULL eth spec/mask.");
782                                 return -rte_errno;
783                         }
784
785                         if (eth_spec && eth_mask) {
786                                 if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
787                                         input_set |= IAVF_INSET_DMAC;
788                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
789                                                                         ETH,
790                                                                         DST);
791                                 } else if (!rte_is_zero_ether_addr(&eth_mask->src)) {
792                                         input_set |= IAVF_INSET_SMAC;
793                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
794                                                                         ETH,
795                                                                         SRC);
796                                 }
797
798                                 if (eth_mask->type) {
799                                         if (eth_mask->type != RTE_BE16(0xffff)) {
800                                                 rte_flow_error_set(error, EINVAL,
801                                                         RTE_FLOW_ERROR_TYPE_ITEM,
802                                                         item, "Invalid type mask.");
803                                                 return -rte_errno;
804                                         }
805
806                                         ether_type = rte_be_to_cpu_16(eth_spec->type);
807                                         if (ether_type == RTE_ETHER_TYPE_IPV4 ||
808                                                 ether_type == RTE_ETHER_TYPE_IPV6) {
809                                                 rte_flow_error_set(error, EINVAL,
810                                                         RTE_FLOW_ERROR_TYPE_ITEM,
811                                                         item,
812                                                         "Unsupported ether_type.");
813                                                 return -rte_errno;
814                                         }
815
816                                         input_set |= IAVF_INSET_ETHERTYPE;
817                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
818                                                                         ETHERTYPE);
819                                 }
820
821                                 rte_memcpy(hdr1->buffer, eth_spec,
822                                            sizeof(struct rte_ether_hdr));
823                         }
824
825                         hdrs->count = ++layer;
826                         break;
827
828                 case RTE_FLOW_ITEM_TYPE_IPV4:
829                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
830                         ipv4_spec = item->spec;
831                         ipv4_last = item->last;
832                         ipv4_mask = item->mask;
833                         next_type = (item + 1)->type;
834
835                         hdr = &hdrs->proto_hdr[layer];
836
837                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
838
839                         if (!(ipv4_spec && ipv4_mask)) {
840                                 hdrs->count = ++layer;
841                                 break;
842                         }
843
844                         if (ipv4_mask->hdr.version_ihl ||
845                             ipv4_mask->hdr.total_length ||
846                             ipv4_mask->hdr.hdr_checksum) {
847                                 rte_flow_error_set(error, EINVAL,
848                                                    RTE_FLOW_ERROR_TYPE_ITEM,
849                                                    item, "Invalid IPv4 mask.");
850                                 return -rte_errno;
851                         }
852
853                         if (ipv4_last &&
854                             (ipv4_last->hdr.version_ihl ||
855                              ipv4_last->hdr.type_of_service ||
856                              ipv4_last->hdr.time_to_live ||
857                              ipv4_last->hdr.total_length |
858                              ipv4_last->hdr.next_proto_id ||
859                              ipv4_last->hdr.hdr_checksum ||
860                              ipv4_last->hdr.src_addr ||
861                              ipv4_last->hdr.dst_addr)) {
862                                 rte_flow_error_set(error, EINVAL,
863                                                    RTE_FLOW_ERROR_TYPE_ITEM,
864                                                    item, "Invalid IPv4 last.");
865                                 return -rte_errno;
866                         }
867
868                         if (ipv4_mask->hdr.type_of_service ==
869                             UINT8_MAX) {
870                                 input_set |= IAVF_INSET_IPV4_TOS;
871                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
872                                                                  DSCP);
873                         }
874
875                         if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
876                                 input_set |= IAVF_INSET_IPV4_PROTO;
877                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
878                                                                  PROT);
879                         }
880
881                         if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
882                                 input_set |= IAVF_INSET_IPV4_TTL;
883                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
884                                                                  TTL);
885                         }
886
887                         if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
888                                 input_set |= IAVF_INSET_IPV4_SRC;
889                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
890                                                                  SRC);
891                         }
892
893                         if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
894                                 input_set |= IAVF_INSET_IPV4_DST;
895                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
896                                                                  DST);
897                         }
898
899                         if (tun_inner) {
900                                 input_set &= ~IAVF_PROT_IPV4_OUTER;
901                                 input_set |= IAVF_PROT_IPV4_INNER;
902                         }
903
904                         rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
905                                    sizeof(ipv4_spec->hdr));
906
907                         hdrs->count = ++layer;
908
909                         /* fragment Ipv4:
910                          * spec is 0x2000, mask is 0x2000
911                          */
912                         if (ipv4_spec->hdr.fragment_offset ==
913                             rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
914                             ipv4_mask->hdr.fragment_offset ==
915                             rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
916                                 /* all IPv4 fragment packet has the same
917                                  * ethertype, if the spec and mask is valid,
918                                  * set ethertype into input set.
919                                  */
920                                 input_set |= IAVF_INSET_ETHERTYPE;
921                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
922                                                                  ETHERTYPE);
923
924                                 /* add dummy header for IPv4 Fragment */
925                                 iavf_fdir_add_fragment_hdr(hdrs, layer);
926                         } else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
927                                 rte_flow_error_set(error, EINVAL,
928                                                    RTE_FLOW_ERROR_TYPE_ITEM,
929                                                    item, "Invalid IPv4 mask.");
930                                 return -rte_errno;
931                         }
932
933                         break;
934
935                 case RTE_FLOW_ITEM_TYPE_IPV6:
936                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
937                         ipv6_spec = item->spec;
938                         ipv6_mask = item->mask;
939
940                         hdr = &hdrs->proto_hdr[layer];
941
942                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
943
944                         if (!(ipv6_spec && ipv6_mask)) {
945                                 hdrs->count = ++layer;
946                                 break;
947                         }
948
949                         if (ipv6_mask->hdr.payload_len) {
950                                 rte_flow_error_set(error, EINVAL,
951                                                    RTE_FLOW_ERROR_TYPE_ITEM,
952                                                    item, "Invalid IPv6 mask");
953                                 return -rte_errno;
954                         }
955
956                         if ((ipv6_mask->hdr.vtc_flow &
957                               rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
958                              == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
959                                 input_set |= IAVF_INSET_IPV6_TC;
960                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
961                                                                  TC);
962                         }
963
964                         if (ipv6_mask->hdr.proto == UINT8_MAX) {
965                                 input_set |= IAVF_INSET_IPV6_NEXT_HDR;
966                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
967                                                                  PROT);
968                         }
969
970                         if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
971                                 input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
972                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
973                                                                  HOP_LIMIT);
974                         }
975
976                         if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
977                                     RTE_DIM(ipv6_mask->hdr.src_addr))) {
978                                 input_set |= IAVF_INSET_IPV6_SRC;
979                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
980                                                                  SRC);
981                         }
982                         if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
983                                     RTE_DIM(ipv6_mask->hdr.dst_addr))) {
984                                 input_set |= IAVF_INSET_IPV6_DST;
985                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
986                                                                  DST);
987                         }
988
989                         if (tun_inner) {
990                                 input_set &= ~IAVF_PROT_IPV6_OUTER;
991                                 input_set |= IAVF_PROT_IPV6_INNER;
992                         }
993
994                         rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
995                                    sizeof(ipv6_spec->hdr));
996
997                         hdrs->count = ++layer;
998                         break;
999
1000                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
1001                         ipv6_frag_spec = item->spec;
1002                         ipv6_frag_mask = item->mask;
1003                         next_type = (item + 1)->type;
1004
1005                         hdr = &hdrs->proto_hdr[layer];
1006
1007                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6_EH_FRAG);
1008
1009                         if (!(ipv6_frag_spec && ipv6_frag_mask)) {
1010                                 hdrs->count = ++layer;
1011                                 break;
1012                         }
1013
1014                         /* fragment Ipv6:
1015                          * spec is 0x1, mask is 0x1
1016                          */
1017                         if (ipv6_frag_spec->hdr.frag_data ==
1018                             rte_cpu_to_be_16(1) &&
1019                             ipv6_frag_mask->hdr.frag_data ==
1020                             rte_cpu_to_be_16(1)) {
1021                                 /* all IPv6 fragment packet has the same
1022                                  * ethertype, if the spec and mask is valid,
1023                                  * set ethertype into input set.
1024                                  */
1025                                 input_set |= IAVF_INSET_ETHERTYPE;
1026                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
1027                                                                  ETHERTYPE);
1028
1029                                 rte_memcpy(hdr->buffer, &ipv6_frag_spec->hdr,
1030                                            sizeof(ipv6_frag_spec->hdr));
1031                         } else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
1032                                 rte_flow_error_set(error, EINVAL,
1033                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1034                                                    item, "Invalid IPv6 mask.");
1035                                 return -rte_errno;
1036                         }
1037
1038                         hdrs->count = ++layer;
1039                         break;
1040
1041                 case RTE_FLOW_ITEM_TYPE_UDP:
1042                         udp_spec = item->spec;
1043                         udp_mask = item->mask;
1044
1045                         hdr = &hdrs->proto_hdr[layer];
1046
1047                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
1048
1049                         if (udp_spec && udp_mask) {
1050                                 if (udp_mask->hdr.dgram_len ||
1051                                         udp_mask->hdr.dgram_cksum) {
1052                                         rte_flow_error_set(error, EINVAL,
1053                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1054                                                 "Invalid UDP mask");
1055                                         return -rte_errno;
1056                                 }
1057
1058                                 if (udp_mask->hdr.src_port == UINT16_MAX) {
1059                                         input_set |= IAVF_INSET_UDP_SRC_PORT;
1060                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
1061                                 }
1062                                 if (udp_mask->hdr.dst_port == UINT16_MAX) {
1063                                         input_set |= IAVF_INSET_UDP_DST_PORT;
1064                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
1065                                 }
1066
1067                                 if (tun_inner) {
1068                                         input_set &= ~IAVF_PROT_UDP_OUTER;
1069                                         input_set |= IAVF_PROT_UDP_INNER;
1070                                 }
1071
1072                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1073                                         rte_memcpy(hdr->buffer,
1074                                                 &udp_spec->hdr,
1075                                                 sizeof(udp_spec->hdr));
1076                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1077                                         rte_memcpy(hdr->buffer,
1078                                                 &udp_spec->hdr,
1079                                                 sizeof(udp_spec->hdr));
1080                         }
1081
1082                         hdrs->count = ++layer;
1083                         break;
1084
1085                 case RTE_FLOW_ITEM_TYPE_TCP:
1086                         tcp_spec = item->spec;
1087                         tcp_mask = item->mask;
1088
1089                         hdr = &hdrs->proto_hdr[layer];
1090
1091                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
1092
1093                         if (tcp_spec && tcp_mask) {
1094                                 if (tcp_mask->hdr.sent_seq ||
1095                                         tcp_mask->hdr.recv_ack ||
1096                                         tcp_mask->hdr.data_off ||
1097                                         tcp_mask->hdr.tcp_flags ||
1098                                         tcp_mask->hdr.rx_win ||
1099                                         tcp_mask->hdr.cksum ||
1100                                         tcp_mask->hdr.tcp_urp) {
1101                                         rte_flow_error_set(error, EINVAL,
1102                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1103                                                 "Invalid TCP mask");
1104                                         return -rte_errno;
1105                                 }
1106
1107                                 if (tcp_mask->hdr.src_port == UINT16_MAX) {
1108                                         input_set |= IAVF_INSET_TCP_SRC_PORT;
1109                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
1110                                 }
1111                                 if (tcp_mask->hdr.dst_port == UINT16_MAX) {
1112                                         input_set |= IAVF_INSET_TCP_DST_PORT;
1113                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
1114                                 }
1115
1116                                 if (tun_inner) {
1117                                         input_set &= ~IAVF_PROT_TCP_OUTER;
1118                                         input_set |= IAVF_PROT_TCP_INNER;
1119                                 }
1120
1121                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1122                                         rte_memcpy(hdr->buffer,
1123                                                 &tcp_spec->hdr,
1124                                                 sizeof(tcp_spec->hdr));
1125                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1126                                         rte_memcpy(hdr->buffer,
1127                                                 &tcp_spec->hdr,
1128                                                 sizeof(tcp_spec->hdr));
1129                         }
1130
1131                         hdrs->count = ++layer;
1132                         break;
1133
1134                 case RTE_FLOW_ITEM_TYPE_SCTP:
1135                         sctp_spec = item->spec;
1136                         sctp_mask = item->mask;
1137
1138                         hdr = &hdrs->proto_hdr[layer];
1139
1140                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
1141
1142                         if (sctp_spec && sctp_mask) {
1143                                 if (sctp_mask->hdr.cksum) {
1144                                         rte_flow_error_set(error, EINVAL,
1145                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1146                                                 "Invalid UDP mask");
1147                                         return -rte_errno;
1148                                 }
1149
1150                                 if (sctp_mask->hdr.src_port == UINT16_MAX) {
1151                                         input_set |= IAVF_INSET_SCTP_SRC_PORT;
1152                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
1153                                 }
1154                                 if (sctp_mask->hdr.dst_port == UINT16_MAX) {
1155                                         input_set |= IAVF_INSET_SCTP_DST_PORT;
1156                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
1157                                 }
1158
1159                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1160                                         rte_memcpy(hdr->buffer,
1161                                                 &sctp_spec->hdr,
1162                                                 sizeof(sctp_spec->hdr));
1163                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1164                                         rte_memcpy(hdr->buffer,
1165                                                 &sctp_spec->hdr,
1166                                                 sizeof(sctp_spec->hdr));
1167                         }
1168
1169                         hdrs->count = ++layer;
1170                         break;
1171
1172                 case RTE_FLOW_ITEM_TYPE_GTPU:
1173                         gtp_spec = item->spec;
1174                         gtp_mask = item->mask;
1175
1176                         hdr = &hdrs->proto_hdr[layer];
1177
1178                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
1179
1180                         if (gtp_spec && gtp_mask) {
1181                                 if (gtp_mask->v_pt_rsv_flags ||
1182                                         gtp_mask->msg_type ||
1183                                         gtp_mask->msg_len) {
1184                                         rte_flow_error_set(error, EINVAL,
1185                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1186                                                 item, "Invalid GTP mask");
1187                                         return -rte_errno;
1188                                 }
1189
1190                                 if (gtp_mask->teid == UINT32_MAX) {
1191                                         input_set |= IAVF_INSET_GTPU_TEID;
1192                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
1193                                 }
1194
1195                                 rte_memcpy(hdr->buffer,
1196                                         gtp_spec, sizeof(*gtp_spec));
1197                         }
1198
1199                         tun_inner = 1;
1200
1201                         hdrs->count = ++layer;
1202                         break;
1203
1204                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1205                         gtp_psc_spec = item->spec;
1206                         gtp_psc_mask = item->mask;
1207
1208                         hdr = &hdrs->proto_hdr[layer];
1209
1210                         if (!gtp_psc_spec)
1211                                 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
1212                         else if ((gtp_psc_mask->hdr.qfi) &&
1213                                 !(gtp_psc_mask->hdr.type))
1214                                 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
1215                         else if (gtp_psc_spec->hdr.type == IAVF_GTPU_EH_UPLINK)
1216                                 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP);
1217                         else if (gtp_psc_spec->hdr.type == IAVF_GTPU_EH_DWLINK)
1218                                 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN);
1219
1220                         if (gtp_psc_spec && gtp_psc_mask) {
1221                                 if (gtp_psc_mask->hdr.qfi == 0x3F) {
1222                                         input_set |= IAVF_INSET_GTPU_QFI;
1223                                         if (!gtp_psc_mask->hdr.type)
1224                                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
1225                                                                                  GTPU_EH, QFI);
1226                                         else if (gtp_psc_spec->hdr.type ==
1227                                                                 IAVF_GTPU_EH_UPLINK)
1228                                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
1229                                                                                  GTPU_UP, QFI);
1230                                         else if (gtp_psc_spec->hdr.type ==
1231                                                                 IAVF_GTPU_EH_DWLINK)
1232                                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
1233                                                                                  GTPU_DWN, QFI);
1234                                 }
1235
1236                                 rte_memcpy(hdr->buffer, gtp_psc_spec,
1237                                         sizeof(*gtp_psc_spec));
1238                         }
1239
1240                         hdrs->count = ++layer;
1241                         break;
1242
		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
			l2tpv3oip_spec = item->spec;
			l2tpv3oip_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);

			if (l2tpv3oip_spec && l2tpv3oip_mask) {
				/* Session id is only offloadable fully masked. */
				if (l2tpv3oip_mask->session_id == UINT32_MAX) {
					input_set |= IAVF_L2TPV3OIP_SESSION_ID;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
				}

				rte_memcpy(hdr->buffer, l2tpv3oip_spec,
					sizeof(*l2tpv3oip_spec));
			}

			hdrs->count = ++layer;
			break;
1263
		case RTE_FLOW_ITEM_TYPE_ESP:
			esp_spec = item->spec;
			esp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

			if (esp_spec && esp_mask) {
				/* SPI is only offloadable fully masked. */
				if (esp_mask->hdr.spi == UINT32_MAX) {
					input_set |= IAVF_INSET_ESP_SPI;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
				}

				/* Unlike most items here, only the embedded
				 * fixed header (spec->hdr) is copied.
				 */
				rte_memcpy(hdr->buffer, &esp_spec->hdr,
					sizeof(esp_spec->hdr));
			}

			hdrs->count = ++layer;
			break;
1284
		case RTE_FLOW_ITEM_TYPE_AH:
			ah_spec = item->spec;
			ah_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

			if (ah_spec && ah_mask) {
				/* SPI is only offloadable fully masked. */
				if (ah_mask->spi == UINT32_MAX) {
					input_set |= IAVF_INSET_AH_SPI;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
				}

				rte_memcpy(hdr->buffer, ah_spec,
					sizeof(*ah_spec));
			}

			hdrs->count = ++layer;
			break;
1305
		case RTE_FLOW_ITEM_TYPE_PFCP:
			pfcp_spec = item->spec;
			pfcp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);

			if (pfcp_spec && pfcp_mask) {
				/* S field (session/node selector) must be
				 * fully masked to join the input set.
				 */
				if (pfcp_mask->s_field == UINT8_MAX) {
					input_set |= IAVF_INSET_PFCP_S_FIELD;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
				}

				rte_memcpy(hdr->buffer, pfcp_spec,
					sizeof(*pfcp_spec));
			}

			hdrs->count = ++layer;
			break;
1326
1327                 case RTE_FLOW_ITEM_TYPE_ECPRI:
1328                         ecpri_spec = item->spec;
1329                         ecpri_mask = item->mask;
1330
1331                         ecpri_common.u32 = rte_be_to_cpu_32(ecpri_spec->hdr.common.u32);
1332
1333                         hdr = &hdrs->proto_hdr[layer];
1334
1335                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ECPRI);
1336
1337                         if (ecpri_spec && ecpri_mask) {
1338                                 if (ecpri_common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA &&
1339                                                 ecpri_mask->hdr.type0.pc_id == UINT16_MAX) {
1340                                         input_set |= IAVF_ECPRI_PC_RTC_ID;
1341                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ECPRI,
1342                                                                          PC_RTC_ID);
1343                                 }
1344
1345                                 rte_memcpy(hdr->buffer, ecpri_spec,
1346                                         sizeof(*ecpri_spec));
1347                         }
1348
1349                         hdrs->count = ++layer;
1350                         break;
1351
		case RTE_FLOW_ITEM_TYPE_GRE:
			gre_spec = item->spec;
			gre_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GRE);

			if (gre_spec && gre_mask) {
				rte_memcpy(hdr->buffer, gre_spec,
					   sizeof(*gre_spec));
			}

			/* Flag that following items describe the tunnel's
			 * inner headers.
			 */
			tun_inner = 1;

			hdrs->count = ++layer;
			break;
1369
		case RTE_FLOW_ITEM_TYPE_L2TPV2:
			l2tpv2_spec = item->spec;
			l2tpv2_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV2);

			if (l2tpv2_spec && l2tpv2_mask) {
				flags_version =
					rte_be_to_cpu_16(l2tpv2_spec->hdr.common.flags_version);
				/* The session id sits at a different offset
				 * depending on which optional fields (L, S, O)
				 * the flags/version word declares, hence one
				 * typeN union member per message-type value.
				 * It is only offloadable fully masked.
				 */
				if ((flags_version == RTE_L2TPV2_MSG_TYPE_CONTROL &&
				     l2tpv2_mask->hdr.type3.session_id == UINT16_MAX) ||
				    (flags_version == RTE_L2TPV2_MSG_TYPE_DATA &&
				     l2tpv2_mask->hdr.type7.session_id == UINT16_MAX) ||
				    (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_L &&
				     l2tpv2_mask->hdr.type6.session_id == UINT16_MAX) ||
				    (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_S &&
				     l2tpv2_mask->hdr.type5.session_id == UINT16_MAX) ||
				    (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_O &&
				     l2tpv2_mask->hdr.type4.session_id == UINT16_MAX) ||
				    (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_L_S &&
				     l2tpv2_mask->hdr.type3.session_id == UINT16_MAX) ||
				    (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_L_O &&
				     l2tpv2_mask->hdr.type2.session_id == UINT16_MAX) ||
				    (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_S_O &&
				     l2tpv2_mask->hdr.type1.session_id == UINT16_MAX) ||
				    (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_L_S_O &&
				     l2tpv2_mask->hdr.type0.session_id == UINT16_MAX)) {
					input_set |= IAVF_L2TPV2_SESSION_ID;
					/* With the length (L) bit present the
					 * session id shifts by the len field;
					 * pick the matching virtchnl field.
					 */
					if (flags_version & IAVF_L2TPV2_FLAGS_LEN)
						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
								L2TPV2,
								LEN_SESS_ID);
					else
						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
								L2TPV2,
								SESS_ID);
				}

				rte_memcpy(hdr->buffer, l2tpv2_spec,
					   sizeof(*l2tpv2_spec));
			}

			/* Flag that following items describe the tunnel's
			 * inner headers.
			 */
			tun_inner = 1;

			hdrs->count = ++layer;
			break;
1418
		case RTE_FLOW_ITEM_TYPE_PPP:
			ppp_spec = item->spec;
			ppp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PPP);

			/* No per-field input set bits for PPP: the raw spec
			 * is handed to the PF as-is when present.
			 */
			if (ppp_spec && ppp_mask) {
				rte_memcpy(hdr->buffer, ppp_spec,
					   sizeof(*ppp_spec));
			}

			hdrs->count = ++layer;
			break;
1434
		case RTE_FLOW_ITEM_TYPE_VOID:
			/* Placeholder item: contributes nothing. */
			break;

		default:
			/* Any item type not handled above is unsupported
			 * by the FDIR engine.
			 */
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Invalid pattern item.");
			return -rte_errno;
1443                 }
1444         }
1445
	/* The virtchnl message has a fixed proto_hdr array; reject patterns
	 * with more layers than it can carry.
	 */
	if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Protocol header layers exceed the maximum value");
		return -rte_errno;
	}

	/* NOTE(review): presumably validates the collected input set against
	 * the template mask (ETHERTYPE always being admissible) — confirm in
	 * iavf_fdir_refine_input_set.
	 */
	if (!iavf_fdir_refine_input_set(input_set,
					input_set_mask | IAVF_INSET_ETHERTYPE,
					filter)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
				   "Invalid input set");
		return -rte_errno;
	}

	filter->input_set = input_set;
1463
1464         return 0;
1465 }
1466
1467 static int
1468 iavf_fdir_parse(struct iavf_adapter *ad,
1469                 struct iavf_pattern_match_item *array,
1470                 uint32_t array_len,
1471                 const struct rte_flow_item pattern[],
1472                 const struct rte_flow_action actions[],
1473                 void **meta,
1474                 struct rte_flow_error *error)
1475 {
1476         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
1477         struct iavf_fdir_conf *filter = &vf->fdir.conf;
1478         struct iavf_pattern_match_item *item = NULL;
1479         int ret;
1480
1481         memset(filter, 0, sizeof(*filter));
1482
1483         item = iavf_search_pattern_match_item(pattern, array, array_len, error);
1484         if (!item)
1485                 return -rte_errno;
1486
1487         ret = iavf_fdir_parse_pattern(ad, pattern, item->input_set_mask,
1488                                       error, filter);
1489         if (ret)
1490                 goto error;
1491
1492         ret = iavf_fdir_parse_action(ad, actions, error, filter);
1493         if (ret)
1494                 goto error;
1495
1496         if (meta)
1497                 *meta = filter;
1498
1499 error:
1500         rte_free(item);
1501         return ret;
1502 }
1503
1504 static struct iavf_flow_parser iavf_fdir_parser = {
1505         .engine = &iavf_fdir_engine,
1506         .array = iavf_fdir_pattern,
1507         .array_len = RTE_DIM(iavf_fdir_pattern),
1508         .parse_pattern_action = iavf_fdir_parse,
1509         .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
1510 };
1511
/* Constructor: register the FDIR flow engine with the generic flow
 * framework when the driver shared object is loaded.
 */
RTE_INIT(iavf_fdir_engine_register)
{
	iavf_register_flow_engine(&iavf_fdir_engine);
}