/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "virtchnl.h"
#include "iavf_rxtx.h"

#define IAVF_FDIR_MAX_QREGION_SIZE 128

#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

#define IAVF_GTPU_EH_DWLINK 0
#define IAVF_GTPU_EH_UPLINK 1

#define IAVF_FDIR_INSET_ETH (\
        IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
        IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_ID)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
        IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT (\
        IAVF_INSET_IPV6_ID)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV4 (\
        IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
        IAVF_INSET_TUN_IPV4_PROTO | IAVF_INSET_TUN_IPV4_TOS | \
        IAVF_INSET_TUN_IPV4_TTL)

#define IAVF_FDIR_INSET_GTPU_IPV4_UDP (\
        IAVF_FDIR_INSET_GTPU_IPV4 | \
        IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV4_TCP (\
        IAVF_FDIR_INSET_GTPU_IPV4 | \
        IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_IPV6_GTPU (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV6 (\
        IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
        IAVF_INSET_TUN_IPV6_NEXT_HDR | IAVF_INSET_TUN_IPV6_TC | \
        IAVF_INSET_TUN_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_GTPU_IPV6_UDP (\
        IAVF_FDIR_INSET_GTPU_IPV6 | \
        IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV6_TCP (\
        IAVF_FDIR_INSET_GTPU_IPV6 | \
        IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_L2TPV3OIP (\
        IAVF_L2TPV3OIP_SESSION_ID)

#define IAVF_FDIR_INSET_IPV4_ESP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_ESP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
        IAVF_INSET_AH_SPI)

#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_PFCP (\
        IAVF_INSET_PFCP_S_FIELD)

#define IAVF_FDIR_INSET_ECPRI (\
        IAVF_INSET_ECPRI)

#define IAVF_FDIR_INSET_GRE_IPV4 (\
        IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
        IAVF_INSET_TUN_IPV4_TOS | IAVF_INSET_TUN_IPV4_PROTO)

#define IAVF_FDIR_INSET_GRE_IPV4_TCP (\
        IAVF_FDIR_INSET_GRE_IPV4 | IAVF_INSET_TUN_TCP_SRC_PORT | \
        IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV4_UDP (\
        IAVF_FDIR_INSET_GRE_IPV4 | IAVF_INSET_TUN_UDP_SRC_PORT | \
        IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV6 (\
        IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
        IAVF_INSET_TUN_IPV6_TC | IAVF_INSET_TUN_IPV6_NEXT_HDR)

#define IAVF_FDIR_INSET_GRE_IPV6_TCP (\
        IAVF_FDIR_INSET_GRE_IPV6 | IAVF_INSET_TUN_TCP_SRC_PORT | \
        IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV6_UDP (\
        IAVF_FDIR_INSET_GRE_IPV6 | IAVF_INSET_TUN_UDP_SRC_PORT | \
        IAVF_INSET_TUN_UDP_DST_PORT)

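/* Pattern match table for the FDIR engine: each entry pairs a supported
 * rte_flow pattern with the input-set bits that may be matched for it.
 */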
static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
        {iavf_pattern_ethertype,                 IAVF_FDIR_INSET_ETH,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4,                  IAVF_FDIR_INSET_ETH_IPV4,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp,              IAVF_FDIR_INSET_ETH_IPV4_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_tcp,              IAVF_FDIR_INSET_ETH_IPV4_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_sctp,             IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6,                  IAVF_FDIR_INSET_ETH_IPV6,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_frag_ext,         IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp,              IAVF_FDIR_INSET_ETH_IPV6_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_tcp,              IAVF_FDIR_INSET_ETH_IPV6_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_sctp,             IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu,             IAVF_FDIR_INSET_IPV4_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4,        IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4_udp,    IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,    IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6,        IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6_udp,    IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,    IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh,          IAVF_FDIR_INSET_IPV4_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4,     IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6,     IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu,            IAVF_FDIR_INSET_IPV4_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu,            IAVF_FDIR_INSET_IPV4_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu,            IAVF_FDIR_INSET_IPV6_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu,            IAVF_FDIR_INSET_IPV6_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh,                 IAVF_FDIR_INSET_IPV4_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4,            IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_udp,        IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_tcp,        IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6,            IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_udp,        IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_tcp,        IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh,                 IAVF_FDIR_INSET_IPV4_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4,            IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_udp,        IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_tcp,        IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6,            IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_udp,        IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_tcp,        IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh,                 IAVF_FDIR_INSET_IPV6_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4,            IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_udp,        IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_tcp,        IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6,            IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_udp,        IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_tcp,        IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh,                 IAVF_FDIR_INSET_IPV6_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4,            IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_udp,        IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_tcp,        IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6,            IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_udp,        IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_tcp,        IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gtpu,             IAVF_FDIR_INSET_IPV6_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gtpu_eh,          IAVF_FDIR_INSET_IPV6_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_esp,              IAVF_FDIR_INSET_IPV4_ESP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_esp,              IAVF_FDIR_INSET_IPV6_ESP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ah,               IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_ah,               IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp_esp,          IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_esp,          IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_pfcp,             IAVF_FDIR_INSET_PFCP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_pfcp,             IAVF_FDIR_INSET_PFCP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ecpri,                 IAVF_FDIR_INSET_ECPRI,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ecpri,            IAVF_FDIR_INSET_ECPRI,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4,        IAVF_FDIR_INSET_GRE_IPV4,       IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_tcp,    IAVF_FDIR_INSET_GRE_IPV4_TCP,   IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_udp,    IAVF_FDIR_INSET_GRE_IPV4_UDP,   IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6,        IAVF_FDIR_INSET_GRE_IPV6,       IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_tcp,    IAVF_FDIR_INSET_GRE_IPV6_TCP,   IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_udp,    IAVF_FDIR_INSET_GRE_IPV6_UDP,   IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4,        IAVF_FDIR_INSET_GRE_IPV4,       IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_tcp,    IAVF_FDIR_INSET_GRE_IPV4_TCP,   IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_udp,    IAVF_FDIR_INSET_GRE_IPV4_UDP,   IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6,        IAVF_FDIR_INSET_GRE_IPV6,       IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_tcp,    IAVF_FDIR_INSET_GRE_IPV6_TCP,   IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_udp,    IAVF_FDIR_INSET_GRE_IPV6_UDP,   IAVF_INSET_NONE},
};

static struct iavf_flow_parser iavf_fdir_parser;

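/* Register the FDIR parser with the generic flow framework, provided the
 * PF advertises the VIRTCHNL_VF_OFFLOAD_FDIR_PF capability.
 */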
static int
iavf_fdir_init(struct iavf_adapter *ad)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_flow_parser *parser;

        if (!vf->vf_res)
                return -EINVAL;

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
                parser = &iavf_fdir_parser;
        else
                return -ENOTSUP;

        return iavf_register_parser(parser, ad);
}

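/* Unregister the FDIR parser from the generic flow framework. */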
static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
        iavf_unregister_parser(&iavf_fdir_parser, ad);
}

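/* Create an FDIR rule: program the parsed filter into hardware over
 * virtchnl and attach a private copy of it to the rte_flow handle so the
 * rule can be deleted later.
 */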
static int
iavf_fdir_create(struct iavf_adapter *ad,
                struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        struct iavf_fdir_conf *rule;
        int ret;

        rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
        if (!rule) {
                rte_flow_error_set(error, ENOMEM,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to allocate memory for fdir rule");
                return -rte_errno;
        }

        ret = iavf_fdir_add(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to add filter rule.");
                goto free_entry;
        }

        if (filter->mark_flag == 1)
                iavf_fdir_rx_proc_enable(ad, 1);

        rte_memcpy(rule, filter, sizeof(*rule));
        flow->rule = rule;

        return 0;

free_entry:
        rte_free(rule);
        return -rte_errno;
}

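/* Destroy an FDIR rule: remove the filter from hardware and free the
 * private copy attached to the rte_flow handle.
 */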
static int
iavf_fdir_destroy(struct iavf_adapter *ad,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter;
        int ret;

        filter = (struct iavf_fdir_conf *)flow->rule;

        ret = iavf_fdir_del(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to delete filter rule.");
                return -rte_errno;
        }

        if (filter->mark_flag == 1)
                iavf_fdir_rx_proc_enable(ad, 0);

        flow->rule = NULL;
        rte_free(filter);

        return 0;
}

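/* Validate a parsed FDIR filter with the PF without programming it. */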
static int
iavf_fdir_validation(struct iavf_adapter *ad,
                __rte_unused struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        int ret;

        ret = iavf_fdir_check(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to validate filter rule.");
                return -rte_errno;
        }

        return 0;
}

static struct iavf_flow_engine iavf_fdir_engine = {
        .init = iavf_fdir_init,
        .uninit = iavf_fdir_uninit,
        .create = iavf_fdir_create,
        .destroy = iavf_fdir_destroy,
        .validation = iavf_fdir_validation,
        .type = IAVF_FLOW_ENGINE_FDIR,
};

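/* Parse an RSS action into a queue-region destination. The queue group
 * must be contiguous, a power of two no larger than
 * IAVF_FDIR_MAX_QREGION_SIZE, and must fit within both the device's Rx
 * queue count and the PF-advertised maximum RSS queue region.
 */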
static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
                        struct rte_flow_error *error,
                        const struct rte_flow_action *act,
                        struct virtchnl_filter_action *filter_action)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check if the queue indexes for the queue region are continuous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, act,
                                        "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
                rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size should be any of the following values: "
                                "1, 2, 4, 8, 16, 32, 64, 128, as long as the total number "
                                "of queues does not exceed the VSI allocation.");
                return -rte_errno;
        }

        if (rss->queue_num > vf->max_rss_qregion) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size cannot be larger than the supported max RSS queue region");
                return -rte_errno;
        }

        filter_action->act_conf.queue.index = rss->queue[0];
        filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

        return 0;
}

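/* Translate the rte_flow action list into virtchnl filter actions.
 * Exactly one destination action (passthru, drop, queue or RSS queue
 * region) and at most one mark action are accepted; a mark-only rule is
 * completed with an implicit passthru.
 */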
static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        int ret;

        int number = 0;
        struct virtchnl_filter_action *filter_action;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;

                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_DROP;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_QUEUE;
                        filter_action->act_conf.queue.index = act_q->index;

                        if (filter_action->act_conf.queue.index >=
                                ad->eth_dev->data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions, "Invalid queue for FDIR.");
                                return -rte_errno;
                        }

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_Q_REGION;

                        ret = iavf_fdir_parse_action_qregion(ad,
                                                error, actions, filter_action);
                        if (ret)
                                return ret;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        filter->mark_flag = 1;
                        mark_spec = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_MARK;
                        filter_action->act_conf.mark_id = mark_spec->id;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Invalid action.");
                        return -rte_errno;
                }
        }

        if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Action numbers exceed the maximum value");
                return -rte_errno;
        }

        if (dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Too many mark actions");
                return -rte_errno;
        }

        if (dest_num + mark_num == 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Empty action");
                return -rte_errno;
        }

        /* A mark-only rule is equivalent to mark + passthru. */
        if (dest_num == 0) {
                filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
                filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
                filter->add_fltr.rule_cfg.action_set.count = ++number;
        }

        return 0;
}

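/* Check the collected input set against the pattern's allowed mask. For
 * an otherwise empty input set whose last layer is TCP or UDP, match on
 * the L3 protocol/next-header field instead, so the rule still selects
 * the right L4 type.
 */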
static bool
iavf_fdir_refine_input_set(const uint64_t input_set,
                           const uint64_t input_set_mask,
                           struct iavf_fdir_conf *filter)
{
        struct virtchnl_proto_hdr *hdr, *hdr_last;
        struct rte_flow_item_ipv4 ipv4_spec;
        struct rte_flow_item_ipv6 ipv6_spec;
        int last_layer;
        uint8_t proto_id;

        if (input_set & ~input_set_mask)
                return false;
        else if (input_set)
                return true;

        last_layer = filter->add_fltr.rule_cfg.proto_hdrs.count - 1;
        /* The last layer of a TCP/UDP pattern is never at an index below 2. */
        if (last_layer < 2)
                return false;
        hdr_last = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer];
        if (hdr_last->type == VIRTCHNL_PROTO_HDR_TCP)
                proto_id = 6;   /* IPPROTO_TCP */
        else if (hdr_last->type == VIRTCHNL_PROTO_HDR_UDP)
                proto_id = 17;  /* IPPROTO_UDP */
        else
                return false;

        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer - 1];
        switch (hdr->type) {
        case VIRTCHNL_PROTO_HDR_IPV4:
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
                memset(&ipv4_spec, 0, sizeof(ipv4_spec));
                ipv4_spec.hdr.next_proto_id = proto_id;
                rte_memcpy(hdr->buffer, &ipv4_spec.hdr,
                           sizeof(ipv4_spec.hdr));
                return true;
        case VIRTCHNL_PROTO_HDR_IPV6:
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
                memset(&ipv6_spec, 0, sizeof(ipv6_spec));
                ipv6_spec.hdr.proto = proto_id;
                rte_memcpy(hdr->buffer, &ipv6_spec.hdr,
                           sizeof(ipv6_spec.hdr));
                return true;
        default:
                return false;
        }
}

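/* Insert a dummy IPv4 fragment header at index 'layer', shifting the
 * following protocol headers up by one slot.
 */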
static void
iavf_fdir_add_fragment_hdr(struct virtchnl_proto_hdrs *hdrs, int layer)
{
        struct virtchnl_proto_hdr *hdr1;
        struct virtchnl_proto_hdr *hdr2;
        int i;

        if (layer < 0 || layer > hdrs->count)
                return;

        /* shift the following header layers up by one slot */
        for (i = hdrs->count; i >= layer; i--) {
                hdr1 = &hdrs->proto_hdr[i];
                hdr2 = &hdrs->proto_hdr[i - 1];
                *hdr1 = *hdr2;
        }

        /* add a dummy fragment header */
        hdr1 = &hdrs->proto_hdr[layer];
        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, IPV4_FRAG);
        hdr1->field_selector = 0;
        hdrs->count = ++layer;
}

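/* Translate the rte_flow pattern into a virtchnl protocol header list
 * and collect the matched input-set bits. Range matching (item->last) is
 * only accepted for IPv4 and the IPv6 fragment extension header.
 */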
671 static int
672 iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
673                         const struct rte_flow_item pattern[],
674                         const uint64_t input_set_mask,
675                         struct rte_flow_error *error,
676                         struct iavf_fdir_conf *filter)
677 {
678         struct virtchnl_proto_hdrs *hdrs =
679                         &filter->add_fltr.rule_cfg.proto_hdrs;
680         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
681         const struct rte_flow_item_eth *eth_spec, *eth_mask;
682         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
683         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
684         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec;
685         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_mask;
686         const struct rte_flow_item_udp *udp_spec, *udp_mask;
687         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
688         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
689         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
690         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
691         const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
692         const struct rte_flow_item_esp *esp_spec, *esp_mask;
693         const struct rte_flow_item_ah *ah_spec, *ah_mask;
694         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
695         const struct rte_flow_item_ecpri *ecpri_spec, *ecpri_mask;
696         const struct rte_flow_item_gre *gre_spec, *gre_mask;
697         const struct rte_flow_item *item = pattern;
698         struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
699         struct rte_ecpri_common_hdr ecpri_common;
700         uint64_t input_set = IAVF_INSET_NONE;
701         enum rte_flow_item_type item_type;
702         enum rte_flow_item_type next_type;
703         uint8_t tun_inner = 0;
704         uint16_t ether_type;
705         int layer = 0;
706
707         uint8_t  ipv6_addr_mask[16] = {
708                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
709                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
710         };
711
712         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
713                 item_type = item->type;
714
715                 if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
716                                     item_type ==
717                                     RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
718                         rte_flow_error_set(error, EINVAL,
719                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
720                                            "Not support range");
721                 }
722
723                 switch (item_type) {
724                 case RTE_FLOW_ITEM_TYPE_ETH:
725                         eth_spec = item->spec;
726                         eth_mask = item->mask;
727                         next_type = (item + 1)->type;
728
729                         hdr1 = &hdrs->proto_hdr[layer];
730
731                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);
732
733                         if (next_type == RTE_FLOW_ITEM_TYPE_END &&
734                             (!eth_spec || !eth_mask)) {
735                                 rte_flow_error_set(error, EINVAL,
736                                                 RTE_FLOW_ERROR_TYPE_ITEM,
737                                                 item, "NULL eth spec/mask.");
738                                 return -rte_errno;
739                         }
740
741                         if (eth_spec && eth_mask) {
742                                 if (!rte_is_zero_ether_addr(&eth_mask->src) ||
743                                     !rte_is_zero_ether_addr(&eth_mask->dst)) {
744                                         rte_flow_error_set(error, EINVAL,
745                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
746                                                 "Invalid MAC_addr mask.");
747                                         return -rte_errno;
748                                 }
749                         }
750
751                         if (eth_spec && eth_mask && eth_mask->type) {
752                                 if (eth_mask->type != RTE_BE16(0xffff)) {
753                                         rte_flow_error_set(error, EINVAL,
754                                                 RTE_FLOW_ERROR_TYPE_ITEM,
755                                                 item, "Invalid type mask.");
756                                         return -rte_errno;
757                                 }
758
759                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
760                                 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
761                                         ether_type == RTE_ETHER_TYPE_IPV6) {
762                                         rte_flow_error_set(error, EINVAL,
763                                                 RTE_FLOW_ERROR_TYPE_ITEM,
764                                                 item,
765                                                 "Unsupported ether_type.");
766                                         return -rte_errno;
767                                 }
768
769                                 input_set |= IAVF_INSET_ETHERTYPE;
770                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
771                                                                  ETHERTYPE);
772
773                                 rte_memcpy(hdr1->buffer, eth_spec,
774                                            sizeof(struct rte_ether_hdr));
775                         }
776
777                         hdrs->count = ++layer;
778                         break;
779
780                 case RTE_FLOW_ITEM_TYPE_IPV4:
781                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
782                         ipv4_spec = item->spec;
783                         ipv4_last = item->last;
784                         ipv4_mask = item->mask;
785                         next_type = (item + 1)->type;
786
787                         hdr = &hdrs->proto_hdr[layer];
788
789                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
790
791                         if (!(ipv4_spec && ipv4_mask)) {
792                                 hdrs->count = ++layer;
793                                 break;
794                         }
795
796                         if (ipv4_mask->hdr.version_ihl ||
797                             ipv4_mask->hdr.total_length ||
798                             ipv4_mask->hdr.hdr_checksum) {
799                                 rte_flow_error_set(error, EINVAL,
800                                                    RTE_FLOW_ERROR_TYPE_ITEM,
801                                                    item, "Invalid IPv4 mask.");
802                                 return -rte_errno;
803                         }
804
805                         if (ipv4_last &&
806                             (ipv4_last->hdr.version_ihl ||
807                              ipv4_last->hdr.type_of_service ||
808                              ipv4_last->hdr.time_to_live ||
809                              ipv4_last->hdr.total_length |
810                              ipv4_last->hdr.next_proto_id ||
811                              ipv4_last->hdr.hdr_checksum ||
812                              ipv4_last->hdr.src_addr ||
813                              ipv4_last->hdr.dst_addr)) {
814                                 rte_flow_error_set(error, EINVAL,
815                                                    RTE_FLOW_ERROR_TYPE_ITEM,
816                                                    item, "Invalid IPv4 last.");
817                                 return -rte_errno;
818                         }
819
820                         if (ipv4_mask->hdr.type_of_service ==
821                             UINT8_MAX) {
822                                 input_set |= IAVF_INSET_IPV4_TOS;
823                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
824                                                                  DSCP);
825                         }
826
827                         if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
828                                 input_set |= IAVF_INSET_IPV4_PROTO;
829                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
830                                                                  PROT);
831                         }
832
833                         if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
834                                 input_set |= IAVF_INSET_IPV4_TTL;
835                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
836                                                                  TTL);
837                         }
838
839                         if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
840                                 input_set |= IAVF_INSET_IPV4_SRC;
841                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
842                                                                  SRC);
843                         }
844
845                         if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
846                                 input_set |= IAVF_INSET_IPV4_DST;
847                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
848                                                                  DST);
849                         }
850
851                         if (tun_inner) {
852                                 input_set &= ~IAVF_PROT_IPV4_OUTER;
853                                 input_set |= IAVF_PROT_IPV4_INNER;
854                         }
855
856                         rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
857                                    sizeof(ipv4_spec->hdr));
858
859                         hdrs->count = ++layer;
860
861                         /* fragment Ipv4:
862                          * spec is 0x2000, mask is 0x2000
863                          */
864                         if (ipv4_spec->hdr.fragment_offset ==
865                             rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
866                             ipv4_mask->hdr.fragment_offset ==
867                             rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
868                                 /* all IPv4 fragment packet has the same
869                                  * ethertype, if the spec and mask is valid,
870                                  * set ethertype into input set.
871                                  */
872                                 input_set |= IAVF_INSET_ETHERTYPE;
873                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
874                                                                  ETHERTYPE);
875
876                                 /* add dummy header for IPv4 Fragment */
877                                 iavf_fdir_add_fragment_hdr(hdrs, layer);
878                         } else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
879                                 rte_flow_error_set(error, EINVAL,
880                                                    RTE_FLOW_ERROR_TYPE_ITEM,
881                                                    item, "Invalid IPv4 mask.");
882                                 return -rte_errno;
883                         }
884
885                         break;
886
887                 case RTE_FLOW_ITEM_TYPE_IPV6:
888                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
889                         ipv6_spec = item->spec;
890                         ipv6_mask = item->mask;
891
892                         hdr = &hdrs->proto_hdr[layer];
893
894                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
895
896                         if (!(ipv6_spec && ipv6_mask)) {
897                                 hdrs->count = ++layer;
898                                 break;
899                         }
900
901                         if (ipv6_mask->hdr.payload_len) {
902                                 rte_flow_error_set(error, EINVAL,
903                                                    RTE_FLOW_ERROR_TYPE_ITEM,
904                                                    item, "Invalid IPv6 mask");
905                                 return -rte_errno;
906                         }
907
908                         if ((ipv6_mask->hdr.vtc_flow &
909                               rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
910                              == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
911                                 input_set |= IAVF_INSET_IPV6_TC;
912                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
913                                                                  TC);
914                         }
915
916                         if (ipv6_mask->hdr.proto == UINT8_MAX) {
917                                 input_set |= IAVF_INSET_IPV6_NEXT_HDR;
918                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
919                                                                  PROT);
920                         }
921
922                         if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
923                                 input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
924                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
925                                                                  HOP_LIMIT);
926                         }
927
928                         if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
929                                     RTE_DIM(ipv6_mask->hdr.src_addr))) {
930                                 input_set |= IAVF_INSET_IPV6_SRC;
931                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
932                                                                  SRC);
933                         }
934                         if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
935                                     RTE_DIM(ipv6_mask->hdr.dst_addr))) {
936                                 input_set |= IAVF_INSET_IPV6_DST;
937                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
938                                                                  DST);
939                         }
940
941                         if (tun_inner) {
942                                 input_set &= ~IAVF_PROT_IPV6_OUTER;
943                                 input_set |= IAVF_PROT_IPV6_INNER;
944                         }
945
946                         rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
947                                    sizeof(ipv6_spec->hdr));
948
949                         hdrs->count = ++layer;
950                         break;
951
952                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
953                         ipv6_frag_spec = item->spec;
954                         ipv6_frag_mask = item->mask;
955                         next_type = (item + 1)->type;
956
957                         hdr = &hdrs->proto_hdr[layer];
958
959                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6_EH_FRAG);
960
961                         if (!(ipv6_frag_spec && ipv6_frag_mask)) {
962                                 hdrs->count = ++layer;
963                                 break;
964                         }
965
966                         /* fragment Ipv6:
967                          * spec is 0x1, mask is 0x1
968                          */
969                         if (ipv6_frag_spec->hdr.frag_data ==
970                             rte_cpu_to_be_16(1) &&
971                             ipv6_frag_mask->hdr.frag_data ==
972                             rte_cpu_to_be_16(1)) {
973                                 /* all IPv6 fragment packet has the same
974                                  * ethertype, if the spec and mask is valid,
975                                  * set ethertype into input set.
976                                  */
977                                 input_set |= IAVF_INSET_ETHERTYPE;
978                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
979                                                                  ETHERTYPE);
980
981                                 rte_memcpy(hdr->buffer, &ipv6_frag_spec->hdr,
982                                            sizeof(ipv6_frag_spec->hdr));
983                         } else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
984                                 rte_flow_error_set(error, EINVAL,
985                                                    RTE_FLOW_ERROR_TYPE_ITEM,
986                                                    item, "Invalid IPv6 mask.");
987                                 return -rte_errno;
988                         }
989
990                         hdrs->count = ++layer;
991                         break;
992
993                 case RTE_FLOW_ITEM_TYPE_UDP:
994                         udp_spec = item->spec;
995                         udp_mask = item->mask;
996
997                         hdr = &hdrs->proto_hdr[layer];
998
999                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
1000
1001                         if (udp_spec && udp_mask) {
1002                                 if (udp_mask->hdr.dgram_len ||
1003                                         udp_mask->hdr.dgram_cksum) {
1004                                         rte_flow_error_set(error, EINVAL,
1005                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1006                                                 "Invalid UDP mask");
1007                                         return -rte_errno;
1008                                 }
1009
1010                                 if (udp_mask->hdr.src_port == UINT16_MAX) {
1011                                         input_set |= IAVF_INSET_UDP_SRC_PORT;
1012                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
1013                                 }
1014                                 if (udp_mask->hdr.dst_port == UINT16_MAX) {
1015                                         input_set |= IAVF_INSET_UDP_DST_PORT;
1016                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
1017                                 }
1018
1019                                 if (tun_inner) {
1020                                         input_set &= ~IAVF_PROT_UDP_OUTER;
1021                                         input_set |= IAVF_PROT_UDP_INNER;
1022                                 }
1023
1024                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1025                                         rte_memcpy(hdr->buffer,
1026                                                 &udp_spec->hdr,
1027                                                 sizeof(udp_spec->hdr));
1028                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1029                                         rte_memcpy(hdr->buffer,
1030                                                 &udp_spec->hdr,
1031                                                 sizeof(udp_spec->hdr));
1032                         }
1033
1034                         hdrs->count = ++layer;
1035                         break;
1036
1037                 case RTE_FLOW_ITEM_TYPE_TCP:
1038                         tcp_spec = item->spec;
1039                         tcp_mask = item->mask;
1040
1041                         hdr = &hdrs->proto_hdr[layer];
1042
1043                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
1044
1045                         if (tcp_spec && tcp_mask) {
1046                                 if (tcp_mask->hdr.sent_seq ||
1047                                         tcp_mask->hdr.recv_ack ||
1048                                         tcp_mask->hdr.data_off ||
1049                                         tcp_mask->hdr.tcp_flags ||
1050                                         tcp_mask->hdr.rx_win ||
1051                                         tcp_mask->hdr.cksum ||
1052                                         tcp_mask->hdr.tcp_urp) {
1053                                         rte_flow_error_set(error, EINVAL,
1054                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1055                                                 "Invalid TCP mask");
1056                                         return -rte_errno;
1057                                 }
1058
1059                                 if (tcp_mask->hdr.src_port == UINT16_MAX) {
1060                                         input_set |= IAVF_INSET_TCP_SRC_PORT;
1061                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
1062                                 }
1063                                 if (tcp_mask->hdr.dst_port == UINT16_MAX) {
1064                                         input_set |= IAVF_INSET_TCP_DST_PORT;
1065                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
1066                                 }
1067
1068                                 if (tun_inner) {
1069                                         input_set &= ~IAVF_PROT_TCP_OUTER;
1070                                         input_set |= IAVF_PROT_TCP_INNER;
1071                                 }
1072
1073                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1074                                         rte_memcpy(hdr->buffer,
1075                                                 &tcp_spec->hdr,
1076                                                 sizeof(tcp_spec->hdr));
1077                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1078                                         rte_memcpy(hdr->buffer,
1079                                                 &tcp_spec->hdr,
1080                                                 sizeof(tcp_spec->hdr));
1081                         }
1082
1083                         hdrs->count = ++layer;
1084                         break;
1085
1086                 case RTE_FLOW_ITEM_TYPE_SCTP:
1087                         sctp_spec = item->spec;
1088                         sctp_mask = item->mask;
1089
1090                         hdr = &hdrs->proto_hdr[layer];
1091
1092                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
1093
1094                         if (sctp_spec && sctp_mask) {
1095                                 if (sctp_mask->hdr.cksum) {
1096                                         rte_flow_error_set(error, EINVAL,
1097                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1098                                                 "Invalid UDP mask");
1099                                         return -rte_errno;
1100                                 }
1101
1102                                 if (sctp_mask->hdr.src_port == UINT16_MAX) {
1103                                         input_set |= IAVF_INSET_SCTP_SRC_PORT;
1104                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
1105                                 }
1106                                 if (sctp_mask->hdr.dst_port == UINT16_MAX) {
1107                                         input_set |= IAVF_INSET_SCTP_DST_PORT;
1108                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
1109                                 }
1110
1111                         /* The copied SCTP header is identical for an
1112                          * IPv4 and an IPv6 outer layer.
1113                          */
1114                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
1115                             l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1116                                 rte_memcpy(hdr->buffer,
1117                                         &sctp_spec->hdr,
1118                                         sizeof(sctp_spec->hdr));
1119                         }
1120
1121                         hdrs->count = ++layer;
1122                         break;
1123
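                     /* GTP-U: an exactly-masked TEID joins the input set.
                      * Setting tun_inner below makes subsequent L3/L4 items
                      * describe the inner (tunnelled) packet.
                      */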
1124                 case RTE_FLOW_ITEM_TYPE_GTPU:
1125                         gtp_spec = item->spec;
1126                         gtp_mask = item->mask;
1127
1128                         hdr = &hdrs->proto_hdr[layer];
1129
1130                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
1131
1132                         if (gtp_spec && gtp_mask) {
1133                                 if (gtp_mask->v_pt_rsv_flags ||
1134                                         gtp_mask->msg_type ||
1135                                         gtp_mask->msg_len) {
1136                                         rte_flow_error_set(error, EINVAL,
1137                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1138                                                 item, "Invalid GTP mask");
1139                                         return -rte_errno;
1140                                 }
1141
1142                                 if (gtp_mask->teid == UINT32_MAX) {
1143                                         input_set |= IAVF_INSET_GTPU_TEID;
1144                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
1145                                 }
1146
1147                                 rte_memcpy(hdr->buffer,
1148                                         gtp_spec, sizeof(*gtp_spec));
1149                         }
1150
1151                         tun_inner = 1;
1152
1153                         hdrs->count = ++layer;
1154                         break;
1155
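                     /* GTP-U PDU session container: the virtchnl header type
                      * depends on whether the rule matches the QFI alone or
                      * a specific uplink/downlink PDU type; a rule without a
                      * mask falls back to the generic GTPU_EH type.
                      */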
1156                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1157                         gtp_psc_spec = item->spec;
1158                         gtp_psc_mask = item->mask;
1159
1160                         hdr = &hdrs->proto_hdr[layer];
1161
1162                         if (!gtp_psc_spec || !gtp_psc_mask)
1163                                 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
1164                         else if (gtp_psc_mask->qfi && !gtp_psc_mask->pdu_type)
1165                                 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
1166                         else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_UPLINK)
1167                                 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP);
1168                         else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_DWLINK)
1169                                 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN);
1170
1171                         if (gtp_psc_spec && gtp_psc_mask) {
1172                                 if (gtp_psc_mask->qfi == UINT8_MAX) {
1173                                         input_set |= IAVF_INSET_GTPU_QFI;
1174                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
1175                                 }
1176
1177                                 rte_memcpy(hdr->buffer, gtp_psc_spec,
1178                                         sizeof(*gtp_psc_spec));
1179                         }
1180
1181                         hdrs->count = ++layer;
1182                         break;
1183
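                     /* L2TPv3 over IP: the session ID is the only field that
                      * can join the input set.
                      */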
1184                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1185                         l2tpv3oip_spec = item->spec;
1186                         l2tpv3oip_mask = item->mask;
1187
1188                         hdr = &hdrs->proto_hdr[layer];
1189
1190                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
1191
1192                         if (l2tpv3oip_spec && l2tpv3oip_mask) {
1193                                 if (l2tpv3oip_mask->session_id == UINT32_MAX) {
1194                                         input_set |= IAVF_L2TPV3OIP_SESSION_ID;
1195                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
1196                                 }
1197
1198                                 rte_memcpy(hdr->buffer, l2tpv3oip_spec,
1199                                         sizeof(*l2tpv3oip_spec));
1200                         }
1201
1202                         hdrs->count = ++layer;
1203                         break;
1204
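                     /* ESP: match on an exactly-masked SPI. */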
1205                 case RTE_FLOW_ITEM_TYPE_ESP:
1206                         esp_spec = item->spec;
1207                         esp_mask = item->mask;
1208
1209                         hdr = &hdrs->proto_hdr[layer];
1210
1211                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
1212
1213                         if (esp_spec && esp_mask) {
1214                                 if (esp_mask->hdr.spi == UINT32_MAX) {
1215                                         input_set |= IAVF_INSET_ESP_SPI;
1216                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
1217                                 }
1218
1219                                 rte_memcpy(hdr->buffer, &esp_spec->hdr,
1220                                         sizeof(esp_spec->hdr));
1221                         }
1222
1223                         hdrs->count = ++layer;
1224                         break;
1225
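                     /* AH: match on an exactly-masked SPI. */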
1226                 case RTE_FLOW_ITEM_TYPE_AH:
1227                         ah_spec = item->spec;
1228                         ah_mask = item->mask;
1229
1230                         hdr = &hdrs->proto_hdr[layer];
1231
1232                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
1233
1234                         if (ah_spec && ah_mask) {
1235                                 if (ah_mask->spi == UINT32_MAX) {
1236                                         input_set |= IAVF_INSET_AH_SPI;
1237                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
1238                                 }
1239
1240                                 rte_memcpy(hdr->buffer, ah_spec,
1241                                         sizeof(*ah_spec));
1242                         }
1243
1244                         hdrs->count = ++layer;
1245                         break;
1246
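                     /* PFCP: the S field (the SEID-present flag, which
                      * separates node messages from session messages) is the
                      * only supported match field.
                      */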
1247                 case RTE_FLOW_ITEM_TYPE_PFCP:
1248                         pfcp_spec = item->spec;
1249                         pfcp_mask = item->mask;
1250
1251                         hdr = &hdrs->proto_hdr[layer];
1252
1253                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
1254
1255                         if (pfcp_spec && pfcp_mask) {
1256                                 if (pfcp_mask->s_field == UINT8_MAX) {
1257                                         input_set |= IAVF_INSET_PFCP_S_FIELD;
1258                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
1259                                 }
1260
1261                                 rte_memcpy(hdr->buffer, pfcp_spec,
1262                                         sizeof(*pfcp_spec));
1263                         }
1264
1265                         hdrs->count = ++layer;
1266                         break;
1267
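                     /* eCPRI: for IQ data messages the physical channel ID
                      * can be matched; the common header is converted from
                      * network to host byte order before its type is read.
                      */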
1268                 case RTE_FLOW_ITEM_TYPE_ECPRI:
1269                         ecpri_spec = item->spec;
1270                         ecpri_mask = item->mask;
1271
1272                         hdr = &hdrs->proto_hdr[layer];
1273
1274                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ECPRI);
1275
1276                         if (ecpri_spec && ecpri_mask) {
1277                                 /* Read the spec only after the NULL check. */
1278                                 ecpri_common.u32 = rte_be_to_cpu_32(ecpri_spec->hdr.common.u32);
1279                                 if (ecpri_common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA &&
1280                                                 ecpri_mask->hdr.type0.pc_id == UINT16_MAX) {
1281                                         input_set |= IAVF_ECPRI_PC_RTC_ID;
1282                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ECPRI,
1283                                                                          PC_RTC_ID);
1284                                 }
1285
1286                                 rte_memcpy(hdr->buffer, ecpri_spec,
1287                                         sizeof(*ecpri_spec));
1288                         }
1289
1290                         hdrs->count = ++layer;
1291                         break;
1292
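                     /* GRE: no individual fields are selected; the raw
                      * header is copied and, as with GTP-U, subsequent items
                      * match the inner packet.
                      */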
1293                 case RTE_FLOW_ITEM_TYPE_GRE:
1294                         gre_spec = item->spec;
1295                         gre_mask = item->mask;
1296
1297                         hdr = &hdrs->proto_hdr[layer];
1298
1299                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GRE);
1300
1301                         if (gre_spec && gre_mask) {
1302                                 rte_memcpy(hdr->buffer, gre_spec,
1303                                            sizeof(*gre_spec));
1304                         }
1305
1306                         tun_inner = 1;
1307
1308                         hdrs->count = ++layer;
1309                         break;
1310
1311                 case RTE_FLOW_ITEM_TYPE_VOID:
1312                         break;
1313
1314                 default:
1315                         rte_flow_error_set(error, EINVAL,
1316                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1317                                         "Invalid pattern item.");
1318                         return -rte_errno;
1319                 }
1320         }
1321
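             /* A virtchnl message carries at most
              * VIRTCHNL_MAX_NUM_PROTO_HDRS protocol headers; reject longer
              * patterns.
              */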
1322         if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
1323                 rte_flow_error_set(error, EINVAL,
1324                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1325                         "Protocol header layers exceed the maximum value");
1326                 return -rte_errno;
1327         }
1328
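             /* The collected input set must be a subset of what this pattern
              * template supports; ETHERTYPE is implicitly always allowed.
              */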
1329         if (!iavf_fdir_refine_input_set(input_set,
1330                                         input_set_mask | IAVF_INSET_ETHERTYPE,
1331                                         filter)) {
1332                 rte_flow_error_set(error, EINVAL,
1333                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
1334                                    "Invalid input set");
1335                 return -rte_errno;
1336         }
1337
1338         filter->input_set = input_set;
1339
1340         return 0;
1341 }
1342
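 /* Entry point invoked by the generic flow framework: match the pattern
  * against the FDIR template table, then fill vf->fdir.conf from the
  * pattern and action lists.  On success the filter is returned through
  * meta.
  */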
1343 static int
1344 iavf_fdir_parse(struct iavf_adapter *ad,
1345                 struct iavf_pattern_match_item *array,
1346                 uint32_t array_len,
1347                 const struct rte_flow_item pattern[],
1348                 const struct rte_flow_action actions[],
1349                 void **meta,
1350                 struct rte_flow_error *error)
1351 {
1352         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
1353         struct iavf_fdir_conf *filter = &vf->fdir.conf;
1354         struct iavf_pattern_match_item *item = NULL;
1355         int ret;
1356
1357         memset(filter, 0, sizeof(*filter));
1358
1359         item = iavf_search_pattern_match_item(pattern, array, array_len, error);
1360         if (!item)
1361                 return -rte_errno;
1362
1363         ret = iavf_fdir_parse_pattern(ad, pattern, item->input_set_mask,
1364                                       error, filter);
1365         if (ret)
1366                 goto error;
1367
1368         ret = iavf_fdir_parse_action(ad, actions, error, filter);
1369         if (ret)
1370                 goto error;
1371
1372         if (meta)
1373                 *meta = filter;
1374
1375 error:
1376         rte_free(item);
1377         return ret;
1378 }
1379
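 /* Glue between the generic flow framework and this engine: the pattern
  * templates, the parse callback, and the stage at which FDIR rules apply.
  *
  * Illustrative usage (an assumed example, not taken from this file): such a
  * rule can be created from testpmd with, e.g.,
  *
  *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 /
  *        tcp src is 32 dst is 33 / end actions queue index 3 / end
  *
  * which steers exactly-matched TCP/IPv4 packets to Rx queue 3.
  */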
1380 static struct iavf_flow_parser iavf_fdir_parser = {
1381         .engine = &iavf_fdir_engine,
1382         .array = iavf_fdir_pattern,
1383         .array_len = RTE_DIM(iavf_fdir_pattern),
1384         .parse_pattern_action = iavf_fdir_parse,
1385         .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
1386 };
1387
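 /* Constructor executed at load time: register the FDIR engine with the
  * generic flow framework before any port is probed.
  */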
1388 RTE_INIT(iavf_fdir_engine_register)
1389 {
1390         iavf_register_flow_engine(&iavf_fdir_engine);
1391 }