/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "virtchnl.h"
#include "iavf_rxtx.h"

#define IAVF_FDIR_MAX_QREGION_SIZE 128

#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
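/* The IPv6 vtc_flow word is version(4) | traffic class(8) | flow label(20),
 * so the traffic class occupies bits 20-27 of the 32-bit value; the offset
 * and mask above extract it.
 */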

#define IAVF_GTPU_EH_DWLINK 0
#define IAVF_GTPU_EH_UPLINK 1

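/* Each IAVF_FDIR_INSET_* mask below lists the header fields a flow director
 * rule may match for one pattern.  iavf_fdir_parse_pattern() accumulates the
 * fields actually requested by a rule into the same bit space, and
 * iavf_fdir_refine_input_set() rejects the rule if it asks for anything
 * outside the mask.
 */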
#define IAVF_FDIR_INSET_ETH (\
        IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
        IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_ID)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
        IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT (\
        IAVF_INSET_IPV6_ID)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV4 (\
        IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
        IAVF_INSET_TUN_IPV4_PROTO | IAVF_INSET_TUN_IPV4_TOS | \
        IAVF_INSET_TUN_IPV4_TTL)

#define IAVF_FDIR_INSET_GTPU_IPV4_UDP (\
        IAVF_FDIR_INSET_GTPU_IPV4 | \
        IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV4_TCP (\
        IAVF_FDIR_INSET_GTPU_IPV4 | \
        IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_IPV6_GTPU (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV6 (\
        IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
        IAVF_INSET_TUN_IPV6_NEXT_HDR | IAVF_INSET_TUN_IPV6_TC | \
        IAVF_INSET_TUN_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_GTPU_IPV6_UDP (\
        IAVF_FDIR_INSET_GTPU_IPV6 | \
        IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV6_TCP (\
        IAVF_FDIR_INSET_GTPU_IPV6 | \
        IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_L2TPV3OIP (\
        IAVF_L2TPV3OIP_SESSION_ID)

#define IAVF_FDIR_INSET_IPV4_ESP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_ESP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
        IAVF_INSET_AH_SPI)

#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_PFCP (\
        IAVF_INSET_PFCP_S_FIELD)

#define IAVF_FDIR_INSET_ECPRI (\
        IAVF_INSET_ECPRI)

#define IAVF_FDIR_INSET_GRE_IPV4 (\
        IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
        IAVF_INSET_TUN_IPV4_TOS | IAVF_INSET_TUN_IPV4_PROTO)

#define IAVF_FDIR_INSET_GRE_IPV4_TCP (\
        IAVF_FDIR_INSET_GRE_IPV4 | IAVF_INSET_TUN_TCP_SRC_PORT | \
        IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV4_UDP (\
        IAVF_FDIR_INSET_GRE_IPV4 | IAVF_INSET_TUN_UDP_SRC_PORT | \
        IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV6 (\
        IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
        IAVF_INSET_TUN_IPV6_TC | IAVF_INSET_TUN_IPV6_NEXT_HDR)

#define IAVF_FDIR_INSET_GRE_IPV6_TCP (\
        IAVF_FDIR_INSET_GRE_IPV6 | IAVF_INSET_TUN_TCP_SRC_PORT | \
        IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV6_UDP (\
        IAVF_FDIR_INSET_GRE_IPV6 | IAVF_INSET_TUN_UDP_SRC_PORT | \
        IAVF_INSET_TUN_UDP_DST_PORT)

static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
        {iavf_pattern_ethertype,                 IAVF_FDIR_INSET_ETH,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4,                  IAVF_FDIR_INSET_ETH_IPV4,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp,              IAVF_FDIR_INSET_ETH_IPV4_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_tcp,              IAVF_FDIR_INSET_ETH_IPV4_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_sctp,             IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6,                  IAVF_FDIR_INSET_ETH_IPV6,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_frag_ext,         IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp,              IAVF_FDIR_INSET_ETH_IPV6_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_tcp,              IAVF_FDIR_INSET_ETH_IPV6_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_sctp,             IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu,             IAVF_FDIR_INSET_IPV4_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4,        IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4_udp,    IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,    IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6,        IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6_udp,    IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,    IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh,          IAVF_FDIR_INSET_IPV4_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4,     IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6,     IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu,            IAVF_FDIR_INSET_IPV4_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu,            IAVF_FDIR_INSET_IPV4_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu,            IAVF_FDIR_INSET_IPV6_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu,            IAVF_FDIR_INSET_IPV6_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh,                 IAVF_FDIR_INSET_IPV4_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4,            IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_udp,        IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_tcp,        IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6,            IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_udp,        IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_tcp,        IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh,                 IAVF_FDIR_INSET_IPV4_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4,            IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_udp,        IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_tcp,        IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6,            IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_udp,        IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_tcp,        IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh,                 IAVF_FDIR_INSET_IPV6_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4,            IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_udp,        IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_tcp,        IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6,            IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_udp,        IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_tcp,        IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh,                 IAVF_FDIR_INSET_IPV6_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4,            IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_udp,        IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_tcp,        IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6,            IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_udp,        IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_tcp,        IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gtpu,             IAVF_FDIR_INSET_IPV6_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gtpu_eh,          IAVF_FDIR_INSET_IPV6_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_esp,              IAVF_FDIR_INSET_IPV4_ESP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_esp,              IAVF_FDIR_INSET_IPV6_ESP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ah,               IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_ah,               IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp_esp,          IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_esp,          IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_pfcp,             IAVF_FDIR_INSET_PFCP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_pfcp,             IAVF_FDIR_INSET_PFCP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ecpri,                 IAVF_FDIR_INSET_ECPRI,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ecpri,            IAVF_FDIR_INSET_ECPRI,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4,         IAVF_FDIR_INSET_GRE_IPV4,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_tcp,     IAVF_FDIR_INSET_GRE_IPV4_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_udp,     IAVF_FDIR_INSET_GRE_IPV4_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6,         IAVF_FDIR_INSET_GRE_IPV6,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_tcp,     IAVF_FDIR_INSET_GRE_IPV6_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_udp,     IAVF_FDIR_INSET_GRE_IPV6_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4,         IAVF_FDIR_INSET_GRE_IPV4,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_tcp,     IAVF_FDIR_INSET_GRE_IPV4_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_udp,     IAVF_FDIR_INSET_GRE_IPV4_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6,         IAVF_FDIR_INSET_GRE_IPV6,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_tcp,     IAVF_FDIR_INSET_GRE_IPV6_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_udp,     IAVF_FDIR_INSET_GRE_IPV6_UDP,  IAVF_INSET_NONE},
};
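
/* The table above maps every supported rte_flow pattern to the input-set
 * mask it is validated against.  As an illustrative example (command syntax
 * from testpmd, not part of this driver), the eth_ipv4_udp entry would
 * accept a rule such as:
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 /
 *        udp dst is 4000 / end actions queue index 3 / end
 */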

static struct iavf_flow_parser iavf_fdir_parser;

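/* Register the FDIR parser with the generic flow framework.  This only
 * succeeds when the PF advertised VIRTCHNL_VF_OFFLOAD_FDIR_PF; otherwise
 * init fails with -ENOTSUP and no FDIR rules can be offloaded.
 */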
static int
iavf_fdir_init(struct iavf_adapter *ad)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_flow_parser *parser;

        if (!vf->vf_res)
                return -EINVAL;

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
                parser = &iavf_fdir_parser;
        else
                return -ENOTSUP;

        return iavf_register_parser(parser, ad);
}

static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
        iavf_unregister_parser(&iavf_fdir_parser, ad);
}

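/* Create path: the rule in @meta was already built and validated by the
 * parser.  It is sent to the PF over virtchnl (iavf_fdir_add) and a private
 * copy is attached to the rte_flow handle so that destroy can later replay
 * it to iavf_fdir_del.
 */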
static int
iavf_fdir_create(struct iavf_adapter *ad,
                struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        struct iavf_fdir_conf *rule;
        int ret;

        rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
        if (!rule) {
                rte_flow_error_set(error, ENOMEM,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to allocate memory for fdir rule");
                return -rte_errno;
        }

        ret = iavf_fdir_add(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to add filter rule.");
                goto free_entry;
        }

        if (filter->mark_flag == 1)
                iavf_fdir_rx_proc_enable(ad, 1);

        rte_memcpy(rule, filter, sizeof(*rule));
        flow->rule = rule;

        return 0;

free_entry:
        rte_free(rule);
        return -rte_errno;
}

static int
iavf_fdir_destroy(struct iavf_adapter *ad,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter;
        int ret;

        filter = (struct iavf_fdir_conf *)flow->rule;

        ret = iavf_fdir_del(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to delete filter rule.");
                return -rte_errno;
        }

        if (filter->mark_flag == 1)
                iavf_fdir_rx_proc_enable(ad, 0);

        flow->rule = NULL;
        rte_free(filter);

        return 0;
}

static int
iavf_fdir_validation(struct iavf_adapter *ad,
                __rte_unused struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        int ret;

        ret = iavf_fdir_check(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to validate filter rule.");
                return -rte_errno;
        }

        return 0;
}

static struct iavf_flow_engine iavf_fdir_engine = {
        .init = iavf_fdir_init,
        .uninit = iavf_fdir_uninit,
        .create = iavf_fdir_create,
        .destroy = iavf_fdir_destroy,
        .validation = iavf_fdir_validation,
        .type = IAVF_FLOW_ENGINE_FDIR,
};

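/* An RSS action is accepted as a queue region: the queues must be
 * contiguous, a power of two in count (at most IAVF_FDIR_MAX_QREGION_SIZE)
 * and within the device Rx queue range.  The region is reported to the PF
 * as a start index plus log2(size), hence the rte_fls_u32() below; e.g.
 * queues 8-15 encode as index 8, region 3.
 */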
static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
                        struct rte_flow_error *error,
                        const struct rte_flow_action *act,
                        struct virtchnl_filter_action *filter_action)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check that the queue indexes for the queue region are contiguous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, act,
                                        "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
                rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size should be any of the following values: "
                                "1, 2, 4, 8, 16, 32, 64, 128, as long as the total "
                                "number of queues does not exceed the VSI allocation.");
                return -rte_errno;
        }

        if (rss->queue_num > vf->max_rss_qregion) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size cannot be larger than the supported max RSS queue region");
                return -rte_errno;
        }

        filter_action->act_conf.queue.index = rss->queue[0];
        filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

        return 0;
}

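/* Walk the action list and translate it into a virtchnl action set.  At
 * most one fate action (passthru/drop/queue/rss) and one mark action are
 * allowed; a mark with no fate action is completed with an implicit
 * passthru so the packet is still delivered normally.
 */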
static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        int ret;

        int number = 0;
        struct virtchnl_filter_action *filter_action;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;

                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_DROP;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_QUEUE;
                        filter_action->act_conf.queue.index = act_q->index;

                        if (filter_action->act_conf.queue.index >=
                                ad->eth_dev->data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions, "Invalid queue for FDIR.");
                                return -rte_errno;
                        }

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_Q_REGION;

                        ret = iavf_fdir_parse_action_qregion(ad,
                                                error, actions, filter_action);
                        if (ret)
                                return ret;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        filter->mark_flag = 1;
                        mark_spec = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_MARK;
                        filter_action->act_conf.mark_id = mark_spec->id;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Invalid action.");
                        return -rte_errno;
                }
        }

        if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Number of actions exceeds the maximum allowed");
                return -rte_errno;
        }

        if (dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Too many mark actions");
                return -rte_errno;
        }

        if (dest_num + mark_num == 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Empty action");
                return -rte_errno;
        }

        /* A mark-only action set behaves as mark + passthru. */
        if (dest_num == 0) {
                filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
                filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
                filter->add_fltr.rule_cfg.action_set.count = ++number;
        }

        return 0;
}

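/* A TCP/UDP pattern whose items match no field at all would program an
 * empty input set.  Instead of rejecting it, infer the L4 protocol and
 * match it through the preceding L3 header (IPv4 proto 6/17, or the IPv6
 * next header), so e.g. "eth / ipv4 / tcp / end" still creates a usable
 * rule.
 */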
static bool
iavf_fdir_refine_input_set(const uint64_t input_set,
                           const uint64_t input_set_mask,
                           struct iavf_fdir_conf *filter)
{
        struct virtchnl_proto_hdr *hdr, *hdr_last;
        struct rte_flow_item_ipv4 ipv4_spec;
        struct rte_flow_item_ipv6 ipv6_spec;
        int last_layer;
        uint8_t proto_id;

        if (input_set & ~input_set_mask)
                return false;
        else if (input_set)
                return true;

        last_layer = filter->add_fltr.rule_cfg.proto_hdrs.count - 1;
        /* The last layer of a TCP/UDP pattern is at least layer 2 (L2/L3/L4). */
        if (last_layer < 2)
                return false;
        hdr_last = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer];
        if (hdr_last->type == VIRTCHNL_PROTO_HDR_TCP)
                proto_id = 6;
        else if (hdr_last->type == VIRTCHNL_PROTO_HDR_UDP)
                proto_id = 17;
        else
                return false;

        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer - 1];
        switch (hdr->type) {
        case VIRTCHNL_PROTO_HDR_IPV4:
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
                memset(&ipv4_spec, 0, sizeof(ipv4_spec));
                ipv4_spec.hdr.next_proto_id = proto_id;
                rte_memcpy(hdr->buffer, &ipv4_spec.hdr,
                           sizeof(ipv4_spec.hdr));
                return true;
        case VIRTCHNL_PROTO_HDR_IPV6:
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
                memset(&ipv6_spec, 0, sizeof(ipv6_spec));
                ipv6_spec.hdr.proto = proto_id;
                rte_memcpy(hdr->buffer, &ipv6_spec.hdr,
                           sizeof(ipv6_spec.hdr));
                return true;
        default:
                return false;
        }
}

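/* Insert a dummy IPV4_FRAG header at @layer, shifting any existing headers
 * from that slot up by one.  E.g. an ETH/IPV4 list with layer == 2 becomes
 * ETH/IPV4/IPV4_FRAG.
 */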
static void
iavf_fdir_add_fragment_hdr(struct virtchnl_proto_hdrs *hdrs, int layer)
{
        struct virtchnl_proto_hdr *hdr1;
        struct virtchnl_proto_hdr *hdr2;
        int i;

        if (layer < 0 || layer > hdrs->count)
                return;

        /* shift the existing headers up by one layer */
        for (i = hdrs->count; i >= layer; i--) {
                hdr1 = &hdrs->proto_hdr[i];
                hdr2 = &hdrs->proto_hdr[i - 1];
                *hdr1 = *hdr2;
        }

        /* add a dummy fragment header */
        hdr1 = &hdrs->proto_hdr[layer];
        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, IPV4_FRAG);
        hdrs->count = ++layer;
}

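/* Translate an rte_flow item list into a virtchnl_proto_hdrs stack.  Each
 * item appends one protocol header: fully-masked fields are flagged for
 * matching and accumulated into input_set, the raw spec is copied into the
 * header buffer, and tun_inner flips the *_OUTER input-set bits to *_INNER
 * once a tunnel (GTPU) item has been seen.
 */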
static int
iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                        const struct rte_flow_item pattern[],
                        const uint64_t input_set_mask,
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        struct virtchnl_proto_hdrs *hdrs =
                        &filter->add_fltr.rule_cfg.proto_hdrs;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
        const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
        const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
        const struct rte_flow_item_esp *esp_spec, *esp_mask;
        const struct rte_flow_item_ah *ah_spec, *ah_mask;
        const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
        const struct rte_flow_item_ecpri *ecpri_spec, *ecpri_mask;
        const struct rte_flow_item_gre *gre_spec, *gre_mask;
        const struct rte_flow_item *item = pattern;
        struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
        struct rte_ecpri_common_hdr ecpri_common;
        uint64_t input_set = IAVF_INSET_NONE;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type next_type;
        uint8_t tun_inner = 0;
        uint16_t ether_type;
        int layer = 0;

        uint8_t ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
        };

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                item_type = item->type;

                if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    item_type ==
                                    RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Range not supported");
                        return -rte_errno;
                }

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr1 = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);

                        if (next_type == RTE_FLOW_ITEM_TYPE_END &&
                            (!eth_spec || !eth_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "NULL eth spec/mask.");
                                return -rte_errno;
                        }

                        if (eth_spec && eth_mask) {
                                if (!rte_is_zero_ether_addr(&eth_mask->src) ||
                                    !rte_is_zero_ether_addr(&eth_mask->dst)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid MAC_addr mask.");
                                        return -rte_errno;
                                }
                        }

                        if (eth_spec && eth_mask && eth_mask->type) {
                                if (eth_mask->type != RTE_BE16(0xffff)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid type mask.");
                                        return -rte_errno;
                                }

                                ether_type = rte_be_to_cpu_16(eth_spec->type);
                                if (ether_type == RTE_ETHER_TYPE_IPV4 ||
                                        ether_type == RTE_ETHER_TYPE_IPV6) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item,
                                                "Unsupported ether_type.");
                                        return -rte_errno;
                                }

                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                rte_memcpy(hdr1->buffer, eth_spec,
                                           sizeof(struct rte_ether_hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec = item->spec;
                        ipv4_last = item->last;
                        ipv4_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

                        if (!(ipv4_spec && ipv4_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_last &&
                            (ipv4_last->hdr.version_ihl ||
                             ipv4_last->hdr.type_of_service ||
                             ipv4_last->hdr.time_to_live ||
                             ipv4_last->hdr.total_length ||
                             ipv4_last->hdr.next_proto_id ||
                             ipv4_last->hdr.hdr_checksum ||
                             ipv4_last->hdr.src_addr ||
                             ipv4_last->hdr.dst_addr)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 last.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.type_of_service == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_TOS;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 DSCP);
                        }

                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_PROTO;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 PROT);
                        }

                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_TTL;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 TTL);
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
                                input_set |= IAVF_INSET_IPV4_SRC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 SRC);
                        }

                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
                                input_set |= IAVF_INSET_IPV4_DST;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 DST);
                        }

                        if (tun_inner) {
                                input_set &= ~IAVF_PROT_IPV4_OUTER;
                                input_set |= IAVF_PROT_IPV4_INNER;
                        }

                        rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
                                   sizeof(ipv4_spec->hdr));

                        hdrs->count = ++layer;

                        /* Fragmented IPv4: the MF flag (0x2000) must be set
                         * in both spec and mask.
                         */
                        if (ipv4_spec->hdr.fragment_offset ==
                            rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
                            ipv4_mask->hdr.fragment_offset ==
                            rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
                                /* All IPv4 fragment packets share the same
                                 * ethertype; if the spec and mask are valid,
                                 * add the ethertype to the input set.
                                 */
                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                /* add a dummy header for IPv4 fragments */
                                iavf_fdir_add_fragment_hdr(hdrs, layer);
                        } else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

                        if (!(ipv6_spec && ipv6_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        if ((ipv6_mask->hdr.vtc_flow &
                              rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
                             == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
                                input_set |= IAVF_INSET_IPV6_TC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 TC);
                        }

                        if (ipv6_mask->hdr.proto == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV6_NEXT_HDR;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 PROT);
                        }

                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 HOP_LIMIT);
                        }

                        if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
                                    RTE_DIM(ipv6_mask->hdr.src_addr))) {
                                input_set |= IAVF_INSET_IPV6_SRC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 SRC);
                        }
                        if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
                                    RTE_DIM(ipv6_mask->hdr.dst_addr))) {
                                input_set |= IAVF_INSET_IPV6_DST;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 DST);
                        }

                        if (tun_inner) {
                                input_set &= ~IAVF_PROT_IPV6_OUTER;
                                input_set |= IAVF_PROT_IPV6_INNER;
                        }

                        rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
                                   sizeof(ipv6_spec->hdr));

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
                        ipv6_frag_spec = item->spec;
                        ipv6_frag_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6_EH_FRAG);

                        if (!(ipv6_frag_spec && ipv6_frag_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        /* Fragmented IPv6: frag_data spec and mask are both
                         * 0x1 (the more-fragments bit).
                         */
                        if (ipv6_frag_spec->hdr.frag_data ==
                            rte_cpu_to_be_16(1) &&
                            ipv6_frag_mask->hdr.frag_data ==
                            rte_cpu_to_be_16(1)) {
                                /* All IPv6 fragment packets share the same
                                 * ethertype; if the spec and mask are valid,
                                 * add the ethertype to the input set.
                                 */
                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                rte_memcpy(hdr->buffer, &ipv6_frag_spec->hdr,
                                           sizeof(ipv6_frag_spec->hdr));
                        } else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

                        if (udp_spec && udp_mask) {
                                if (udp_mask->hdr.dgram_len ||
                                        udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid UDP mask");
                                        return -rte_errno;
                                }

                                if (udp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_UDP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
                                }
                                if (udp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_UDP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
                                }

                                if (tun_inner) {
                                        input_set &= ~IAVF_PROT_UDP_OUTER;
                                        input_set |= IAVF_PROT_UDP_INNER;
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &udp_spec->hdr,
                                                sizeof(udp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

                        if (tcp_spec && tcp_mask) {
                                if (tcp_mask->hdr.sent_seq ||
                                        tcp_mask->hdr.recv_ack ||
                                        tcp_mask->hdr.data_off ||
                                        tcp_mask->hdr.tcp_flags ||
                                        tcp_mask->hdr.rx_win ||
                                        tcp_mask->hdr.cksum ||
                                        tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid TCP mask");
                                        return -rte_errno;
                                }

                                if (tcp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_TCP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
                                }
                                if (tcp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_TCP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
                                }

                                if (tun_inner) {
                                        input_set &= ~IAVF_PROT_TCP_OUTER;
                                        input_set |= IAVF_PROT_TCP_INNER;
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &tcp_spec->hdr,
                                                sizeof(tcp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

                        if (sctp_spec && sctp_mask) {
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid SCTP mask");
                                        return -rte_errno;
                                }
1100
1101                                 if (sctp_mask->hdr.src_port == UINT16_MAX) {
1102                                         input_set |= IAVF_INSET_SCTP_SRC_PORT;
1103                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
1104                                 }
1105                                 if (sctp_mask->hdr.dst_port == UINT16_MAX) {
1106                                         input_set |= IAVF_INSET_SCTP_DST_PORT;
1107                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
1108                                 }
1109
                                /* the copied SCTP header is identical for IPv4 and IPv6 flows */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &sctp_spec->hdr,
                                                sizeof(sctp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

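                /*
                 * GTP-U: only an exact-match TEID is supported; the flag,
                 * message-type and message-length fields must not be masked.
                 * L3/L4 items that follow describe the inner packet.
                 */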
                case RTE_FLOW_ITEM_TYPE_GTPU:
                        gtp_spec = item->spec;
                        gtp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);

                        if (gtp_spec && gtp_mask) {
                                if (gtp_mask->v_pt_rsv_flags ||
                                        gtp_mask->msg_type ||
                                        gtp_mask->msg_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid GTP mask");
                                        return -rte_errno;
                                }

                                if (gtp_mask->teid == UINT32_MAX) {
                                        input_set |= IAVF_INSET_GTPU_TEID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
                                }

                                rte_memcpy(hdr->buffer,
                                        gtp_spec, sizeof(*gtp_spec));
                        }

                        tun_inner = 1;

                        hdrs->count = ++layer;
                        break;

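                /*
                 * GTP-U extension header (PDU session container): derive the
                 * virtchnl header type from the PDU type (uplink or downlink)
                 * and fall back to the generic GTPU_EH when only the QFI is
                 * matched.
                 */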
                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
                        gtp_psc_spec = item->spec;
                        gtp_psc_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        /* a NULL mask alongside a non-NULL spec must not be dereferenced */
                        if (!gtp_psc_spec)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
                        else if (gtp_psc_mask && gtp_psc_mask->qfi &&
                                 !gtp_psc_mask->pdu_type)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
                        else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_UPLINK)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP);
                        else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_DWLINK)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN);

                        if (gtp_psc_spec && gtp_psc_mask) {
                                if (gtp_psc_mask->qfi == UINT8_MAX) {
                                        input_set |= IAVF_INSET_GTPU_QFI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
                                }

                                rte_memcpy(hdr->buffer, gtp_psc_spec,
                                        sizeof(*gtp_psc_spec));
                        }

                        hdrs->count = ++layer;
                        break;

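                /* L2TPv3 over IP: only an exact-match session ID is supported. */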
                case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
                        l2tpv3oip_spec = item->spec;
                        l2tpv3oip_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);

                        if (l2tpv3oip_spec && l2tpv3oip_mask) {
                                if (l2tpv3oip_mask->session_id == UINT32_MAX) {
                                        input_set |= IAVF_L2TPV3OIP_SESSION_ID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
                                }

                                rte_memcpy(hdr->buffer, l2tpv3oip_spec,
                                        sizeof(*l2tpv3oip_spec));
                        }

                        hdrs->count = ++layer;
                        break;

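                /* ESP: only an exact-match SPI is supported. */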
                case RTE_FLOW_ITEM_TYPE_ESP:
                        esp_spec = item->spec;
                        esp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

                        if (esp_spec && esp_mask) {
                                if (esp_mask->hdr.spi == UINT32_MAX) {
                                        input_set |= IAVF_INSET_ESP_SPI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
                                }

                                rte_memcpy(hdr->buffer, &esp_spec->hdr,
                                        sizeof(esp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

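                /* AH: only an exact-match SPI is supported. */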
                case RTE_FLOW_ITEM_TYPE_AH:
                        ah_spec = item->spec;
                        ah_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

                        if (ah_spec && ah_mask) {
                                if (ah_mask->spi == UINT32_MAX) {
                                        input_set |= IAVF_INSET_AH_SPI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
                                }

                                rte_memcpy(hdr->buffer, ah_spec,
                                        sizeof(*ah_spec));
                        }

                        hdrs->count = ++layer;
                        break;

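                /* PFCP: only an exact-match S field is supported. */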
                case RTE_FLOW_ITEM_TYPE_PFCP:
                        pfcp_spec = item->spec;
                        pfcp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);

                        if (pfcp_spec && pfcp_mask) {
                                if (pfcp_mask->s_field == UINT8_MAX) {
                                        input_set |= IAVF_INSET_PFCP_S_FIELD;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
                                }

                                rte_memcpy(hdr->buffer, pfcp_spec,
                                        sizeof(*pfcp_spec));
                        }

                        hdrs->count = ++layer;
                        break;

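                /*
                 * eCPRI: only IQ-data messages (type 0) matched on an exact
                 * PC_ID are supported; the common header dword is converted
                 * to host order before the message type is checked.
                 */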
                case RTE_FLOW_ITEM_TYPE_ECPRI:
                        ecpri_spec = item->spec;
                        ecpri_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ECPRI);

                        if (ecpri_spec && ecpri_mask) {
                                /* dereference the spec only after the NULL check */
                                ecpri_common.u32 =
                                        rte_be_to_cpu_32(ecpri_spec->hdr.common.u32);

                                if (ecpri_common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA &&
                                                ecpri_mask->hdr.type0.pc_id == UINT16_MAX) {
                                        input_set |= IAVF_ECPRI_PC_RTC_ID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ECPRI,
                                                                         PC_RTC_ID);
                                }

                                rte_memcpy(hdr->buffer, ecpri_spec,
                                        sizeof(*ecpri_spec));
                        }

                        hdrs->count = ++layer;
                        break;

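                /*
                 * GRE: the header is copied verbatim; like GTP-U it starts a
                 * tunnel, so later L3/L4 items match the inner packet.
                 */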
                case RTE_FLOW_ITEM_TYPE_GRE:
                        gre_spec = item->spec;
                        gre_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GRE);

                        if (gre_spec && gre_mask) {
                                rte_memcpy(hdr->buffer, gre_spec,
                                           sizeof(*gre_spec));
                        }

                        tun_inner = 1;

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Invalid pattern item.");
                        return -rte_errno;
                }
        }

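        /*
         * layer was incremented once per matched protocol header; reject
         * patterns carrying more headers than a virtchnl message can hold.
         */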
        if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Protocol header layers exceed the maximum value");
                return -rte_errno;
        }

        if (!iavf_fdir_refine_input_set(input_set,
                                        input_set_mask | IAVF_INSET_ETHERTYPE,
                                        filter)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
                                   "Invalid input set");
                return -rte_errno;
        }

        filter->input_set = input_set;

        return 0;
}

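/*
 * Top-level FDIR parse callback invoked through the generic flow
 * framework. As an illustration only (the command below is not part of
 * this file), a rule this parser accepts could be created from testpmd
 * roughly as:
 *
 *   flow create 0 ingress pattern eth / ipv4 / tcp src is 80 / end
 *        actions queue index 3 / end
 *
 * which arrives here as an eth/ipv4/tcp pattern with a full mask on the
 * TCP source port and a QUEUE action.
 */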
static int
iavf_fdir_parse(struct iavf_adapter *ad,
                struct iavf_pattern_match_item *array,
                uint32_t array_len,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                void **meta,
                struct rte_flow_error *error)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_fdir_conf *filter = &vf->fdir.conf;
        struct iavf_pattern_match_item *item = NULL;
        int ret;

        memset(filter, 0, sizeof(*filter));

        item = iavf_search_pattern_match_item(pattern, array, array_len, error);
        if (!item)
                return -rte_errno;

        ret = iavf_fdir_parse_pattern(ad, pattern, item->input_set_mask,
                                      error, filter);
        if (ret)
                goto error;

        ret = iavf_fdir_parse_action(ad, actions, error, filter);
        if (ret)
                goto error;

        if (meta)
                *meta = filter;

/* reached on success as well: the match item is always freed here */
error:
        rte_free(item);
        return ret;
}

static struct iavf_flow_parser iavf_fdir_parser = {
        .engine = &iavf_fdir_engine,
        .array = iavf_fdir_pattern,
        .array_len = RTE_DIM(iavf_fdir_pattern),
        .parse_pattern_action = iavf_fdir_parse,
        .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
};

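/*
 * Runs as a constructor at load time (RTE_INIT) and registers the FDIR
 * engine with the generic iavf flow framework.
 */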
RTE_INIT(iavf_fdir_engine_register)
{
        iavf_register_flow_engine(&iavf_fdir_engine);
}