drivers/net/iavf/iavf_fdir.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "virtchnl.h"
#include "iavf_rxtx.h"

#define IAVF_FDIR_MAX_QREGION_SIZE 128

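/* IPv6 vtc_flow layout: version (bits 31:28), traffic class (bits 27:20),
 * flow label (bits 19:0); the 8-bit TC mask therefore sits at offset 20.
 */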
#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

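/* PDU type carried in the GTP-U PDU session container extension header:
 * 0 = downlink, 1 = uplink.
 */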
#define IAVF_GTPU_EH_DWLINK 0
#define IAVF_GTPU_EH_UPLINK 1

#define IAVF_FDIR_INSET_ETH (\
        IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
        IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_ID)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
        IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT (\
        IAVF_FDIR_INSET_ETH_IPV6 | IAVF_INSET_IPV6_ID)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV4 (\
        IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
        IAVF_INSET_TUN_IPV4_PROTO | IAVF_INSET_TUN_IPV4_TOS | \
        IAVF_INSET_TUN_IPV4_TTL)

#define IAVF_FDIR_INSET_GTPU_IPV4_UDP (\
        IAVF_FDIR_INSET_GTPU_IPV4 | \
        IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV4_TCP (\
        IAVF_FDIR_INSET_GTPU_IPV4 | \
        IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_IPV6_GTPU (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV6 (\
        IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
        IAVF_INSET_TUN_IPV6_NEXT_HDR | IAVF_INSET_TUN_IPV6_TC | \
        IAVF_INSET_TUN_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_GTPU_IPV6_UDP (\
        IAVF_FDIR_INSET_GTPU_IPV6 | \
        IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV6_TCP (\
        IAVF_FDIR_INSET_GTPU_IPV6 | \
        IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_L2TPV3OIP (\
        IAVF_L2TPV3OIP_SESSION_ID)

#define IAVF_FDIR_INSET_IPV4_ESP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_ESP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
        IAVF_INSET_AH_SPI)

#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_PFCP (\
        IAVF_INSET_PFCP_S_FIELD)

#define IAVF_FDIR_INSET_ECPRI (\
        IAVF_INSET_ECPRI)

#define IAVF_FDIR_INSET_GRE_IPV4 (\
        IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
        IAVF_INSET_TUN_IPV4_TOS | IAVF_INSET_TUN_IPV4_PROTO)

#define IAVF_FDIR_INSET_GRE_IPV4_TCP (\
        IAVF_FDIR_INSET_GRE_IPV4 | IAVF_INSET_TUN_TCP_SRC_PORT | \
        IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV4_UDP (\
        IAVF_FDIR_INSET_GRE_IPV4 | IAVF_INSET_TUN_UDP_SRC_PORT | \
        IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV6 (\
        IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
        IAVF_INSET_TUN_IPV6_TC | IAVF_INSET_TUN_IPV6_NEXT_HDR)

#define IAVF_FDIR_INSET_GRE_IPV6_TCP (\
        IAVF_FDIR_INSET_GRE_IPV6 | IAVF_INSET_TUN_TCP_SRC_PORT | \
        IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV6_UDP (\
        IAVF_FDIR_INSET_GRE_IPV6 | IAVF_INSET_TUN_UDP_SRC_PORT | \
        IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_L2TPV2 (\
        IAVF_INSET_SMAC | IAVF_INSET_DMAC | IAVF_INSET_L2TPV2)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV4 (\
        IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_UDP (\
        IAVF_FDIR_INSET_L2TPV2_PPP_IPV4 | IAVF_INSET_TUN_UDP_SRC_PORT | \
        IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_TCP (\
        IAVF_FDIR_INSET_L2TPV2_PPP_IPV4 | IAVF_INSET_TUN_TCP_SRC_PORT | \
        IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV6 (\
        IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_UDP (\
        IAVF_FDIR_INSET_L2TPV2_PPP_IPV6 | IAVF_INSET_TUN_UDP_SRC_PORT | \
        IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_TCP (\
        IAVF_FDIR_INSET_L2TPV2_PPP_IPV6 | IAVF_INSET_TUN_TCP_SRC_PORT | \
        IAVF_INSET_TUN_TCP_DST_PORT)

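/* Each entry pairs a pattern list with the input-set bits this parser
 * accepts for that pattern; the third column (pattern-specific metadata)
 * is unused here and always IAVF_INSET_NONE.
 */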
static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
        {iavf_pattern_raw,                       IAVF_INSET_NONE,               IAVF_INSET_NONE},
        {iavf_pattern_ethertype,                 IAVF_FDIR_INSET_ETH,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4,                  IAVF_FDIR_INSET_ETH_IPV4,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp,              IAVF_FDIR_INSET_ETH_IPV4_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_tcp,              IAVF_FDIR_INSET_ETH_IPV4_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_sctp,             IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6,                  IAVF_FDIR_INSET_ETH_IPV6,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_frag_ext,         IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp,              IAVF_FDIR_INSET_ETH_IPV6_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_tcp,              IAVF_FDIR_INSET_ETH_IPV6_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_sctp,             IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu,             IAVF_FDIR_INSET_IPV4_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4,        IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4_udp,    IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,    IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6,        IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6_udp,    IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,    IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh,          IAVF_FDIR_INSET_IPV4_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4,     IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6,     IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu,            IAVF_FDIR_INSET_IPV4_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu,            IAVF_FDIR_INSET_IPV4_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu,            IAVF_FDIR_INSET_IPV6_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu,            IAVF_FDIR_INSET_IPV6_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4,       IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_udp,   IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_tcp,   IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6,       IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_udp,   IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_tcp,   IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh,                 IAVF_FDIR_INSET_IPV4_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4,            IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_udp,        IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_tcp,        IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6,            IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_udp,        IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_tcp,        IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh,                 IAVF_FDIR_INSET_IPV4_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4,            IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_udp,        IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_tcp,        IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6,            IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_udp,        IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_tcp,        IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh,                 IAVF_FDIR_INSET_IPV6_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4,            IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_udp,        IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_tcp,        IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6,            IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_udp,        IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_tcp,        IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh,                 IAVF_FDIR_INSET_IPV6_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4,            IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_udp,        IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_tcp,        IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6,            IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_udp,        IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_tcp,        IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gtpu,             IAVF_FDIR_INSET_IPV6_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gtpu_eh,          IAVF_FDIR_INSET_IPV6_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_esp,              IAVF_FDIR_INSET_IPV4_ESP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_esp,              IAVF_FDIR_INSET_IPV6_ESP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ah,               IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_ah,               IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp_esp,          IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_esp,          IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_pfcp,             IAVF_FDIR_INSET_PFCP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_pfcp,             IAVF_FDIR_INSET_PFCP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ecpri,                 IAVF_FDIR_INSET_ECPRI,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ecpri,            IAVF_FDIR_INSET_ECPRI,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4,         IAVF_FDIR_INSET_GRE_IPV4,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_tcp,     IAVF_FDIR_INSET_GRE_IPV4_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_udp,     IAVF_FDIR_INSET_GRE_IPV4_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6,         IAVF_FDIR_INSET_GRE_IPV6,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_tcp,     IAVF_FDIR_INSET_GRE_IPV6_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_udp,     IAVF_FDIR_INSET_GRE_IPV6_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4,         IAVF_FDIR_INSET_GRE_IPV4,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_tcp,     IAVF_FDIR_INSET_GRE_IPV4_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_udp,     IAVF_FDIR_INSET_GRE_IPV4_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6,         IAVF_FDIR_INSET_GRE_IPV6,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_tcp,     IAVF_FDIR_INSET_GRE_IPV6_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_udp,     IAVF_FDIR_INSET_GRE_IPV6_UDP,  IAVF_INSET_NONE},

        {iavf_pattern_eth_ipv4_udp_l2tpv2,              IAVF_FDIR_INSET_L2TPV2,                 IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp_l2tpv2_ppp,          IAVF_FDIR_INSET_L2TPV2,                 IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_l2tpv2,              IAVF_FDIR_INSET_L2TPV2,                 IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_l2tpv2_ppp,          IAVF_FDIR_INSET_L2TPV2,                 IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv4,     IAVF_FDIR_INSET_L2TPV2_PPP_IPV4,        IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv4_udp, IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_UDP,    IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv4_tcp, IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_TCP,    IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv4,     IAVF_FDIR_INSET_L2TPV2_PPP_IPV4,        IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv4_udp, IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_UDP,    IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv4_tcp, IAVF_FDIR_INSET_L2TPV2_PPP_IPV4_TCP,    IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv6,     IAVF_FDIR_INSET_L2TPV2_PPP_IPV6,        IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv6_udp, IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_UDP,    IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp_l2tpv2_ppp_ipv6_tcp, IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_TCP,    IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6,     IAVF_FDIR_INSET_L2TPV2_PPP_IPV6,        IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6_udp, IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_UDP,    IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6_tcp, IAVF_FDIR_INSET_L2TPV2_PPP_IPV6_TCP,    IAVF_INSET_NONE},
};
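
/* Illustrative (hypothetical) testpmd rule that this table accepts:
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 /
 *   udp dst is 4000 / end actions queue index 3 / end
 * It maps to iavf_pattern_eth_ipv4_udp, with IAVF_INSET_IPV4_SRC and
 * IAVF_INSET_UDP_DST_PORT in the input set.
 */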

static struct iavf_flow_parser iavf_fdir_parser;

static int
iavf_fdir_init(struct iavf_adapter *ad)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_flow_parser *parser;

        if (!vf->vf_res)
                return -EINVAL;

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
                parser = &iavf_fdir_parser;
        else
                return -ENOTSUP;

        return iavf_register_parser(parser, ad);
}

static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
        iavf_unregister_parser(&iavf_fdir_parser, ad);
}

static int
iavf_fdir_create(struct iavf_adapter *ad,
                struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        struct iavf_fdir_conf *rule;
        int ret;

        rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
        if (!rule) {
                rte_flow_error_set(error, ENOMEM,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to allocate memory for fdir rule");
                return -rte_errno;
        }

        ret = iavf_fdir_add(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to add filter rule.");
                goto free_entry;
        }

        if (filter->mark_flag == 1)
                iavf_fdir_rx_proc_enable(ad, 1);

        rte_memcpy(rule, filter, sizeof(*rule));
        flow->rule = rule;

        return 0;

free_entry:
        rte_free(rule);
        return -rte_errno;
}

static int
iavf_fdir_destroy(struct iavf_adapter *ad,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter;
        int ret;

        filter = (struct iavf_fdir_conf *)flow->rule;

        ret = iavf_fdir_del(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to delete filter rule.");
                return -rte_errno;
        }

        if (filter->mark_flag == 1)
                iavf_fdir_rx_proc_enable(ad, 0);

        flow->rule = NULL;
        rte_free(filter);

        return 0;
}

static int
iavf_fdir_validation(struct iavf_adapter *ad,
                __rte_unused struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        int ret;

        ret = iavf_fdir_check(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to validate filter rule.");
                return -rte_errno;
        }

        return 0;
}

static struct iavf_flow_engine iavf_fdir_engine = {
        .init = iavf_fdir_init,
        .uninit = iavf_fdir_uninit,
        .create = iavf_fdir_create,
        .destroy = iavf_fdir_destroy,
        .validation = iavf_fdir_validation,
        .type = IAVF_FLOW_ENGINE_FDIR,
};

static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
                        struct rte_flow_error *error,
                        const struct rte_flow_action *act,
                        struct virtchnl_filter_action *filter_action)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check that the queue indices for the queue region are contiguous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, act,
                                        "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= ad->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
                rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size should be any of the following values: "
                                "1, 2, 4, 8, 16, 32, 64, 128, as long as the total "
                                "number of queues does not exceed the VSI allocation.");
                return -rte_errno;
        }

        if (rss->queue_num > vf->max_rss_qregion) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size cannot be larger than the supported max RSS queue region");
                return -rte_errno;
        }

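        /* Encode the region as its first queue index plus log2(size):
         * e.g. 8 queues starting at queue 8 give index = 8, region = 3.
         */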
        filter_action->act_conf.queue.index = rss->queue[0];
        filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

        return 0;
}

static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        int ret;

        int number = 0;
        struct virtchnl_filter_action *filter_action;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;

                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_DROP;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_QUEUE;
                        filter_action->act_conf.queue.index = act_q->index;

                        if (filter_action->act_conf.queue.index >=
                                ad->dev_data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions, "Invalid queue for FDIR.");
                                return -rte_errno;
                        }

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_Q_REGION;

                        ret = iavf_fdir_parse_action_qregion(ad,
                                                error, actions, filter_action);
                        if (ret)
                                return ret;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        filter->mark_flag = 1;
                        mark_spec = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_MARK;
                        filter_action->act_conf.mark_id = mark_spec->id;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Invalid action.");
                        return -rte_errno;
                }
        }

        if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Number of actions exceeds the maximum allowed");
                return -rte_errno;
        }

        if (dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Too many mark actions");
                return -rte_errno;
        }

        if (dest_num + mark_num == 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Empty action");
                return -rte_errno;
        }

        /* Mark only is equal to mark + passthru. */
        if (dest_num == 0) {
                filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
                filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
                filter->add_fltr.rule_cfg.action_set.count = ++number;
        }

        return 0;
}

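/* Check the input set against the pattern's supported mask. If the rule
 * matches no field at all, refine it by matching the L3 next-protocol
 * field instead, so a bare ETH/IPV4/UDP or ETH/IPV6/TCP pattern still
 * selects the intended L4 protocol.
 */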
static bool
iavf_fdir_refine_input_set(const uint64_t input_set,
                           const uint64_t input_set_mask,
                           struct iavf_fdir_conf *filter)
{
        struct virtchnl_proto_hdr *hdr, *hdr_last;
        struct rte_flow_item_ipv4 ipv4_spec;
        struct rte_flow_item_ipv6 ipv6_spec;
        int last_layer;
        uint8_t proto_id;

        if (input_set & ~input_set_mask)
                return false;
        else if (input_set)
                return true;

        last_layer = filter->add_fltr.rule_cfg.proto_hdrs.count - 1;
        /* A TCP/UDP pattern carries at least ETH, L3 and L4 headers, so the
         * index of the last layer must be no less than 2.
         */
        if (last_layer < 2)
                return false;
        hdr_last = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer];
        if (hdr_last->type == VIRTCHNL_PROTO_HDR_TCP)
                proto_id = 6;   /* IPPROTO_TCP */
        else if (hdr_last->type == VIRTCHNL_PROTO_HDR_UDP)
                proto_id = 17;  /* IPPROTO_UDP */
        else
                return false;

        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer - 1];
        switch (hdr->type) {
        case VIRTCHNL_PROTO_HDR_IPV4:
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
                memset(&ipv4_spec, 0, sizeof(ipv4_spec));
                ipv4_spec.hdr.next_proto_id = proto_id;
                rte_memcpy(hdr->buffer, &ipv4_spec.hdr,
                           sizeof(ipv4_spec.hdr));
                return true;
        case VIRTCHNL_PROTO_HDR_IPV6:
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
                memset(&ipv6_spec, 0, sizeof(ipv6_spec));
                ipv6_spec.hdr.proto = proto_id;
                rte_memcpy(hdr->buffer, &ipv6_spec.hdr,
                           sizeof(ipv6_spec.hdr));
                return true;
        default:
                return false;
        }
}

static void
iavf_fdir_add_fragment_hdr(struct virtchnl_proto_hdrs *hdrs, int layer)
{
        struct virtchnl_proto_hdr *hdr1;
        struct virtchnl_proto_hdr *hdr2;
        int i;

        if (layer < 0 || layer > hdrs->count)
                return;

        /* shift the existing headers up one layer to make room */
        for (i = hdrs->count; i >= layer; i--) {
                hdr1 = &hdrs->proto_hdr[i];
                hdr2 = &hdrs->proto_hdr[i - 1];
                *hdr1 = *hdr2;
        }

        /* insert the dummy fragment header */
        hdr1 = &hdrs->proto_hdr[layer];
        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, IPV4_FRAG);
        hdr1->field_selector = 0;
        hdrs->count = ++layer;
}
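
/* The caller below (IPv4 case) passes layer == hdrs->count, so the dummy
 * header is appended right after the IPv4 header: e.g. [ETH, IPV4]
 * becomes [ETH, IPV4, IPV4_FRAG].
 */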

static int
iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                        const struct rte_flow_item pattern[],
                        const uint64_t input_set_mask,
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        struct virtchnl_proto_hdrs *hdrs =
                        &filter->add_fltr.rule_cfg.proto_hdrs;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        const struct rte_flow_item_raw *raw_spec, *raw_mask;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
        const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
        const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
        const struct rte_flow_item_esp *esp_spec, *esp_mask;
        const struct rte_flow_item_ah *ah_spec, *ah_mask;
        const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
        const struct rte_flow_item_ecpri *ecpri_spec, *ecpri_mask;
        const struct rte_flow_item_gre *gre_spec, *gre_mask;
        const struct rte_flow_item_l2tpv2 *l2tpv2_spec, *l2tpv2_mask;
        const struct rte_flow_item_ppp *ppp_spec, *ppp_mask;
        const struct rte_flow_item *item = pattern;
        struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
        struct rte_ecpri_common_hdr ecpri_common;
        uint64_t input_set = IAVF_INSET_NONE;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type next_type;
        uint8_t tun_inner = 0;
        uint16_t ether_type, flags_version;
        uint8_t item_num = 0;
        int layer = 0;

        uint8_t ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
        };

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                item_type = item->type;

                if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    item_type ==
                                    RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Range not supported");
                        return -rte_errno;
                }
                item_num++;

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_RAW: {
                        raw_spec = item->spec;
                        raw_mask = item->mask;

                        if (item_num != 1)
                                return -rte_errno;

                        if (!raw_spec || !raw_mask ||
                            raw_spec->length != raw_mask->length)
                                return -rte_errno;

                        uint16_t pkt_len = 0;
                        uint16_t tmp_val = 0;
                        uint8_t tmp = 0;
                        int i, j;

                        pkt_len = raw_spec->length;

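                        /* Convert the ASCII hex pattern and mask, two
                         * characters per byte, into binary spec/mask bytes.
                         */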
                        for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
                                tmp = raw_spec->pattern[i];
                                if (tmp >= 'a' && tmp <= 'f')
                                        tmp_val = tmp - 'a' + 10;
                                if (tmp >= 'A' && tmp <= 'F')
                                        tmp_val = tmp - 'A' + 10;
                                if (tmp >= '0' && tmp <= '9')
                                        tmp_val = tmp - '0';

                                tmp_val *= 16;
                                tmp = raw_spec->pattern[i + 1];
                                if (tmp >= 'a' && tmp <= 'f')
                                        tmp_val += (tmp - 'a' + 10);
                                if (tmp >= 'A' && tmp <= 'F')
                                        tmp_val += (tmp - 'A' + 10);
                                if (tmp >= '0' && tmp <= '9')
                                        tmp_val += (tmp - '0');

                                hdrs->raw.spec[j] = tmp_val;

                                tmp = raw_mask->pattern[i];
                                if (tmp >= 'a' && tmp <= 'f')
                                        tmp_val = tmp - 'a' + 10;
                                if (tmp >= 'A' && tmp <= 'F')
                                        tmp_val = tmp - 'A' + 10;
                                if (tmp >= '0' && tmp <= '9')
                                        tmp_val = tmp - '0';

                                tmp_val *= 16;
                                tmp = raw_mask->pattern[i + 1];
                                if (tmp >= 'a' && tmp <= 'f')
                                        tmp_val += (tmp - 'a' + 10);
                                if (tmp >= 'A' && tmp <= 'F')
                                        tmp_val += (tmp - 'A' + 10);
                                if (tmp >= '0' && tmp <= '9')
                                        tmp_val += (tmp - '0');

                                hdrs->raw.mask[j] = tmp_val;
                        }

                        hdrs->raw.pkt_len = pkt_len / 2;
                        hdrs->tunnel_level = 0;
                        hdrs->count = 0;
                        return 0;
                }

                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr1 = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);

                        if (next_type == RTE_FLOW_ITEM_TYPE_END &&
                            (!eth_spec || !eth_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "NULL eth spec/mask.");
                                return -rte_errno;
                        }

                        if (eth_spec && eth_mask) {
                                if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
                                        input_set |= IAVF_INSET_DMAC;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
                                                                        ETH,
                                                                        DST);
                                } else if (!rte_is_zero_ether_addr(&eth_mask->src)) {
                                        input_set |= IAVF_INSET_SMAC;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
                                                                        ETH,
                                                                        SRC);
                                }

                                if (eth_mask->type) {
                                        if (eth_mask->type != RTE_BE16(0xffff)) {
                                                rte_flow_error_set(error, EINVAL,
                                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                                        item, "Invalid type mask.");
                                                return -rte_errno;
                                        }

                                        ether_type = rte_be_to_cpu_16(eth_spec->type);
                                        if (ether_type == RTE_ETHER_TYPE_IPV4 ||
                                                ether_type == RTE_ETHER_TYPE_IPV6) {
                                                rte_flow_error_set(error, EINVAL,
                                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                                        item,
                                                        "Unsupported ether_type.");
                                                return -rte_errno;
                                        }

                                        input_set |= IAVF_INSET_ETHERTYPE;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                        ETHERTYPE);
                                }

                                rte_memcpy(hdr1->buffer, eth_spec,
                                           sizeof(struct rte_ether_hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec = item->spec;
                        ipv4_last = item->last;
                        ipv4_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

                        if (!(ipv4_spec && ipv4_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_last &&
                            (ipv4_last->hdr.version_ihl ||
                             ipv4_last->hdr.type_of_service ||
                             ipv4_last->hdr.time_to_live ||
                             ipv4_last->hdr.total_length ||
                             ipv4_last->hdr.next_proto_id ||
                             ipv4_last->hdr.hdr_checksum ||
                             ipv4_last->hdr.src_addr ||
                             ipv4_last->hdr.dst_addr)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 last.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.type_of_service ==
                            UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_TOS;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 DSCP);
                        }

                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_PROTO;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 PROT);
                        }

                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_TTL;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 TTL);
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
                                input_set |= IAVF_INSET_IPV4_SRC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 SRC);
                        }

                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
                                input_set |= IAVF_INSET_IPV4_DST;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 DST);
                        }

                        if (tun_inner) {
                                input_set &= ~IAVF_PROT_IPV4_OUTER;
                                input_set |= IAVF_PROT_IPV4_INNER;
                        }

                        rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
                                   sizeof(ipv4_spec->hdr));

                        hdrs->count = ++layer;

                        /* Fragmented IPv4:
                         * both spec and mask carry only the MF flag (0x2000)
                         */
                        if (ipv4_spec->hdr.fragment_offset ==
                            rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
                            ipv4_mask->hdr.fragment_offset ==
                            rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
                                /* All IPv4 fragment packets share the same
                                 * ethertype; if the spec and mask are valid,
                                 * add ethertype to the input set.
                                 */
                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                /* add a dummy header for the IPv4 fragment */
                                iavf_fdir_add_fragment_hdr(hdrs, layer);
                        } else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

                        if (!(ipv6_spec && ipv6_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        if ((ipv6_mask->hdr.vtc_flow &
                              rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
                             == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
                                input_set |= IAVF_INSET_IPV6_TC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 TC);
                        }

                        if (ipv6_mask->hdr.proto == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV6_NEXT_HDR;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 PROT);
                        }

                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 HOP_LIMIT);
                        }

                        if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
                                    RTE_DIM(ipv6_mask->hdr.src_addr))) {
                                input_set |= IAVF_INSET_IPV6_SRC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 SRC);
                        }
                        if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
                                    RTE_DIM(ipv6_mask->hdr.dst_addr))) {
                                input_set |= IAVF_INSET_IPV6_DST;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 DST);
                        }

                        if (tun_inner) {
                                input_set &= ~IAVF_PROT_IPV6_OUTER;
                                input_set |= IAVF_PROT_IPV6_INNER;
                        }

                        rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
                                   sizeof(ipv6_spec->hdr));

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
                        ipv6_frag_spec = item->spec;
                        ipv6_frag_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6_EH_FRAG);

                        if (!(ipv6_frag_spec && ipv6_frag_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        /* Fragmented IPv6:
                         * both spec and mask carry only the M flag (0x1)
                         */
                        if (ipv6_frag_spec->hdr.frag_data ==
                            rte_cpu_to_be_16(1) &&
                            ipv6_frag_mask->hdr.frag_data ==
                            rte_cpu_to_be_16(1)) {
                                /* All IPv6 fragment packets share the same
                                 * ethertype; if the spec and mask are valid,
                                 * add ethertype to the input set.
                                 */
                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                rte_memcpy(hdr->buffer, &ipv6_frag_spec->hdr,
                                           sizeof(ipv6_frag_spec->hdr));
                        } else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        hdrs->count = ++layer;
                        break;

1108                 case RTE_FLOW_ITEM_TYPE_UDP:
1109                         udp_spec = item->spec;
1110                         udp_mask = item->mask;
1111
1112                         hdr = &hdrs->proto_hdr[layer];
1113
1114                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
1115
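                             /* only exact src/dst port matches are supported;
                              * masks on length or checksum are rejected below
                              */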
1116                         if (udp_spec && udp_mask) {
1117                                 if (udp_mask->hdr.dgram_len ||
1118                                         udp_mask->hdr.dgram_cksum) {
1119                                         rte_flow_error_set(error, EINVAL,
1120                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1121                                                 "Invalid UDP mask");
1122                                         return -rte_errno;
1123                                 }
1124
1125                                 if (udp_mask->hdr.src_port == UINT16_MAX) {
1126                                         input_set |= IAVF_INSET_UDP_SRC_PORT;
1127                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
1128                                 }
1129                                 if (udp_mask->hdr.dst_port == UINT16_MAX) {
1130                                         input_set |= IAVF_INSET_UDP_DST_PORT;
1131                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
1132                                 }
1133
1134                                 if (tun_inner) {
1135                                         input_set &= ~IAVF_PROT_UDP_OUTER;
1136                                         input_set |= IAVF_PROT_UDP_INNER;
1137                                 }
1138
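                                     /* the UDP header layout is identical
                                      * over IPv4 and IPv6, so one copy serves
                                      * both L3 types
                                      */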
1139                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
1140                                     l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1141                                         rte_memcpy(hdr->buffer,
1142                                                 &udp_spec->hdr,
1143                                                 sizeof(udp_spec->hdr));
1147                         }
1148
1149                         hdrs->count = ++layer;
1150                         break;
1151
1152                 case RTE_FLOW_ITEM_TYPE_TCP:
1153                         tcp_spec = item->spec;
1154                         tcp_mask = item->mask;
1155
1156                         hdr = &hdrs->proto_hdr[layer];
1157
1158                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
1159
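                             /* only exact src/dst port matches are supported;
                              * any other masked TCP field is rejected below
                              */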
1160                         if (tcp_spec && tcp_mask) {
1161                                 if (tcp_mask->hdr.sent_seq ||
1162                                         tcp_mask->hdr.recv_ack ||
1163                                         tcp_mask->hdr.data_off ||
1164                                         tcp_mask->hdr.tcp_flags ||
1165                                         tcp_mask->hdr.rx_win ||
1166                                         tcp_mask->hdr.cksum ||
1167                                         tcp_mask->hdr.tcp_urp) {
1168                                         rte_flow_error_set(error, EINVAL,
1169                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1170                                                 "Invalid TCP mask");
1171                                         return -rte_errno;
1172                                 }
1173
1174                                 if (tcp_mask->hdr.src_port == UINT16_MAX) {
1175                                         input_set |= IAVF_INSET_TCP_SRC_PORT;
1176                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
1177                                 }
1178                                 if (tcp_mask->hdr.dst_port == UINT16_MAX) {
1179                                         input_set |= IAVF_INSET_TCP_DST_PORT;
1180                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
1181                                 }
1182
1183                                 if (tun_inner) {
1184                                         input_set &= ~IAVF_PROT_TCP_OUTER;
1185                                         input_set |= IAVF_PROT_TCP_INNER;
1186                                 }
1187
1188                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
1189                                     l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1190                                         rte_memcpy(hdr->buffer,
1191                                                 &tcp_spec->hdr,
1192                                                 sizeof(tcp_spec->hdr));
1196                         }
1197
1198                         hdrs->count = ++layer;
1199                         break;
1200
1201                 case RTE_FLOW_ITEM_TYPE_SCTP:
1202                         sctp_spec = item->spec;
1203                         sctp_mask = item->mask;
1204
1205                         hdr = &hdrs->proto_hdr[layer];
1206
1207                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
1208
1209                         if (sctp_spec && sctp_mask) {
1210                                 if (sctp_mask->hdr.cksum) {
1211                                         rte_flow_error_set(error, EINVAL,
1212                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1213                                                 "Invalid SCTP mask");
1214                                         return -rte_errno;
1215                                 }
1216
1217                                 if (sctp_mask->hdr.src_port == UINT16_MAX) {
1218                                         input_set |= IAVF_INSET_SCTP_SRC_PORT;
1219                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
1220                                 }
1221                                 if (sctp_mask->hdr.dst_port == UINT16_MAX) {
1222                                         input_set |= IAVF_INSET_SCTP_DST_PORT;
1223                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
1224                                 }
1225
1226                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
1227                                     l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1228                                         rte_memcpy(hdr->buffer,
1229                                                 &sctp_spec->hdr,
1230                                                 sizeof(sctp_spec->hdr));
1234                         }
1235
1236                         hdrs->count = ++layer;
1237                         break;
1238
1239                 case RTE_FLOW_ITEM_TYPE_GTPU:
1240                         gtp_spec = item->spec;
1241                         gtp_mask = item->mask;
1242
1243                         hdr = &hdrs->proto_hdr[layer];
1244
1245                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
1246
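                             /* only a fully-masked TEID can be matched;
                              * version/flags, message type and length must
                              * stay unmasked
                              */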
1247                         if (gtp_spec && gtp_mask) {
1248                                 if (gtp_mask->v_pt_rsv_flags ||
1249                                         gtp_mask->msg_type ||
1250                                         gtp_mask->msg_len) {
1251                                         rte_flow_error_set(error, EINVAL,
1252                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1253                                                 item, "Invalid GTP mask");
1254                                         return -rte_errno;
1255                                 }
1256
1257                                 if (gtp_mask->teid == UINT32_MAX) {
1258                                         input_set |= IAVF_INSET_GTPU_TEID;
1259                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
1260                                 }
1261
1262                                 rte_memcpy(hdr->buffer,
1263                                         gtp_spec, sizeof(*gtp_spec));
1264                         }
1265
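                             /* items following GTP-U describe the inner
                              * (tunnelled) packet */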
1266                         tun_inner = 1;
1267
1268                         hdrs->count = ++layer;
1269                         break;
1270
1271                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1272                         gtp_psc_spec = item->spec;
1273                         gtp_psc_mask = item->mask;
1274
1275                         hdr = &hdrs->proto_hdr[layer];
1276
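                             /* choose the virtchnl header type from the PDU
                              * type: generic GTPU_EH when no spec is given or
                              * the PDU type is unmasked, otherwise the uplink
                              * or downlink specific variant
                              */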
1277                         if (!gtp_psc_spec)
1278                                 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
1279                         else if (gtp_psc_mask && gtp_psc_mask->hdr.qfi &&
1280                                  !gtp_psc_mask->hdr.type)
1281                                 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
1282                         else if (gtp_psc_spec->hdr.type == IAVF_GTPU_EH_UPLINK)
1283                                 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP);
1284                         else if (gtp_psc_spec->hdr.type == IAVF_GTPU_EH_DWLINK)
1285                                 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN);
1286
1287                         if (gtp_psc_spec && gtp_psc_mask) {
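                                     /* 0x3F masks all six QFI bits */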
1288                                 if (gtp_psc_mask->hdr.qfi == 0x3F) {
1289                                         input_set |= IAVF_INSET_GTPU_QFI;
1290                                         if (!gtp_psc_mask->hdr.type)
1291                                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
1292                                                                                  GTPU_EH, QFI);
1293                                         else if (gtp_psc_spec->hdr.type ==
1294                                                                 IAVF_GTPU_EH_UPLINK)
1295                                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
1296                                                                                  GTPU_UP, QFI);
1297                                         else if (gtp_psc_spec->hdr.type ==
1298                                                                 IAVF_GTPU_EH_DWLINK)
1299                                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
1300                                                                                  GTPU_DWN, QFI);
1301                                 }
1302
1303                                 rte_memcpy(hdr->buffer, gtp_psc_spec,
1304                                         sizeof(*gtp_psc_spec));
1305                         }
1306
1307                         hdrs->count = ++layer;
1308                         break;
1309
1310                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1311                         l2tpv3oip_spec = item->spec;
1312                         l2tpv3oip_mask = item->mask;
1313
1314                         hdr = &hdrs->proto_hdr[layer];
1315
1316                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
1317
1318                         if (l2tpv3oip_spec && l2tpv3oip_mask) {
1319                                 if (l2tpv3oip_mask->session_id == UINT32_MAX) {
1320                                         input_set |= IAVF_L2TPV3OIP_SESSION_ID;
1321                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
1322                                 }
1323
1324                                 rte_memcpy(hdr->buffer, l2tpv3oip_spec,
1325                                         sizeof(*l2tpv3oip_spec));
1326                         }
1327
1328                         hdrs->count = ++layer;
1329                         break;
1330
1331                 case RTE_FLOW_ITEM_TYPE_ESP:
1332                         esp_spec = item->spec;
1333                         esp_mask = item->mask;
1334
1335                         hdr = &hdrs->proto_hdr[layer];
1336
1337                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
1338
1339                         if (esp_spec && esp_mask) {
1340                                 if (esp_mask->hdr.spi == UINT32_MAX) {
1341                                         input_set |= IAVF_INSET_ESP_SPI;
1342                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
1343                                 }
1344
1345                                 rte_memcpy(hdr->buffer, &esp_spec->hdr,
1346                                         sizeof(esp_spec->hdr));
1347                         }
1348
1349                         hdrs->count = ++layer;
1350                         break;
1351
1352                 case RTE_FLOW_ITEM_TYPE_AH:
1353                         ah_spec = item->spec;
1354                         ah_mask = item->mask;
1355
1356                         hdr = &hdrs->proto_hdr[layer];
1357
1358                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
1359
1360                         if (ah_spec && ah_mask) {
1361                                 if (ah_mask->spi == UINT32_MAX) {
1362                                         input_set |= IAVF_INSET_AH_SPI;
1363                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
1364                                 }
1365
1366                                 rte_memcpy(hdr->buffer, ah_spec,
1367                                         sizeof(*ah_spec));
1368                         }
1369
1370                         hdrs->count = ++layer;
1371                         break;
1372
1373                 case RTE_FLOW_ITEM_TYPE_PFCP:
1374                         pfcp_spec = item->spec;
1375                         pfcp_mask = item->mask;
1376
1377                         hdr = &hdrs->proto_hdr[layer];
1378
1379                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
1380
1381                         if (pfcp_spec && pfcp_mask) {
1382                                 if (pfcp_mask->s_field == UINT8_MAX) {
1383                                         input_set |= IAVF_INSET_PFCP_S_FIELD;
1384                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
1385                                 }
1386
1387                                 rte_memcpy(hdr->buffer, pfcp_spec,
1388                                         sizeof(*pfcp_spec));
1389                         }
1390
1391                         hdrs->count = ++layer;
1392                         break;
1393
1394                 case RTE_FLOW_ITEM_TYPE_ECPRI:
1395                         ecpri_spec = item->spec;
1396                         ecpri_mask = item->mask;
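                             /* only eCPRI IQ-data messages (type 0) with a
                              * fully-masked pc_id are supported; the spec is
                              * dereferenced only inside the NULL-checked
                              * block below
                              */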
1397
1398                         hdr = &hdrs->proto_hdr[layer];
1399
1400                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ECPRI);
1401
1402                         if (ecpri_spec && ecpri_mask) {
1403                                 ecpri_common.u32 = rte_be_to_cpu_32(
1404                                         ecpri_spec->hdr.common.u32);
1405                                 if (ecpri_common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA &&
1406                                                 ecpri_mask->hdr.type0.pc_id == UINT16_MAX) {
1407                                         input_set |= IAVF_ECPRI_PC_RTC_ID;
1408                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ECPRI,
1409                                                                          PC_RTC_ID);
1410                                 }
1411
1412                                 rte_memcpy(hdr->buffer, ecpri_spec,
1413                                         sizeof(*ecpri_spec));
1414                         }
1415
1416                         hdrs->count = ++layer;
1417                         break;
1418
1419                 case RTE_FLOW_ITEM_TYPE_GRE:
1420                         gre_spec = item->spec;
1421                         gre_mask = item->mask;
1422
1423                         hdr = &hdrs->proto_hdr[layer];
1424
1425                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GRE);
1426
1427                         if (gre_spec && gre_mask) {
1428                                 rte_memcpy(hdr->buffer, gre_spec,
1429                                            sizeof(*gre_spec));
1430                         }
1431
1432                         tun_inner = 1;
1433
1434                         hdrs->count = ++layer;
1435                         break;
1436
1437                 case RTE_FLOW_ITEM_TYPE_L2TPV2:
1438                         l2tpv2_spec = item->spec;
1439                         l2tpv2_mask = item->mask;
1440
1441                         hdr = &hdrs->proto_hdr[layer];
1442
1443                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV2);
1444
1445                         if (l2tpv2_spec && l2tpv2_mask) {
1446                                 flags_version =
1447                                         rte_be_to_cpu_16(l2tpv2_spec->hdr.common.flags_version);
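                                     /* the session-id offset within the
                                      * L2TPv2 header depends on the L/S/O
                                      * flag bits, hence the per-combination
                                      * type0..type7 union members
                                      */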
1448                                 if ((flags_version == RTE_L2TPV2_MSG_TYPE_CONTROL &&
1449                                      l2tpv2_mask->hdr.type3.session_id == UINT16_MAX) ||
1450                                     (flags_version == RTE_L2TPV2_MSG_TYPE_DATA &&
1451                                      l2tpv2_mask->hdr.type7.session_id == UINT16_MAX) ||
1452                                     (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_L &&
1453                                      l2tpv2_mask->hdr.type6.session_id == UINT16_MAX) ||
1454                                     (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_S &&
1455                                      l2tpv2_mask->hdr.type5.session_id == UINT16_MAX) ||
1456                                     (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_O &&
1457                                      l2tpv2_mask->hdr.type4.session_id == UINT16_MAX) ||
1458                                     (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_L_S &&
1459                                      l2tpv2_mask->hdr.type3.session_id == UINT16_MAX) ||
1460                                     (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_L_O &&
1461                                      l2tpv2_mask->hdr.type2.session_id == UINT16_MAX) ||
1462                                     (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_S_O &&
1463                                      l2tpv2_mask->hdr.type1.session_id == UINT16_MAX) ||
1464                                     (flags_version == RTE_L2TPV2_MSG_TYPE_DATA_L_S_O &&
1465                                      l2tpv2_mask->hdr.type0.session_id == UINT16_MAX)) {
1466                                         input_set |= IAVF_L2TPV2_SESSION_ID;
1467                                         if (flags_version & IAVF_L2TPV2_FLAGS_LEN)
1468                                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
1469                                                                 L2TPV2,
1470                                                                 LEN_SESS_ID);
1471                                         else
1472                                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
1473                                                                 L2TPV2,
1474                                                                 SESS_ID);
1475                                 }
1476
1477                                 rte_memcpy(hdr->buffer, l2tpv2_spec,
1478                                            sizeof(*l2tpv2_spec));
1479                         }
1480
1481                         tun_inner = 1;
1482
1483                         hdrs->count = ++layer;
1484                         break;
1485
1486                 case RTE_FLOW_ITEM_TYPE_PPP:
1487                         ppp_spec = item->spec;
1488                         ppp_mask = item->mask;
1489
1490                         hdr = &hdrs->proto_hdr[layer];
1491
1492                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PPP);
1493
1494                         if (ppp_spec && ppp_mask) {
1495                                 rte_memcpy(hdr->buffer, ppp_spec,
1496                                            sizeof(*ppp_spec));
1497                         }
1498
1499                         hdrs->count = ++layer;
1500                         break;
1501
1502                 case RTE_FLOW_ITEM_TYPE_VOID:
1503                         break;
1504
1505                 default:
1506                         rte_flow_error_set(error, EINVAL,
1507                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1508                                         "Invalid pattern item.");
1509                         return -rte_errno;
1510                 }
1511         }
1512
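             /* reject patterns carrying more protocol headers than one
              * virtchnl filter message can hold */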
1513         if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
1514                 rte_flow_error_set(error, EINVAL,
1515                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1516                         "Protocol header layers exceed the maximum value");
1517                 return -rte_errno;
1518         }
1519
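             /* ETHERTYPE is ORed into the allowed mask because fragment
              * rules add it on top of the pattern's own input set */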
1520         if (!iavf_fdir_refine_input_set(input_set,
1521                                         input_set_mask | IAVF_INSET_ETHERTYPE,
1522                                         filter)) {
1523                 rte_flow_error_set(error, EINVAL,
1524                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
1525                                    "Invalid input set");
1526                 return -rte_errno;
1527         }
1528
1529         filter->input_set = input_set;
1530
1531         return 0;
1532 }
1533
1534 static int
1535 iavf_fdir_parse(struct iavf_adapter *ad,
1536                 struct iavf_pattern_match_item *array,
1537                 uint32_t array_len,
1538                 const struct rte_flow_item pattern[],
1539                 const struct rte_flow_action actions[],
1540                 void **meta,
1541                 struct rte_flow_error *error)
1542 {
1543         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
1544         struct iavf_fdir_conf *filter = &vf->fdir.conf;
1545         struct iavf_pattern_match_item *item = NULL;
1546         int ret;
1547
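             /* the parsed rule is built in the VF's single fdir.conf slot
              * and handed back through *meta to the create stage */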
1548         memset(filter, 0, sizeof(*filter));
1549
1550         item = iavf_search_pattern_match_item(pattern, array, array_len, error);
1551         if (!item)
1552                 return -rte_errno;
1553
1554         ret = iavf_fdir_parse_pattern(ad, pattern, item->input_set_mask,
1555                                       error, filter);
1556         if (ret)
1557                 goto error;
1558
1559         ret = iavf_fdir_parse_action(ad, actions, error, filter);
1560         if (ret)
1561                 goto error;
1562
1563         if (meta)
1564                 *meta = filter;
1565
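             /* reached on success as well: the matched pattern item was
              * allocated by the search above and must always be freed */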
1566 error:
1567         rte_free(item);
1568         return ret;
1569 }
1570
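     /* FDIR rules run in the distributor stage of the generic flow
      * framework */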
1571 static struct iavf_flow_parser iavf_fdir_parser = {
1572         .engine = &iavf_fdir_engine,
1573         .array = iavf_fdir_pattern,
1574         .array_len = RTE_DIM(iavf_fdir_pattern),
1575         .parse_pattern_action = iavf_fdir_parse,
1576         .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
1577 };
1578
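     /* constructor: register the FDIR engine when the driver is loaded */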
1579 RTE_INIT(iavf_fdir_engine_register)
1580 {
1581         iavf_register_flow_engine(&iavf_fdir_engine);
1582 }