net/iavf: support flow director for GRE tunnel packet
[dpdk.git] / drivers / net / iavf / iavf_fdir.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "virtchnl.h"
#include "iavf_rxtx.h"

#define IAVF_FDIR_MAX_QREGION_SIZE 128

#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

#define IAVF_GTPU_EH_DWLINK 0
#define IAVF_GTPU_EH_UPLINK 1
#define IAVF_FDIR_INSET_ETH (\
        IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
        IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_ID)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
        IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT (\
        IAVF_INSET_IPV6_ID)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV4 (\
        IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
        IAVF_INSET_TUN_IPV4_PROTO | IAVF_INSET_TUN_IPV4_TOS | \
        IAVF_INSET_TUN_IPV4_TTL)

#define IAVF_FDIR_INSET_GTPU_IPV4_UDP (\
        IAVF_FDIR_INSET_GTPU_IPV4 | \
        IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV4_TCP (\
        IAVF_FDIR_INSET_GTPU_IPV4 | \
        IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_IPV6_GTPU (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV6 (\
        IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
        IAVF_INSET_TUN_IPV6_NEXT_HDR | IAVF_INSET_TUN_IPV6_TC | \
        IAVF_INSET_TUN_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_GTPU_IPV6_UDP (\
        IAVF_FDIR_INSET_GTPU_IPV6 | \
        IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV6_TCP (\
        IAVF_FDIR_INSET_GTPU_IPV6 | \
        IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_L2TPV3OIP (\
        IAVF_L2TPV3OIP_SESSION_ID)

#define IAVF_FDIR_INSET_IPV4_ESP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_ESP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
        IAVF_INSET_AH_SPI)

#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_PFCP (\
        IAVF_INSET_PFCP_S_FIELD)

#define IAVF_FDIR_INSET_ECPRI (\
        IAVF_INSET_ECPRI)

#define IAVF_FDIR_INSET_GRE_IPV4 (\
        IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
        IAVF_INSET_TUN_IPV4_TOS | IAVF_INSET_TUN_IPV4_PROTO)

#define IAVF_FDIR_INSET_GRE_IPV4_TCP (\
        IAVF_FDIR_INSET_GRE_IPV4 | IAVF_INSET_TUN_TCP_SRC_PORT | \
        IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV4_UDP (\
        IAVF_FDIR_INSET_GRE_IPV4 | IAVF_INSET_TUN_UDP_SRC_PORT | \
        IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV6 (\
        IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
        IAVF_INSET_TUN_IPV6_TC | IAVF_INSET_TUN_IPV6_NEXT_HDR)

#define IAVF_FDIR_INSET_GRE_IPV6_TCP (\
        IAVF_FDIR_INSET_GRE_IPV6 | IAVF_INSET_TUN_TCP_SRC_PORT | \
        IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV6_UDP (\
        IAVF_FDIR_INSET_GRE_IPV6 | IAVF_INSET_TUN_UDP_SRC_PORT | \
        IAVF_INSET_TUN_UDP_DST_PORT)

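/*
 * Note on the GRE input sets above: the GRE header itself carries no
 * matchable field in this parser (its spec is only copied through), so the
 * IAVF_FDIR_INSET_GRE_* macros describe inner header fields (IAVF_INSET_TUN_*)
 * behind the tunnel. The pattern table below maps each eth/.../gre/...
 * pattern to the corresponding inner input set.
 */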
static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
        {iavf_pattern_ethertype,                 IAVF_FDIR_INSET_ETH,               IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4,                  IAVF_FDIR_INSET_ETH_IPV4,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp,              IAVF_FDIR_INSET_ETH_IPV4_UDP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_tcp,              IAVF_FDIR_INSET_ETH_IPV4_TCP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_sctp,             IAVF_FDIR_INSET_ETH_IPV4_SCTP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6,                  IAVF_FDIR_INSET_ETH_IPV6,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_frag_ext,         IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp,              IAVF_FDIR_INSET_ETH_IPV6_UDP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_tcp,              IAVF_FDIR_INSET_ETH_IPV6_TCP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_sctp,             IAVF_FDIR_INSET_ETH_IPV6_SCTP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu,             IAVF_FDIR_INSET_IPV4_GTPU,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4,        IAVF_FDIR_INSET_GTPU_IPV4,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4_udp,    IAVF_FDIR_INSET_GTPU_IPV4_UDP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,    IAVF_FDIR_INSET_GTPU_IPV4_TCP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6,        IAVF_FDIR_INSET_GTPU_IPV6,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6_udp,    IAVF_FDIR_INSET_GTPU_IPV6_UDP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,    IAVF_FDIR_INSET_GTPU_IPV6_TCP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh,          IAVF_FDIR_INSET_IPV4_GTPU_EH,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4,     IAVF_FDIR_INSET_GTPU_IPV4,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6,     IAVF_FDIR_INSET_GTPU_IPV6,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gtpu,             IAVF_FDIR_INSET_IPV6_GTPU,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gtpu_eh,          IAVF_FDIR_INSET_IPV6_GTPU_EH,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_esp,              IAVF_FDIR_INSET_IPV4_ESP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_esp,              IAVF_FDIR_INSET_IPV6_ESP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ah,               IAVF_FDIR_INSET_AH,                IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_ah,               IAVF_FDIR_INSET_AH,                IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp_esp,          IAVF_FDIR_INSET_IPV4_NATT_ESP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_esp,          IAVF_FDIR_INSET_IPV6_NATT_ESP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_pfcp,             IAVF_FDIR_INSET_PFCP,              IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_pfcp,             IAVF_FDIR_INSET_PFCP,              IAVF_INSET_NONE},
        {iavf_pattern_eth_ecpri,                 IAVF_FDIR_INSET_ECPRI,             IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ecpri,            IAVF_FDIR_INSET_ECPRI,             IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4,         IAVF_FDIR_INSET_GRE_IPV4,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_tcp,     IAVF_FDIR_INSET_GRE_IPV4_TCP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_udp,     IAVF_FDIR_INSET_GRE_IPV4_UDP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6,         IAVF_FDIR_INSET_GRE_IPV6,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_tcp,     IAVF_FDIR_INSET_GRE_IPV6_TCP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_udp,     IAVF_FDIR_INSET_GRE_IPV6_UDP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4,         IAVF_FDIR_INSET_GRE_IPV4,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_tcp,     IAVF_FDIR_INSET_GRE_IPV4_TCP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_udp,     IAVF_FDIR_INSET_GRE_IPV4_UDP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6,         IAVF_FDIR_INSET_GRE_IPV6,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_tcp,     IAVF_FDIR_INSET_GRE_IPV6_TCP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_udp,     IAVF_FDIR_INSET_GRE_IPV6_UDP,      IAVF_INSET_NONE},
};
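/*
 * Illustrative only (not part of the driver): with the GRE entries in the
 * table above, a GRE-tunnelled flow director rule can be created from
 * testpmd roughly as follows, matching the inner 5-tuple behind an outer
 * IPv4 GRE header. The port id, address and queue index are example values:
 *
 *   flow create 0 ingress pattern eth / ipv4 / gre / ipv4 src is 10.0.0.1 /
 *        udp src is 1000 dst is 2000 / end actions queue index 3 / end
 */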

static struct iavf_flow_parser iavf_fdir_parser;

static int
iavf_fdir_init(struct iavf_adapter *ad)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_flow_parser *parser;

        if (!vf->vf_res)
                return -EINVAL;

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
                parser = &iavf_fdir_parser;
        else
                return -ENOTSUP;

        return iavf_register_parser(parser, ad);
}

static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
        iavf_unregister_parser(&iavf_fdir_parser, ad);
}

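/*
 * Rule lifecycle: iavf_fdir_create() programs a parsed rule into the PF over
 * virtchnl and keeps a private copy in flow->rule; iavf_fdir_destroy()
 * deletes it again. When a rule carries a MARK action, Rx FDIR-id extraction
 * is enabled/disabled alongside the rule.
 */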
static int
iavf_fdir_create(struct iavf_adapter *ad,
                struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        struct iavf_fdir_conf *rule;
        int ret;

        rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
        if (!rule) {
                rte_flow_error_set(error, ENOMEM,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to allocate memory for fdir rule");
                return -rte_errno;
        }

        ret = iavf_fdir_add(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to add filter rule.");
                goto free_entry;
        }

        if (filter->mark_flag == 1)
                iavf_fdir_rx_proc_enable(ad, 1);

        rte_memcpy(rule, filter, sizeof(*rule));
        flow->rule = rule;

        return 0;

free_entry:
        rte_free(rule);
        return -rte_errno;
}

static int
iavf_fdir_destroy(struct iavf_adapter *ad,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter;
        int ret;

        filter = (struct iavf_fdir_conf *)flow->rule;

        ret = iavf_fdir_del(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to delete filter rule.");
                return -rte_errno;
        }

        if (filter->mark_flag == 1)
                iavf_fdir_rx_proc_enable(ad, 0);

        flow->rule = NULL;
        rte_free(filter);

        return 0;
}

static int
iavf_fdir_validation(struct iavf_adapter *ad,
                __rte_unused struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        int ret;

        ret = iavf_fdir_check(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to validate filter rule.");
                return -rte_errno;
        }

        return 0;
}

static struct iavf_flow_engine iavf_fdir_engine = {
        .init = iavf_fdir_init,
        .uninit = iavf_fdir_uninit,
        .create = iavf_fdir_create,
        .destroy = iavf_fdir_destroy,
        .validation = iavf_fdir_validation,
        .type = IAVF_FLOW_ENGINE_FDIR,
};

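/*
 * Queue-region encoding used below: the region must be a contiguous,
 * power-of-two block of queues, and virtchnl carries it as a start index
 * plus a log2 size. For example, queues 8-15 are encoded as index = 8 and
 * region = 3, since rte_fls_u32(8) - 1 == 3.
 */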
static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
                        struct rte_flow_error *error,
                        const struct rte_flow_action *act,
                        struct virtchnl_filter_action *filter_action)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check that the queue indexes of the queue region are contiguous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, act,
                                        "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
                rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size should be one of the following values: "
                                "1, 2, 4, 8, 16, 32, 64, 128, as long as the total number "
                                "of queues does not exceed the VSI allocation.");
                return -rte_errno;
        }

        if (rss->queue_num > vf->max_rss_qregion) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size cannot be larger than the supported max RSS queue region");
                return -rte_errno;
        }

        filter_action->act_conf.queue.index = rss->queue[0];
        filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

        return 0;
}

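/*
 * Action parsing rules: a rule may carry at most one destination action
 * (PASSTHRU, DROP, QUEUE or an RSS queue region) and at most one MARK
 * action. A rule with only a MARK is completed with an implicit PASSTHRU
 * so the packet still has a destination.
 */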
static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        int ret;

        int number = 0;
        struct virtchnl_filter_action *filter_action;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;

                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_DROP;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_QUEUE;
                        filter_action->act_conf.queue.index = act_q->index;

                        if (filter_action->act_conf.queue.index >=
                                ad->eth_dev->data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions, "Invalid queue for FDIR.");
                                return -rte_errno;
                        }

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_Q_REGION;

                        ret = iavf_fdir_parse_action_qregion(ad,
                                                error, actions, filter_action);
                        if (ret)
                                return ret;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        filter->mark_flag = 1;
                        mark_spec = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_MARK;
                        filter_action->act_conf.mark_id = mark_spec->id;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Invalid action.");
                        return -rte_errno;
                }
        }

        if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Number of actions exceeds the maximum supported");
                return -rte_errno;
        }

        if (dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Too many mark actions");
                return -rte_errno;
        }

        if (dest_num + mark_num == 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Empty action");
                return -rte_errno;
        }

        /* A mark-only rule is treated as mark + passthru. */
        if (dest_num == 0) {
                filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
                filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
                filter->add_fltr.rule_cfg.action_set.count = ++number;
        }

        return 0;
}

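/*
 * If a TCP/UDP pattern ends up with an empty input set, refine it to match
 * on the L3 next-protocol field instead (IPv4 protocol 6/17, IPv6 next
 * header), so the rule still selects the intended L4 protocol.
 */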
static bool
iavf_fdir_refine_input_set(const uint64_t input_set,
                           const uint64_t input_set_mask,
                           struct iavf_fdir_conf *filter)
{
        struct virtchnl_proto_hdr *hdr, *hdr_last;
        struct rte_flow_item_ipv4 ipv4_spec;
        struct rte_flow_item_ipv6 ipv6_spec;
        int last_layer;
        uint8_t proto_id;

        if (input_set & ~input_set_mask)
                return false;
        else if (input_set)
                return true;

        last_layer = filter->add_fltr.rule_cfg.proto_hdrs.count - 1;
        /* The last layer of a TCP/UDP pattern is at index 2 or higher. */
        if (last_layer < 2)
                return false;
        hdr_last = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer];
        if (hdr_last->type == VIRTCHNL_PROTO_HDR_TCP)
                proto_id = 6;  /* IPPROTO_TCP */
        else if (hdr_last->type == VIRTCHNL_PROTO_HDR_UDP)
                proto_id = 17; /* IPPROTO_UDP */
        else
                return false;

        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer - 1];
        switch (hdr->type) {
        case VIRTCHNL_PROTO_HDR_IPV4:
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
                memset(&ipv4_spec, 0, sizeof(ipv4_spec));
                ipv4_spec.hdr.next_proto_id = proto_id;
                rte_memcpy(hdr->buffer, &ipv4_spec.hdr,
                           sizeof(ipv4_spec.hdr));
                return true;
        case VIRTCHNL_PROTO_HDR_IPV6:
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
                memset(&ipv6_spec, 0, sizeof(ipv6_spec));
                ipv6_spec.hdr.proto = proto_id;
                rte_memcpy(hdr->buffer, &ipv6_spec.hdr,
                           sizeof(ipv6_spec.hdr));
                return true;
        default:
                return false;
        }
}

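/*
 * Insert a dummy IPv4 fragment protocol header at 'layer', shifting any
 * later headers up by one slot, so the PF can match the fragment-specific
 * profile for "any packet id" rules.
 */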
static void
iavf_fdir_add_fragment_hdr(struct virtchnl_proto_hdrs *hdrs, int layer)
{
        struct virtchnl_proto_hdr *hdr1;
        struct virtchnl_proto_hdr *hdr2;
        int i;

        if (layer < 0 || layer > hdrs->count)
                return;

        /* shift the subsequent header layers up by one slot */
        for (i = hdrs->count; i >= layer; i--) {
                hdr1 = &hdrs->proto_hdr[i];
                hdr2 = &hdrs->proto_hdr[i - 1];
                *hdr1 = *hdr2;
        }

        /* add the dummy fragment header at the freed slot */
        hdr1 = &hdrs->proto_hdr[layer];
        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, IPV4_FRAG);
        hdrs->count = ++layer;
}

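/*
 * Walk the rte_flow pattern and translate each item into a virtchnl proto
 * header with its matched field bits. tun_inner is set once a tunnel item
 * (GTPU or GRE) has been seen, so subsequent L3/L4 items are flagged as
 * inner-header fields rather than outer ones.
 */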
static int
iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                        const struct rte_flow_item pattern[],
                        const uint64_t input_set_mask,
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        struct virtchnl_proto_hdrs *hdrs =
                        &filter->add_fltr.rule_cfg.proto_hdrs;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_last;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
        const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
        const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
        const struct rte_flow_item_esp *esp_spec, *esp_mask;
        const struct rte_flow_item_ah *ah_spec, *ah_mask;
        const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
        const struct rte_flow_item_ecpri *ecpri_spec, *ecpri_mask;
        const struct rte_flow_item_gre *gre_spec, *gre_mask;
        const struct rte_flow_item *item = pattern;
        struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
        struct rte_ecpri_common_hdr ecpri_common;
        uint64_t input_set = IAVF_INSET_NONE;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type next_type;
        uint8_t tun_inner = 0;
        uint16_t ether_type;
        int layer = 0;

        uint8_t ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
        };

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                item_type = item->type;

                if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    item_type ==
                                    RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Range not supported for this item type");
                        return -rte_errno;
                }

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr1 = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);

                        if (next_type == RTE_FLOW_ITEM_TYPE_END &&
                            (!eth_spec || !eth_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "NULL eth spec/mask.");
                                return -rte_errno;
                        }

                        if (eth_spec && eth_mask) {
                                if (!rte_is_zero_ether_addr(&eth_mask->src) ||
                                    !rte_is_zero_ether_addr(&eth_mask->dst)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid MAC_addr mask.");
                                        return -rte_errno;
                                }
                        }

                        if (eth_spec && eth_mask && eth_mask->type) {
                                if (eth_mask->type != RTE_BE16(0xffff)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid type mask.");
                                        return -rte_errno;
                                }

                                ether_type = rte_be_to_cpu_16(eth_spec->type);
                                if (ether_type == RTE_ETHER_TYPE_IPV4 ||
                                        ether_type == RTE_ETHER_TYPE_IPV6) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item,
                                                "Unsupported ether_type.");
                                        return -rte_errno;
                                }

                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                rte_memcpy(hdr1->buffer, eth_spec,
                                           sizeof(struct rte_ether_hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec = item->spec;
                        ipv4_last = item->last;
                        ipv4_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

                        if (!(ipv4_spec && ipv4_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_last &&
                            (ipv4_last->hdr.version_ihl ||
                             ipv4_last->hdr.type_of_service ||
                             ipv4_last->hdr.time_to_live ||
                             ipv4_last->hdr.total_length ||
                             ipv4_last->hdr.next_proto_id ||
                             ipv4_last->hdr.hdr_checksum ||
                             ipv4_last->hdr.src_addr ||
                             ipv4_last->hdr.dst_addr)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 last.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.type_of_service ==
                            UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_TOS;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 DSCP);
                        }

                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_PROTO;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 PROT);
                        }

                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_TTL;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 TTL);
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
                                input_set |= IAVF_INSET_IPV4_SRC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 SRC);
                        }

                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
                                input_set |= IAVF_INSET_IPV4_DST;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 DST);
                        }

                        if (tun_inner) {
                                input_set &= ~IAVF_PROT_IPV4_OUTER;
                                input_set |= IAVF_PROT_IPV4_INNER;
                        }

                        rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
                                   sizeof(ipv4_spec->hdr));

                        hdrs->count = ++layer;

                        /* Only the "any packet id" form is supported for
                         * fragmented IPv4: spec is 0, last is 0xffff and
                         * mask is 0xffff.
                         */
                        if (ipv4_last && ipv4_spec->hdr.packet_id == 0 &&
                            ipv4_last->hdr.packet_id == UINT16_MAX &&
                            ipv4_mask->hdr.packet_id == UINT16_MAX &&
                            ipv4_mask->hdr.fragment_offset == UINT16_MAX) {
                                /* All IPv4 fragment packets share the same
                                 * ethertype; if the spec covers every valid
                                 * packet id, add the ethertype to the input
                                 * set.
                                 */
                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                /* add a dummy header for IPv4 fragments */
                                iavf_fdir_add_fragment_hdr(hdrs, layer);
                        } else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

                        if (!(ipv6_spec && ipv6_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        if ((ipv6_mask->hdr.vtc_flow &
                              rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
                             == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
                                input_set |= IAVF_INSET_IPV6_TC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 TC);
                        }

                        if (ipv6_mask->hdr.proto == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV6_NEXT_HDR;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 PROT);
                        }

                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 HOP_LIMIT);
                        }

                        if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
                                    RTE_DIM(ipv6_mask->hdr.src_addr))) {
                                input_set |= IAVF_INSET_IPV6_SRC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 SRC);
                        }
                        if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
                                    RTE_DIM(ipv6_mask->hdr.dst_addr))) {
                                input_set |= IAVF_INSET_IPV6_DST;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 DST);
                        }

                        if (tun_inner) {
                                input_set &= ~IAVF_PROT_IPV6_OUTER;
                                input_set |= IAVF_PROT_IPV6_INNER;
                        }

                        rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
                                   sizeof(ipv6_spec->hdr));

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
                        ipv6_frag_spec = item->spec;
                        ipv6_frag_last = item->last;
                        ipv6_frag_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6_EH_FRAG);

                        if (!(ipv6_frag_spec && ipv6_frag_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        /* Only the "any packet id" form is supported for
                         * fragmented IPv6: spec is 0, last is 0xffffffff and
                         * mask is 0xffffffff.
                         */
                        if (ipv6_frag_last && ipv6_frag_spec->hdr.id == 0 &&
                            ipv6_frag_last->hdr.id == UINT32_MAX &&
                            ipv6_frag_mask->hdr.id == UINT32_MAX &&
                            ipv6_frag_mask->hdr.frag_data == UINT16_MAX) {
                                /* All IPv6 fragment packets share the same
                                 * ethertype; if the spec covers every valid
                                 * packet id, add the ethertype to the input
                                 * set.
                                 */
                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                rte_memcpy(hdr->buffer, &ipv6_frag_spec->hdr,
                                           sizeof(ipv6_frag_spec->hdr));
                        } else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

                        if (udp_spec && udp_mask) {
                                if (udp_mask->hdr.dgram_len ||
                                        udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid UDP mask");
                                        return -rte_errno;
                                }

                                if (udp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_UDP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
                                }
                                if (udp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_UDP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
                                }

                                if (tun_inner) {
                                        input_set &= ~IAVF_PROT_UDP_OUTER;
                                        input_set |= IAVF_PROT_UDP_INNER;
                                }

                                /* the copied header bytes are the same for
                                 * IPv4 and IPv6 flows
                                 */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &udp_spec->hdr,
                                                sizeof(udp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

                        if (tcp_spec && tcp_mask) {
                                if (tcp_mask->hdr.sent_seq ||
                                        tcp_mask->hdr.recv_ack ||
                                        tcp_mask->hdr.data_off ||
                                        tcp_mask->hdr.tcp_flags ||
                                        tcp_mask->hdr.rx_win ||
                                        tcp_mask->hdr.cksum ||
                                        tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid TCP mask");
                                        return -rte_errno;
                                }

                                if (tcp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_TCP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
                                }
                                if (tcp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_TCP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
                                }

                                if (tun_inner) {
                                        input_set &= ~IAVF_PROT_TCP_OUTER;
                                        input_set |= IAVF_PROT_TCP_INNER;
                                }

                                /* the copied header bytes are the same for
                                 * IPv4 and IPv6 flows
                                 */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &tcp_spec->hdr,
                                                sizeof(tcp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

                        if (sctp_spec && sctp_mask) {
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid SCTP mask");
                                        return -rte_errno;
                                }

                                if (sctp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_SCTP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
                                }
                                if (sctp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_SCTP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
                                }

                                /* the copied header bytes are the same for
                                 * IPv4 and IPv6 flows
                                 */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &sctp_spec->hdr,
                                                sizeof(sctp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_GTPU:
                        gtp_spec = item->spec;
                        gtp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);

                        if (gtp_spec && gtp_mask) {
                                if (gtp_mask->v_pt_rsv_flags ||
                                        gtp_mask->msg_type ||
                                        gtp_mask->msg_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid GTP mask");
                                        return -rte_errno;
                                }

                                if (gtp_mask->teid == UINT32_MAX) {
                                        input_set |= IAVF_INSET_GTPU_TEID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
                                }

                                rte_memcpy(hdr->buffer,
                                        gtp_spec, sizeof(*gtp_spec));
                        }

                        tun_inner = 1;

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
                        gtp_psc_spec = item->spec;
                        gtp_psc_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        if (!gtp_psc_spec)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
                        else if (gtp_psc_mask && gtp_psc_mask->qfi &&
                                 !gtp_psc_mask->pdu_type)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
                        else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_UPLINK)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP);
                        else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_DWLINK)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN);

                        if (gtp_psc_spec && gtp_psc_mask) {
                                if (gtp_psc_mask->qfi == UINT8_MAX) {
                                        input_set |= IAVF_INSET_GTPU_QFI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
                                }

                                rte_memcpy(hdr->buffer, gtp_psc_spec,
                                        sizeof(*gtp_psc_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
                        l2tpv3oip_spec = item->spec;
                        l2tpv3oip_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);

                        if (l2tpv3oip_spec && l2tpv3oip_mask) {
                                if (l2tpv3oip_mask->session_id == UINT32_MAX) {
                                        input_set |= IAVF_L2TPV3OIP_SESSION_ID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
                                }

                                rte_memcpy(hdr->buffer, l2tpv3oip_spec,
                                        sizeof(*l2tpv3oip_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_ESP:
                        esp_spec = item->spec;
                        esp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

                        if (esp_spec && esp_mask) {
                                if (esp_mask->hdr.spi == UINT32_MAX) {
                                        input_set |= IAVF_INSET_ESP_SPI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
                                }

                                rte_memcpy(hdr->buffer, &esp_spec->hdr,
                                        sizeof(esp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_AH:
                        ah_spec = item->spec;
                        ah_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

                        if (ah_spec && ah_mask) {
                                if (ah_mask->spi == UINT32_MAX) {
                                        input_set |= IAVF_INSET_AH_SPI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
                                }

                                rte_memcpy(hdr->buffer, ah_spec,
                                        sizeof(*ah_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_PFCP:
                        pfcp_spec = item->spec;
                        pfcp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);

                        if (pfcp_spec && pfcp_mask) {
                                if (pfcp_mask->s_field == UINT8_MAX) {
                                        input_set |= IAVF_INSET_PFCP_S_FIELD;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
                                }

                                rte_memcpy(hdr->buffer, pfcp_spec,
                                        sizeof(*pfcp_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_ECPRI:
                        ecpri_spec = item->spec;
                        ecpri_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ECPRI);

                        if (ecpri_spec && ecpri_mask) {
                                /* dereference the spec only after the NULL
                                 * check above
                                 */
                                ecpri_common.u32 =
                                        rte_be_to_cpu_32(ecpri_spec->hdr.common.u32);

                                if (ecpri_common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA &&
                                                ecpri_mask->hdr.type0.pc_id == UINT16_MAX) {
                                        input_set |= IAVF_ECPRI_PC_RTC_ID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ECPRI,
                                                                         PC_RTC_ID);
                                }

                                rte_memcpy(hdr->buffer, ecpri_spec,
                                        sizeof(*ecpri_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_GRE:
                        gre_spec = item->spec;
                        gre_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GRE);

                        if (gre_spec && gre_mask) {
                                rte_memcpy(hdr->buffer, gre_spec,
                                           sizeof(*gre_spec));
                        }

                        tun_inner = 1;

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Invalid pattern item.");
                        return -rte_errno;
                }
        }

        if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Protocol header layers exceed the maximum value");
                return -rte_errno;
        }

        if (!iavf_fdir_refine_input_set(input_set,
                                        input_set_mask | IAVF_INSET_ETHERTYPE,
                                        filter)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
                                   "Invalid input set");
                return -rte_errno;
        }

        filter->input_set = input_set;

        return 0;
}

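/*
 * Parser entry point: match the pattern against the table above, then fill
 * vf->fdir.conf from the pattern items and actions. On success *meta points
 * at the parsed filter, which iavf_fdir_create() later copies into its own
 * allocation.
 */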
static int
iavf_fdir_parse(struct iavf_adapter *ad,
                struct iavf_pattern_match_item *array,
                uint32_t array_len,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                void **meta,
                struct rte_flow_error *error)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_fdir_conf *filter = &vf->fdir.conf;
        struct iavf_pattern_match_item *item = NULL;
        int ret;

        memset(filter, 0, sizeof(*filter));

        item = iavf_search_pattern_match_item(pattern, array, array_len, error);
        if (!item)
                return -rte_errno;

        ret = iavf_fdir_parse_pattern(ad, pattern, item->input_set_mask,
                                      error, filter);
        if (ret)
                goto error;

        ret = iavf_fdir_parse_action(ad, actions, error, filter);
        if (ret)
                goto error;

        if (meta)
                *meta = filter;

error:
        rte_free(item);
        return ret;
}

static struct iavf_flow_parser iavf_fdir_parser = {
        .engine = &iavf_fdir_engine,
        .array = iavf_fdir_pattern,
        .array_len = RTE_DIM(iavf_fdir_pattern),
        .parse_pattern_action = iavf_fdir_parse,
        .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(iavf_fdir_engine_register)
{
        iavf_register_flow_engine(&iavf_fdir_engine);
}
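
/*
 * Minimal usage sketch (illustrative, not part of the driver): creating a
 * flow director rule for a GRE-tunnelled inner IPv4/UDP flow through the
 * generic rte_flow API. The port id, address and queue index are example
 * values, and error handling is reduced to the bare minimum.
 *
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_item_ipv4 ip_spec = {
 *              .hdr.src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
 *      };
 *      struct rte_flow_item_ipv4 ip_mask = {
 *              .hdr.src_addr = RTE_BE32(UINT32_MAX),
 *      };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *              { .type = RTE_FLOW_ITEM_TYPE_GRE },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                .spec = &ip_spec, .mask = &ip_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action_queue queue = { .index = 3 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_error err;
 *      struct rte_flow *flow = rte_flow_create(0, &attr, pattern,
 *                                              actions, &err);
 *      if (!flow)
 *              printf("flow create failed: %s\n",
 *                     err.message ? err.message : "(no message)");
 */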