net/iavf: simplify flow director rules for IP fragment
[dpdk.git] / drivers / net / iavf / iavf_fdir.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "virtchnl.h"
#include "iavf_rxtx.h"

#define IAVF_FDIR_MAX_QREGION_SIZE 128

#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

#define IAVF_GTPU_EH_DWLINK 0
#define IAVF_GTPU_EH_UPLINK 1

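/* Input-set definitions: each IAVF_FDIR_INSET_* macro below enumerates
 * the header fields a given pattern is allowed to match on.  The pattern
 * table further down pairs every supported rte_flow pattern with one of
 * these masks, and iavf_fdir_parse_pattern() rejects any rule whose
 * spec/mask touches a field outside that mask.
 */
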
#define IAVF_FDIR_INSET_ETH (\
        IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
        IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_ID)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
        IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT (\
        IAVF_INSET_IPV6_ID)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV4 (\
        IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
        IAVF_INSET_TUN_IPV4_PROTO | IAVF_INSET_TUN_IPV4_TOS | \
        IAVF_INSET_TUN_IPV4_TTL)

#define IAVF_FDIR_INSET_GTPU_IPV4_UDP (\
        IAVF_FDIR_INSET_GTPU_IPV4 | \
        IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV4_TCP (\
        IAVF_FDIR_INSET_GTPU_IPV4 | \
        IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_IPV6_GTPU (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV6 (\
        IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
        IAVF_INSET_TUN_IPV6_NEXT_HDR | IAVF_INSET_TUN_IPV6_TC | \
        IAVF_INSET_TUN_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_GTPU_IPV6_UDP (\
        IAVF_FDIR_INSET_GTPU_IPV6 | \
        IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV6_TCP (\
        IAVF_FDIR_INSET_GTPU_IPV6 | \
        IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_L2TPV3OIP (\
        IAVF_L2TPV3OIP_SESSION_ID)

#define IAVF_FDIR_INSET_IPV4_ESP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_ESP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
        IAVF_INSET_AH_SPI)

#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_PFCP (\
        IAVF_INSET_PFCP_S_FIELD)

#define IAVF_FDIR_INSET_ECPRI (\
        IAVF_INSET_ECPRI)

#define IAVF_FDIR_INSET_GRE_IPV4 (\
        IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
        IAVF_INSET_TUN_IPV4_TOS | IAVF_INSET_TUN_IPV4_PROTO)

#define IAVF_FDIR_INSET_GRE_IPV4_TCP (\
        IAVF_FDIR_INSET_GRE_IPV4 | IAVF_INSET_TUN_TCP_SRC_PORT | \
        IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV4_UDP (\
        IAVF_FDIR_INSET_GRE_IPV4 | IAVF_INSET_TUN_UDP_SRC_PORT | \
        IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV6 (\
        IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
        IAVF_INSET_TUN_IPV6_TC | IAVF_INSET_TUN_IPV6_NEXT_HDR)

#define IAVF_FDIR_INSET_GRE_IPV6_TCP (\
        IAVF_FDIR_INSET_GRE_IPV6 | IAVF_INSET_TUN_TCP_SRC_PORT | \
        IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV6_UDP (\
        IAVF_FDIR_INSET_GRE_IPV6 | IAVF_INSET_TUN_UDP_SRC_PORT | \
        IAVF_INSET_TUN_UDP_DST_PORT)

static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
        {iavf_pattern_ethertype,                 IAVF_FDIR_INSET_ETH,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4,                  IAVF_FDIR_INSET_ETH_IPV4,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp,              IAVF_FDIR_INSET_ETH_IPV4_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_tcp,              IAVF_FDIR_INSET_ETH_IPV4_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_sctp,             IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6,                  IAVF_FDIR_INSET_ETH_IPV6,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_frag_ext,         IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp,              IAVF_FDIR_INSET_ETH_IPV6_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_tcp,              IAVF_FDIR_INSET_ETH_IPV6_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_sctp,             IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu,             IAVF_FDIR_INSET_IPV4_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4,        IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4_udp,    IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,    IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6,        IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6_udp,    IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,    IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh,          IAVF_FDIR_INSET_IPV4_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4,     IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6,     IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gtpu,             IAVF_FDIR_INSET_IPV6_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gtpu_eh,          IAVF_FDIR_INSET_IPV6_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_esp,              IAVF_FDIR_INSET_IPV4_ESP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_esp,              IAVF_FDIR_INSET_IPV6_ESP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ah,               IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_ah,               IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp_esp,          IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_esp,          IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_pfcp,             IAVF_FDIR_INSET_PFCP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_pfcp,             IAVF_FDIR_INSET_PFCP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ecpri,                 IAVF_FDIR_INSET_ECPRI,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ecpri,            IAVF_FDIR_INSET_ECPRI,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4,         IAVF_FDIR_INSET_GRE_IPV4,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_tcp,     IAVF_FDIR_INSET_GRE_IPV4_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv4_udp,     IAVF_FDIR_INSET_GRE_IPV4_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6,         IAVF_FDIR_INSET_GRE_IPV6,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_tcp,     IAVF_FDIR_INSET_GRE_IPV6_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gre_ipv6_udp,     IAVF_FDIR_INSET_GRE_IPV6_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4,         IAVF_FDIR_INSET_GRE_IPV4,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_tcp,     IAVF_FDIR_INSET_GRE_IPV4_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv4_udp,     IAVF_FDIR_INSET_GRE_IPV4_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6,         IAVF_FDIR_INSET_GRE_IPV6,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_tcp,     IAVF_FDIR_INSET_GRE_IPV6_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gre_ipv6_udp,     IAVF_FDIR_INSET_GRE_IPV6_UDP,  IAVF_INSET_NONE},
};

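/* Example of a rule the table above accepts, written as a sketch in
 * testpmd flow syntax (exact token spelling depends on the testpmd
 * version; port and queue numbers are illustrative only).  It matches
 * all IPv4 fragments via the MF bit handled in
 * iavf_fdir_parse_pattern() below and steers them to one queue:
 *
 *   flow create 0 ingress pattern eth / ipv4 fragment_offset spec 0x2000
 *        fragment_offset mask 0x2000 / end actions queue index 3 / end
 */
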
static struct iavf_flow_parser iavf_fdir_parser;

static int
iavf_fdir_init(struct iavf_adapter *ad)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_flow_parser *parser;

        if (!vf->vf_res)
                return -EINVAL;

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
                parser = &iavf_fdir_parser;
        else
                return -ENOTSUP;

        return iavf_register_parser(parser, ad);
}

static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
        iavf_unregister_parser(&iavf_fdir_parser, ad);
}

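/* Create a flow rule from the configuration parsed into @meta: keep a
 * private copy so the rule can be deleted later, and program it in the
 * PF over virtchnl via iavf_fdir_add().
 */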
static int
iavf_fdir_create(struct iavf_adapter *ad,
                struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        struct iavf_fdir_conf *rule;
        int ret;

        rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
        if (!rule) {
                rte_flow_error_set(error, ENOMEM,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to allocate memory for fdir rule");
                return -rte_errno;
        }

        ret = iavf_fdir_add(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to add filter rule.");
                goto free_entry;
        }

        if (filter->mark_flag == 1)
                iavf_fdir_rx_proc_enable(ad, 1);

        rte_memcpy(rule, filter, sizeof(*rule));
        flow->rule = rule;

        return 0;

free_entry:
        rte_free(rule);
        return -rte_errno;
}

static int
iavf_fdir_destroy(struct iavf_adapter *ad,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter;
        int ret;

        filter = (struct iavf_fdir_conf *)flow->rule;

        ret = iavf_fdir_del(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to delete filter rule.");
                return -rte_errno;
        }

        if (filter->mark_flag == 1)
                iavf_fdir_rx_proc_enable(ad, 0);

        flow->rule = NULL;
        rte_free(filter);

        return 0;
}

static int
iavf_fdir_validation(struct iavf_adapter *ad,
                __rte_unused struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        int ret;

        ret = iavf_fdir_check(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to validate filter rule.");
                return -rte_errno;
        }

        return 0;
}

static struct iavf_flow_engine iavf_fdir_engine = {
        .init = iavf_fdir_init,
        .uninit = iavf_fdir_uninit,
        .create = iavf_fdir_create,
        .destroy = iavf_fdir_destroy,
        .validation = iavf_fdir_validation,
        .type = IAVF_FLOW_ENGINE_FDIR,
};

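/* Map an RSS action onto a FDIR queue-region action.  The region is
 * encoded as a start index plus a power-of-two size, e.g. queues 8-15
 * give index = 8 and region = rte_fls_u32(8) - 1 = 3 (2^3 = 8 queues).
 */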
static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
                        struct rte_flow_error *error,
                        const struct rte_flow_action *act,
                        struct virtchnl_filter_action *filter_action)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check if the queue indexes for the queue region are continuous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, act,
                                        "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
              rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size should be any of the following values: "
                                "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
                                "of queues does not exceed the VSI allocation.");
                return -rte_errno;
        }

        if (rss->queue_num > vf->max_rss_qregion) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size cannot be larger than the supported max RSS queue region");
                return -rte_errno;
        }

        filter_action->act_conf.queue.index = rss->queue[0];
        filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

        return 0;
}

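/* Translate the rte_flow action list into virtchnl filter actions.  At
 * most one fate action (passthru/drop/queue/RSS region) and one mark
 * action are accepted; a mark-only rule is completed with an implicit
 * passthru so the packet still reaches the host.
 */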
static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        int ret;

        int number = 0;
        struct virtchnl_filter_action *filter_action;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                /* bound the index before writing into the fixed-size
                 * virtchnl action array
                 */
                if (number >= VIRTCHNL_MAX_NUM_ACTIONS) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Action numbers exceed the maximum value");
                        return -rte_errno;
                }

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;

                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_DROP;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_QUEUE;
                        filter_action->act_conf.queue.index = act_q->index;

                        if (filter_action->act_conf.queue.index >=
                                ad->eth_dev->data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions, "Invalid queue for FDIR.");
                                return -rte_errno;
                        }

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_Q_REGION;

                        ret = iavf_fdir_parse_action_qregion(ad,
                                                error, actions, filter_action);
                        if (ret)
                                return ret;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        filter->mark_flag = 1;
                        mark_spec = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_MARK;
                        filter_action->act_conf.mark_id = mark_spec->id;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Invalid action.");
                        return -rte_errno;
                }
        }

        if (dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Too many mark actions");
                return -rte_errno;
        }

        if (dest_num + mark_num == 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Empty action");
                return -rte_errno;
        }

        /* Mark only is equal to mark + passthru. */
        if (dest_num == 0) {
                filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
                filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
                filter->add_fltr.rule_cfg.action_set.count = ++number;
        }

        return 0;
}

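/* A rule whose spec matches no field at all (input_set == 0) would be
 * meaningless to the hardware, so for plain IPv4/IPv6 + TCP/UDP
 * patterns fall back to matching the L3 protocol number (6 for TCP,
 * 17 for UDP) instead; any other empty rule is refused.
 */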
static bool
iavf_fdir_refine_input_set(const uint64_t input_set,
                           const uint64_t input_set_mask,
                           struct iavf_fdir_conf *filter)
{
        struct virtchnl_proto_hdr *hdr, *hdr_last;
        struct rte_flow_item_ipv4 ipv4_spec;
        struct rte_flow_item_ipv6 ipv6_spec;
        int last_layer;
        uint8_t proto_id;

        if (input_set & ~input_set_mask)
                return false;
        else if (input_set)
                return true;

        last_layer = filter->add_fltr.rule_cfg.proto_hdrs.count - 1;
        /* The last layer of a TCP/UDP pattern is never less than 2. */
        if (last_layer < 2)
                return false;

        hdr_last = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer];
        if (hdr_last->type == VIRTCHNL_PROTO_HDR_TCP)
                proto_id = 6;   /* IPPROTO_TCP */
        else if (hdr_last->type == VIRTCHNL_PROTO_HDR_UDP)
                proto_id = 17;  /* IPPROTO_UDP */
        else
                return false;

        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer - 1];
        switch (hdr->type) {
        case VIRTCHNL_PROTO_HDR_IPV4:
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
                memset(&ipv4_spec, 0, sizeof(ipv4_spec));
                ipv4_spec.hdr.next_proto_id = proto_id;
                rte_memcpy(hdr->buffer, &ipv4_spec.hdr,
                           sizeof(ipv4_spec.hdr));
                return true;
        case VIRTCHNL_PROTO_HDR_IPV6:
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
                memset(&ipv6_spec, 0, sizeof(ipv6_spec));
                ipv6_spec.hdr.proto = proto_id;
                rte_memcpy(hdr->buffer, &ipv6_spec.hdr,
                           sizeof(ipv6_spec.hdr));
                return true;
        default:
                return false;
        }
}

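/* Insert a dummy fragment header at @layer, shifting the existing
 * headers up by one.  E.g. with proto_hdrs [ETH, IPV4] and layer == 2
 * the result is [ETH, IPV4, IPV4_FRAG].
 */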
static void
iavf_fdir_add_fragment_hdr(struct virtchnl_proto_hdrs *hdrs, int layer)
{
        struct virtchnl_proto_hdr *hdr1;
        struct virtchnl_proto_hdr *hdr2;
        int i;

        if (layer < 0 || layer > hdrs->count)
                return;

        /* shift the headers at and above @layer up by one */
        for (i = hdrs->count; i >= layer; i--) {
                hdr1 = &hdrs->proto_hdr[i];
                hdr2 = &hdrs->proto_hdr[i - 1];
                *hdr1 = *hdr2;
        }

        /* add the dummy fragment header */
        hdr1 = &hdrs->proto_hdr[layer];
        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, IPV4_FRAG);
        hdrs->count = ++layer;
}

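/* Walk the rte_flow pattern, build the virtchnl proto_hdrs array the PF
 * expects, and collect the matched fields into @filter->input_set,
 * which is then validated against the pattern's input_set_mask.
 */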
static int
iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                        const struct rte_flow_item pattern[],
                        const uint64_t input_set_mask,
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        struct virtchnl_proto_hdrs *hdrs =
                        &filter->add_fltr.rule_cfg.proto_hdrs;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
        const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
        const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
        const struct rte_flow_item_esp *esp_spec, *esp_mask;
        const struct rte_flow_item_ah *ah_spec, *ah_mask;
        const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
        const struct rte_flow_item_ecpri *ecpri_spec, *ecpri_mask;
        const struct rte_flow_item_gre *gre_spec, *gre_mask;
        const struct rte_flow_item *item = pattern;
        struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
        struct rte_ecpri_common_hdr ecpri_common;
        uint64_t input_set = IAVF_INSET_NONE;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type next_type;
        uint8_t tun_inner = 0;
        uint16_t ether_type;
        int layer = 0;

        uint8_t  ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
        };

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                item_type = item->type;

                /* ranges (item->last) are only supported for IPv4 and
                 * the IPv6 fragment extension header
                 */
                if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    item_type ==
                                    RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Range not supported");
                        return -rte_errno;
                }

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr1 = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);

                        if (next_type == RTE_FLOW_ITEM_TYPE_END &&
                            (!eth_spec || !eth_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "NULL eth spec/mask.");
                                return -rte_errno;
                        }

                        if (eth_spec && eth_mask) {
                                if (!rte_is_zero_ether_addr(&eth_mask->src) ||
                                    !rte_is_zero_ether_addr(&eth_mask->dst)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid MAC_addr mask.");
                                        return -rte_errno;
                                }
                        }

                        if (eth_spec && eth_mask && eth_mask->type) {
                                if (eth_mask->type != RTE_BE16(0xffff)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid type mask.");
                                        return -rte_errno;
                                }

                                ether_type = rte_be_to_cpu_16(eth_spec->type);
                                if (ether_type == RTE_ETHER_TYPE_IPV4 ||
                                        ether_type == RTE_ETHER_TYPE_IPV6) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item,
                                                "Unsupported ether_type.");
                                        return -rte_errno;
                                }

                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                rte_memcpy(hdr1->buffer, eth_spec,
                                           sizeof(struct rte_ether_hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec = item->spec;
                        ipv4_last = item->last;
                        ipv4_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

                        if (!(ipv4_spec && ipv4_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_last &&
                            (ipv4_last->hdr.version_ihl ||
                             ipv4_last->hdr.type_of_service ||
                             ipv4_last->hdr.time_to_live ||
                             ipv4_last->hdr.total_length ||
                             ipv4_last->hdr.next_proto_id ||
                             ipv4_last->hdr.hdr_checksum ||
                             ipv4_last->hdr.src_addr ||
                             ipv4_last->hdr.dst_addr)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 last.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.type_of_service ==
                            UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_TOS;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 DSCP);
                        }

                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_PROTO;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 PROT);
                        }

                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_TTL;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 TTL);
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
                                input_set |= IAVF_INSET_IPV4_SRC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 SRC);
                        }

                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
                                input_set |= IAVF_INSET_IPV4_DST;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 DST);
                        }

                        if (tun_inner) {
                                input_set &= ~IAVF_PROT_IPV4_OUTER;
                                input_set |= IAVF_PROT_IPV4_INNER;
                        }

                        rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
                                   sizeof(ipv4_spec->hdr));

                        hdrs->count = ++layer;

                        /* fragmented IPv4: both spec and mask set the
                         * MF flag (0x2000)
                         */
                        if (ipv4_spec->hdr.fragment_offset ==
                            rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
                            ipv4_mask->hdr.fragment_offset ==
                            rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
                                /* all IPv4 fragment packets share the same
                                 * ethertype; if the spec and mask are valid,
                                 * set the ethertype into the input set.
                                 */
                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                /* add dummy header for IPv4 fragment */
                                iavf_fdir_add_fragment_hdr(hdrs, layer);
                        } else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

                        if (!(ipv6_spec && ipv6_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        if ((ipv6_mask->hdr.vtc_flow &
                              rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
                             == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
                                input_set |= IAVF_INSET_IPV6_TC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 TC);
                        }

                        if (ipv6_mask->hdr.proto == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV6_NEXT_HDR;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 PROT);
                        }

                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 HOP_LIMIT);
                        }

                        if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
                                    RTE_DIM(ipv6_mask->hdr.src_addr))) {
                                input_set |= IAVF_INSET_IPV6_SRC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 SRC);
                        }
                        if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
                                    RTE_DIM(ipv6_mask->hdr.dst_addr))) {
                                input_set |= IAVF_INSET_IPV6_DST;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 DST);
                        }

                        if (tun_inner) {
                                input_set &= ~IAVF_PROT_IPV6_OUTER;
                                input_set |= IAVF_PROT_IPV6_INNER;
                        }

                        rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
                                   sizeof(ipv6_spec->hdr));

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
                        ipv6_frag_spec = item->spec;
                        ipv6_frag_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6_EH_FRAG);

                        if (!(ipv6_frag_spec && ipv6_frag_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        /* fragmented IPv6: both spec and mask set the
                         * M flag (0x1)
                         */
                        if (ipv6_frag_spec->hdr.frag_data ==
                            rte_cpu_to_be_16(1) &&
                            ipv6_frag_mask->hdr.frag_data ==
                            rte_cpu_to_be_16(1)) {
                                /* all IPv6 fragment packets share the same
                                 * ethertype; if the spec and mask are valid,
                                 * set the ethertype into the input set.
                                 */
                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                rte_memcpy(hdr->buffer, &ipv6_frag_spec->hdr,
                                           sizeof(ipv6_frag_spec->hdr));
                        } else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

                        if (udp_spec && udp_mask) {
                                if (udp_mask->hdr.dgram_len ||
                                        udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid UDP mask");
                                        return -rte_errno;
                                }

                                if (udp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_UDP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
                                }
                                if (udp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_UDP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
                                }

                                if (tun_inner) {
                                        input_set &= ~IAVF_PROT_UDP_OUTER;
                                        input_set |= IAVF_PROT_UDP_INNER;
                                }

                                /* the UDP header layout is identical over
                                 * IPv4 and IPv6, so one copy covers both
                                 */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                   &udp_spec->hdr,
                                                   sizeof(udp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

                        if (tcp_spec && tcp_mask) {
                                if (tcp_mask->hdr.sent_seq ||
                                        tcp_mask->hdr.recv_ack ||
                                        tcp_mask->hdr.data_off ||
                                        tcp_mask->hdr.tcp_flags ||
                                        tcp_mask->hdr.rx_win ||
                                        tcp_mask->hdr.cksum ||
                                        tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid TCP mask");
                                        return -rte_errno;
                                }

                                if (tcp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_TCP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
                                }
                                if (tcp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_TCP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
                                }

                                if (tun_inner) {
                                        input_set &= ~IAVF_PROT_TCP_OUTER;
                                        input_set |= IAVF_PROT_TCP_INNER;
                                }

                                /* same TCP header layout over IPv4 and IPv6 */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                   &tcp_spec->hdr,
                                                   sizeof(tcp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

                        if (sctp_spec && sctp_mask) {
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid SCTP mask");
                                        return -rte_errno;
                                }

                                if (sctp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_SCTP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
                                }
                                if (sctp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_SCTP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
                                }

                                /* same SCTP header layout over IPv4 and IPv6 */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                   &sctp_spec->hdr,
                                                   sizeof(sctp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_GTPU:
                        gtp_spec = item->spec;
                        gtp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);

                        if (gtp_spec && gtp_mask) {
                                if (gtp_mask->v_pt_rsv_flags ||
                                        gtp_mask->msg_type ||
                                        gtp_mask->msg_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid GTP mask");
                                        return -rte_errno;
                                }

                                if (gtp_mask->teid == UINT32_MAX) {
                                        input_set |= IAVF_INSET_GTPU_TEID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
                                }

                                rte_memcpy(hdr->buffer,
                                        gtp_spec, sizeof(*gtp_spec));
                        }

                        tun_inner = 1;

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
                        gtp_psc_spec = item->spec;
                        gtp_psc_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        /* only dereference the mask once its presence is
                         * checked
                         */
                        if (!gtp_psc_spec)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
                        else if (gtp_psc_mask && gtp_psc_mask->qfi &&
                                 !gtp_psc_mask->pdu_type)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
                        else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_UPLINK)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP);
                        else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_DWLINK)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN);

                        if (gtp_psc_spec && gtp_psc_mask) {
                                if (gtp_psc_mask->qfi == UINT8_MAX) {
                                        input_set |= IAVF_INSET_GTPU_QFI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
                                }

                                rte_memcpy(hdr->buffer, gtp_psc_spec,
                                        sizeof(*gtp_psc_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
                        l2tpv3oip_spec = item->spec;
                        l2tpv3oip_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);

                        if (l2tpv3oip_spec && l2tpv3oip_mask) {
                                if (l2tpv3oip_mask->session_id == UINT32_MAX) {
                                        input_set |= IAVF_L2TPV3OIP_SESSION_ID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
                                }

                                rte_memcpy(hdr->buffer, l2tpv3oip_spec,
                                        sizeof(*l2tpv3oip_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_ESP:
                        esp_spec = item->spec;
                        esp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

                        if (esp_spec && esp_mask) {
                                if (esp_mask->hdr.spi == UINT32_MAX) {
                                        input_set |= IAVF_INSET_ESP_SPI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
                                }

                                rte_memcpy(hdr->buffer, &esp_spec->hdr,
                                        sizeof(esp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_AH:
                        ah_spec = item->spec;
                        ah_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

                        if (ah_spec && ah_mask) {
                                if (ah_mask->spi == UINT32_MAX) {
                                        input_set |= IAVF_INSET_AH_SPI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
                                }

                                rte_memcpy(hdr->buffer, ah_spec,
                                        sizeof(*ah_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_PFCP:
                        pfcp_spec = item->spec;
                        pfcp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);

                        if (pfcp_spec && pfcp_mask) {
                                if (pfcp_mask->s_field == UINT8_MAX) {
                                        input_set |= IAVF_INSET_PFCP_S_FIELD;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
                                }

                                rte_memcpy(hdr->buffer, pfcp_spec,
                                        sizeof(*pfcp_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_ECPRI:
                        ecpri_spec = item->spec;
                        ecpri_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ECPRI);

                        if (ecpri_spec && ecpri_mask) {
                                /* read the common header only when a spec
                                 * is present
                                 */
                                ecpri_common.u32 =
                                        rte_be_to_cpu_32(ecpri_spec->hdr.common.u32);

                                if (ecpri_common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA &&
                                                ecpri_mask->hdr.type0.pc_id == UINT16_MAX) {
                                        input_set |= IAVF_ECPRI_PC_RTC_ID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ECPRI,
                                                                         PC_RTC_ID);
                                }

                                rte_memcpy(hdr->buffer, ecpri_spec,
                                        sizeof(*ecpri_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_GRE:
                        gre_spec = item->spec;
                        gre_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GRE);

                        if (gre_spec && gre_mask) {
                                rte_memcpy(hdr->buffer, gre_spec,
                                           sizeof(*gre_spec));
                        }

                        tun_inner = 1;

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Invalid pattern item.");
                        return -rte_errno;
                }
        }

        if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Protocol header layers exceed the maximum value");
                return -rte_errno;
        }

        if (!iavf_fdir_refine_input_set(input_set,
                                        input_set_mask | IAVF_INSET_ETHERTYPE,
                                        filter)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
                                   "Invalid input set");
                return -rte_errno;
        }

        filter->input_set = input_set;

        return 0;
}

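/* Entry point called by the generic flow framework: match the pattern
 * against the table, then parse the pattern and actions into
 * vf->fdir.conf, which is handed back through @meta for
 * create/validate.
 */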
static int
iavf_fdir_parse(struct iavf_adapter *ad,
                struct iavf_pattern_match_item *array,
                uint32_t array_len,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                void **meta,
                struct rte_flow_error *error)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_fdir_conf *filter = &vf->fdir.conf;
        struct iavf_pattern_match_item *item = NULL;
        int ret;

        memset(filter, 0, sizeof(*filter));

        item = iavf_search_pattern_match_item(pattern, array, array_len, error);
        if (!item)
                return -rte_errno;

        ret = iavf_fdir_parse_pattern(ad, pattern, item->input_set_mask,
                                      error, filter);
        if (ret)
                goto error;

        ret = iavf_fdir_parse_action(ad, actions, error, filter);
        if (ret)
                goto error;

        if (meta)
                *meta = filter;

error:
        rte_free(item);
        return ret;
}

static struct iavf_flow_parser iavf_fdir_parser = {
        .engine = &iavf_fdir_engine,
        .array = iavf_fdir_pattern,
        .array_len = RTE_DIM(iavf_fdir_pattern),
        .parse_pattern_action = iavf_fdir_parse,
        .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
};

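/* Register the FDIR engine with the generic flow framework at load
 * time; iavf_fdir_init() later hooks up the parser per adapter once the
 * PF advertises VIRTCHNL_VF_OFFLOAD_FDIR_PF.
 */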
RTE_INIT(iavf_fdir_engine_register)
{
        iavf_register_flow_engine(&iavf_fdir_engine);
}