net/iavf: support flow pattern for GRE
drivers/net/iavf/iavf_fdir.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "virtchnl.h"
#include "iavf_rxtx.h"

#define IAVF_FDIR_MAX_QREGION_SIZE 128

#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

#define IAVF_GTPU_EH_DWLINK 0
#define IAVF_GTPU_EH_UPLINK 1

#define IAVF_FDIR_INSET_ETH (\
        IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
        IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_ID)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
        IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT (\
        IAVF_INSET_IPV6_ID)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV4 (\
        IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
        IAVF_INSET_TUN_IPV4_PROTO | IAVF_INSET_TUN_IPV4_TOS | \
        IAVF_INSET_TUN_IPV4_TTL)

#define IAVF_FDIR_INSET_GTPU_IPV4_UDP (\
        IAVF_FDIR_INSET_GTPU_IPV4 | \
        IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV4_TCP (\
        IAVF_FDIR_INSET_GTPU_IPV4 | \
        IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_IPV6_GTPU (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV6 (\
        IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
        IAVF_INSET_TUN_IPV6_NEXT_HDR | IAVF_INSET_TUN_IPV6_TC | \
        IAVF_INSET_TUN_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_GTPU_IPV6_UDP (\
        IAVF_FDIR_INSET_GTPU_IPV6 | \
        IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV6_TCP (\
        IAVF_FDIR_INSET_GTPU_IPV6 | \
        IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_L2TPV3OIP (\
        IAVF_L2TPV3OIP_SESSION_ID)

#define IAVF_FDIR_INSET_IPV4_ESP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_ESP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
        IAVF_INSET_AH_SPI)

#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_PFCP (\
        IAVF_INSET_PFCP_S_FIELD)

#define IAVF_FDIR_INSET_ECPRI (\
        IAVF_INSET_ECPRI)

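/*
 * Flow director supported pattern list: each entry maps a generic flow
 * pattern onto the input-set bits this engine can match on.  The third
 * column is unused by this engine and left as IAVF_INSET_NONE.
 */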
static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
        {iavf_pattern_ethertype,                 IAVF_FDIR_INSET_ETH,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4,                  IAVF_FDIR_INSET_ETH_IPV4,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp,              IAVF_FDIR_INSET_ETH_IPV4_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_tcp,              IAVF_FDIR_INSET_ETH_IPV4_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_sctp,             IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6,                  IAVF_FDIR_INSET_ETH_IPV6,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_frag_ext,         IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp,              IAVF_FDIR_INSET_ETH_IPV6_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_tcp,              IAVF_FDIR_INSET_ETH_IPV6_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_sctp,             IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu,             IAVF_FDIR_INSET_IPV4_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4,        IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4_udp,    IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,    IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6,        IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6_udp,    IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,    IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh,          IAVF_FDIR_INSET_IPV4_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4,     IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6,     IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gtpu,             IAVF_FDIR_INSET_IPV6_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gtpu_eh,          IAVF_FDIR_INSET_IPV6_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_esp,              IAVF_FDIR_INSET_IPV4_ESP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_esp,              IAVF_FDIR_INSET_IPV6_ESP,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ah,               IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_ah,               IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp_esp,          IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_esp,          IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_pfcp,             IAVF_FDIR_INSET_PFCP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_pfcp,             IAVF_FDIR_INSET_PFCP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ecpri,                 IAVF_FDIR_INSET_ECPRI,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ecpri,            IAVF_FDIR_INSET_ECPRI,         IAVF_INSET_NONE},
};

static struct iavf_flow_parser iavf_fdir_parser;

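/*
 * Register the FDIR parser with the generic flow framework, but only
 * when the PF has advertised the VIRTCHNL_VF_OFFLOAD_FDIR_PF
 * capability for this VF.
 */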
static int
iavf_fdir_init(struct iavf_adapter *ad)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_flow_parser *parser;

        if (!vf->vf_res)
                return -EINVAL;

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
                parser = &iavf_fdir_parser;
        else
                return -ENOTSUP;

        return iavf_register_parser(parser, ad);
}

static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
        iavf_unregister_parser(&iavf_fdir_parser, ad);
}

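/*
 * Create a flow rule: program the filter through virtchnl and keep a
 * private copy of the configuration in flow->rule so that the rule can
 * be deleted later.
 */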
static int
iavf_fdir_create(struct iavf_adapter *ad,
                struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        struct iavf_fdir_conf *rule;
        int ret;

        rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
        if (!rule) {
                rte_flow_error_set(error, ENOMEM,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to allocate memory for fdir rule");
                return -rte_errno;
        }

        ret = iavf_fdir_add(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to add filter rule.");
                goto free_entry;
        }

        if (filter->mark_flag == 1)
                iavf_fdir_rx_proc_enable(ad, 1);

        rte_memcpy(rule, filter, sizeof(*rule));
        flow->rule = rule;

        return 0;

free_entry:
        rte_free(rule);
        return -rte_errno;
}

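/* Destroy a flow rule: remove the filter and release the private copy. */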
static int
iavf_fdir_destroy(struct iavf_adapter *ad,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter;
        int ret;

        filter = (struct iavf_fdir_conf *)flow->rule;

        ret = iavf_fdir_del(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to delete filter rule.");
                return -rte_errno;
        }

        if (filter->mark_flag == 1)
                iavf_fdir_rx_proc_enable(ad, 0);

        flow->rule = NULL;
        rte_free(filter);

        return 0;
}

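/* Validate a candidate filter configuration without creating the rule. */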
static int
iavf_fdir_validation(struct iavf_adapter *ad,
                __rte_unused struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        int ret;

        ret = iavf_fdir_check(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to validate filter rule.");
                return -rte_errno;
        }

        return 0;
}

static struct iavf_flow_engine iavf_fdir_engine = {
        .init = iavf_fdir_init,
        .uninit = iavf_fdir_uninit,
        .create = iavf_fdir_create,
        .destroy = iavf_fdir_destroy,
        .validation = iavf_fdir_validation,
        .type = IAVF_FLOW_ENGINE_FDIR,
};

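/*
 * Parse an RSS action into a queue-region filter action.  The queue
 * group must be contiguous, power-of-2 sized, within the device Rx
 * queue range and no larger than the PF-advertised maximum.  The
 * region is encoded as log2 of the queue count, e.g. an 8-queue
 * region yields rte_fls_u32(8) - 1 = 3.
 */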
static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
                        struct rte_flow_error *error,
                        const struct rte_flow_action *act,
                        struct virtchnl_filter_action *filter_action)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check if the queue indexes for the queue region are continuous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, act,
                                        "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
                rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size should be any of the following values: "
                                "2, 4, 8, 16, 32, 64, 128 as long as the total number "
                                "of queues does not exceed the VSI allocation.");
                return -rte_errno;
        }

        if (rss->queue_num > vf->max_rss_qregion) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size cannot be larger than the supported max RSS queue region");
                return -rte_errno;
        }

        filter_action->act_conf.queue.index = rss->queue[0];
        filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

        return 0;
}

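/*
 * Translate the rte_flow action list into a virtchnl action set.
 * Exactly one destination action (passthru, drop, queue or RSS queue
 * region) and at most one mark action are accepted; a mark-only rule
 * is completed with an implicit passthru.
 */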
static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        int ret;

        int number = 0;
        struct virtchnl_filter_action *filter_action;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;

                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_DROP;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_QUEUE;
                        filter_action->act_conf.queue.index = act_q->index;

                        if (filter_action->act_conf.queue.index >=
                                ad->eth_dev->data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions, "Invalid queue for FDIR.");
                                return -rte_errno;
                        }

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_Q_REGION;

                        ret = iavf_fdir_parse_action_qregion(ad,
                                                error, actions, filter_action);
                        if (ret)
                                return ret;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        filter->mark_flag = 1;
                        mark_spec = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_MARK;
                        filter_action->act_conf.mark_id = mark_spec->id;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Invalid action.");
                        return -rte_errno;
                }
        }

        if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Action numbers exceed the maximum value");
                return -rte_errno;
        }

        if (dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Too many mark actions");
                return -rte_errno;
        }

        if (dest_num + mark_num == 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Empty action");
                return -rte_errno;
        }

        /* A mark-only rule is treated as mark + passthru. */
        if (dest_num == 0) {
                filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
                filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
                filter->add_fltr.rule_cfg.action_set.count = ++number;
        }

        return 0;
}

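/*
 * Check the collected input set against the allowed mask.  An empty
 * input set is refined rather than rejected: for a plain TCP/UDP
 * pattern the preceding L3 protocol field (6 for TCP, 17 for UDP) is
 * matched instead, so the rule still selects the right L4 type.
 */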
static bool
iavf_fdir_refine_input_set(const uint64_t input_set,
                           const uint64_t input_set_mask,
                           struct iavf_fdir_conf *filter)
{
        struct virtchnl_proto_hdr *hdr, *hdr_last;
        struct rte_flow_item_ipv4 ipv4_spec;
        struct rte_flow_item_ipv6 ipv6_spec;
        int last_layer;
        uint8_t proto_id;

        if (input_set & ~input_set_mask)
                return false;
        else if (input_set)
                return true;

        last_layer = filter->add_fltr.rule_cfg.proto_hdrs.count - 1;
        /* The last layer of a TCP/UDP pattern is at index 2 or higher
         * (ETH + L3 + L4).
         */
        if (last_layer < 2)
                return false;
        hdr_last = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer];
        if (hdr_last->type == VIRTCHNL_PROTO_HDR_TCP)
                proto_id = 6; /* IPPROTO_TCP */
        else if (hdr_last->type == VIRTCHNL_PROTO_HDR_UDP)
                proto_id = 17; /* IPPROTO_UDP */
        else
                return false;

        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer - 1];
        switch (hdr->type) {
        case VIRTCHNL_PROTO_HDR_IPV4:
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
                memset(&ipv4_spec, 0, sizeof(ipv4_spec));
                ipv4_spec.hdr.next_proto_id = proto_id;
                rte_memcpy(hdr->buffer, &ipv4_spec.hdr,
                           sizeof(ipv4_spec.hdr));
                return true;
        case VIRTCHNL_PROTO_HDR_IPV6:
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
                memset(&ipv6_spec, 0, sizeof(ipv6_spec));
                ipv6_spec.hdr.proto = proto_id;
                rte_memcpy(hdr->buffer, &ipv6_spec.hdr,
                           sizeof(ipv6_spec.hdr));
                return true;
        default:
                return false;
        }
}

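/*
 * Insert a dummy IPv4 fragment header at @layer, shifting any
 * following protocol headers up by one slot.
 */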
static void
iavf_fdir_add_fragment_hdr(struct virtchnl_proto_hdrs *hdrs, int layer)
{
        struct virtchnl_proto_hdr *hdr1;
        struct virtchnl_proto_hdr *hdr2;
        int i;

        if (layer < 0 || layer > hdrs->count)
                return;

        /* shift headers to make room at @layer */
        for (i = hdrs->count; i >= layer; i--) {
                hdr1 = &hdrs->proto_hdr[i];
                hdr2 = &hdrs->proto_hdr[i - 1];
                *hdr1 = *hdr2;
        }

        /* add the dummy fragment header */
        hdr1 = &hdrs->proto_hdr[layer];
        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, IPV4_FRAG);
        hdrs->count = ++layer;
}

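/*
 * Walk the pattern items and build the virtchnl protocol header list
 * together with the matched input set.  Only fully-masked fields are
 * added to the input set; range matching (item->last) is accepted
 * solely for the IPv4 and IPv6-fragment packet ID special cases.
 */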
static int
iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                        const struct rte_flow_item pattern[],
                        const uint64_t input_set_mask,
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        struct virtchnl_proto_hdrs *hdrs =
                        &filter->add_fltr.rule_cfg.proto_hdrs;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_last;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
        const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
        const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
        const struct rte_flow_item_esp *esp_spec, *esp_mask;
        const struct rte_flow_item_ah *ah_spec, *ah_mask;
        const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
        const struct rte_flow_item_ecpri *ecpri_spec, *ecpri_mask;
        const struct rte_flow_item *item = pattern;
        struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
        struct rte_ecpri_common_hdr ecpri_common;
        uint64_t input_set = IAVF_INSET_NONE;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type next_type;
        uint8_t tun_inner = 0;
        uint16_t ether_type;
        int layer = 0;

        uint8_t ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
        };

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                item_type = item->type;

                if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    item_type ==
                                    RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Range not supported");
                        return -rte_errno;
                }

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr1 = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);

                        if (next_type == RTE_FLOW_ITEM_TYPE_END &&
                            (!eth_spec || !eth_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "NULL eth spec/mask.");
                                return -rte_errno;
                        }

                        if (eth_spec && eth_mask) {
                                if (!rte_is_zero_ether_addr(&eth_mask->src) ||
                                    !rte_is_zero_ether_addr(&eth_mask->dst)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid MAC_addr mask.");
                                        return -rte_errno;
                                }
                        }

                        if (eth_spec && eth_mask && eth_mask->type) {
                                if (eth_mask->type != RTE_BE16(0xffff)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid type mask.");
                                        return -rte_errno;
                                }

                                ether_type = rte_be_to_cpu_16(eth_spec->type);
                                if (ether_type == RTE_ETHER_TYPE_IPV4 ||
                                        ether_type == RTE_ETHER_TYPE_IPV6) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item,
                                                "Unsupported ether_type.");
                                        return -rte_errno;
                                }

                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                rte_memcpy(hdr1->buffer, eth_spec,
                                           sizeof(struct rte_ether_hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec = item->spec;
                        ipv4_last = item->last;
                        ipv4_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

                        if (!(ipv4_spec && ipv4_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_last &&
                            (ipv4_last->hdr.version_ihl ||
                             ipv4_last->hdr.type_of_service ||
                             ipv4_last->hdr.time_to_live ||
                             ipv4_last->hdr.total_length ||
                             ipv4_last->hdr.next_proto_id ||
                             ipv4_last->hdr.hdr_checksum ||
                             ipv4_last->hdr.src_addr ||
                             ipv4_last->hdr.dst_addr)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 last.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.type_of_service ==
                            UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_TOS;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 DSCP);
                        }

                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_PROTO;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 PROT);
                        }

                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_TTL;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 TTL);
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
                                input_set |= IAVF_INSET_IPV4_SRC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 SRC);
                        }

                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
                                input_set |= IAVF_INSET_IPV4_DST;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 DST);
                        }

                        if (tun_inner) {
                                input_set &= ~IAVF_PROT_IPV4_OUTER;
                                input_set |= IAVF_PROT_IPV4_INNER;
                        }

                        rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
                                   sizeof(ipv4_spec->hdr));

                        hdrs->count = ++layer;

                        /* Only a full-range packet ID is supported for
                         * fragmented IPv4:
                         * spec is 0, last is 0xffff, mask is 0xffff.
                         */
                        if (ipv4_last && ipv4_spec->hdr.packet_id == 0 &&
                            ipv4_last->hdr.packet_id == UINT16_MAX &&
                            ipv4_mask->hdr.packet_id == UINT16_MAX &&
                            ipv4_mask->hdr.fragment_offset == UINT16_MAX) {
                                /* All IPv4 fragment packets share the same
                                 * ethertype; if the spec covers every valid
                                 * packet ID, add the ethertype to the input
                                 * set.
                                 */
                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                /* add dummy header for IPv4 Fragment */
                                iavf_fdir_add_fragment_hdr(hdrs, layer);
                        } else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

                        if (!(ipv6_spec && ipv6_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        if ((ipv6_mask->hdr.vtc_flow &
                              rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
                             == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
                                input_set |= IAVF_INSET_IPV6_TC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 TC);
                        }

                        if (ipv6_mask->hdr.proto == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV6_NEXT_HDR;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 PROT);
                        }

                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 HOP_LIMIT);
                        }

                        if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
                                    RTE_DIM(ipv6_mask->hdr.src_addr))) {
                                input_set |= IAVF_INSET_IPV6_SRC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 SRC);
                        }
                        if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
                                    RTE_DIM(ipv6_mask->hdr.dst_addr))) {
                                input_set |= IAVF_INSET_IPV6_DST;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 DST);
                        }

                        if (tun_inner) {
                                input_set &= ~IAVF_PROT_IPV6_OUTER;
                                input_set |= IAVF_PROT_IPV6_INNER;
                        }

                        rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
                                   sizeof(ipv6_spec->hdr));

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
                        ipv6_frag_spec = item->spec;
                        ipv6_frag_last = item->last;
                        ipv6_frag_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6_EH_FRAG);

                        if (!(ipv6_frag_spec && ipv6_frag_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        /* Only a full-range packet ID is supported for
                         * fragmented IPv6:
                         * spec is 0, last is 0xffffffff, mask is 0xffffffff.
                         */
                        if (ipv6_frag_last && ipv6_frag_spec->hdr.id == 0 &&
                            ipv6_frag_last->hdr.id == UINT32_MAX &&
                            ipv6_frag_mask->hdr.id == UINT32_MAX &&
                            ipv6_frag_mask->hdr.frag_data == UINT16_MAX) {
                                /* All IPv6 fragment packets share the same
                                 * ethertype; if the spec covers every valid
                                 * packet ID, add the ethertype to the input
                                 * set.
                                 */
                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                rte_memcpy(hdr->buffer, &ipv6_frag_spec->hdr,
                                           sizeof(ipv6_frag_spec->hdr));
                        } else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

                        if (udp_spec && udp_mask) {
                                if (udp_mask->hdr.dgram_len ||
                                        udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid UDP mask");
                                        return -rte_errno;
                                }

                                if (udp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_UDP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
                                }
                                if (udp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_UDP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
                                }

                                if (tun_inner) {
                                        input_set &= ~IAVF_PROT_UDP_OUTER;
                                        input_set |= IAVF_PROT_UDP_INNER;
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &udp_spec->hdr,
                                                sizeof(udp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

                        if (tcp_spec && tcp_mask) {
                                if (tcp_mask->hdr.sent_seq ||
                                        tcp_mask->hdr.recv_ack ||
                                        tcp_mask->hdr.data_off ||
                                        tcp_mask->hdr.tcp_flags ||
                                        tcp_mask->hdr.rx_win ||
                                        tcp_mask->hdr.cksum ||
                                        tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid TCP mask");
                                        return -rte_errno;
                                }

                                if (tcp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_TCP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
                                }
                                if (tcp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_TCP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
                                }

                                if (tun_inner) {
                                        input_set &= ~IAVF_PROT_TCP_OUTER;
                                        input_set |= IAVF_PROT_TCP_INNER;
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &tcp_spec->hdr,
                                                sizeof(tcp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

                        if (sctp_spec && sctp_mask) {
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
1008                                                 "Invalid UDP mask");
                                        return -rte_errno;
                                }

                                if (sctp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_SCTP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
                                }
                                if (sctp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_SCTP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &sctp_spec->hdr,
                                                sizeof(sctp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_GTPU:
                        gtp_spec = item->spec;
                        gtp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);

                        if (gtp_spec && gtp_mask) {
                                if (gtp_mask->v_pt_rsv_flags ||
                                        gtp_mask->msg_type ||
                                        gtp_mask->msg_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid GTP mask");
                                        return -rte_errno;
                                }

                                if (gtp_mask->teid == UINT32_MAX) {
                                        input_set |= IAVF_INSET_GTPU_TEID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
                                }

                                rte_memcpy(hdr->buffer,
                                        gtp_spec, sizeof(*gtp_spec));
                        }

                        tun_inner = 1;

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
                        gtp_psc_spec = item->spec;
                        gtp_psc_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        /* a spec without a mask cannot refine the header type */
                        if (!gtp_psc_spec || !gtp_psc_mask)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
                        else if (gtp_psc_mask->qfi && !gtp_psc_mask->pdu_type)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
                        else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_UPLINK)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP);
                        else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_DWLINK)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN);

                        if (gtp_psc_spec && gtp_psc_mask) {
                                if (gtp_psc_mask->qfi == UINT8_MAX) {
                                        input_set |= IAVF_INSET_GTPU_QFI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
                                }

                                rte_memcpy(hdr->buffer, gtp_psc_spec,
                                        sizeof(*gtp_psc_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
                        l2tpv3oip_spec = item->spec;
                        l2tpv3oip_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);

                        if (l2tpv3oip_spec && l2tpv3oip_mask) {
                                if (l2tpv3oip_mask->session_id == UINT32_MAX) {
                                        input_set |= IAVF_L2TPV3OIP_SESSION_ID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
                                }

                                rte_memcpy(hdr->buffer, l2tpv3oip_spec,
                                        sizeof(*l2tpv3oip_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_ESP:
                        esp_spec = item->spec;
                        esp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

                        if (esp_spec && esp_mask) {
                                if (esp_mask->hdr.spi == UINT32_MAX) {
                                        input_set |= IAVF_INSET_ESP_SPI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
                                }

                                rte_memcpy(hdr->buffer, &esp_spec->hdr,
                                        sizeof(esp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_AH:
                        ah_spec = item->spec;
                        ah_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

                        if (ah_spec && ah_mask) {
                                if (ah_mask->spi == UINT32_MAX) {
                                        input_set |= IAVF_INSET_AH_SPI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
                                }

                                rte_memcpy(hdr->buffer, ah_spec,
                                        sizeof(*ah_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_PFCP:
                        pfcp_spec = item->spec;
                        pfcp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);

                        if (pfcp_spec && pfcp_mask) {
                                if (pfcp_mask->s_field == UINT8_MAX) {
                                        input_set |= IAVF_INSET_PFCP_S_FIELD;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
                                }

                                rte_memcpy(hdr->buffer, pfcp_spec,
                                        sizeof(*pfcp_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_ECPRI:
                        ecpri_spec = item->spec;
                        ecpri_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ECPRI);

                        if (ecpri_spec && ecpri_mask) {
                                /* dereference the spec only after the NULL
                                 * check above
                                 */
                                ecpri_common.u32 =
                                        rte_be_to_cpu_32(ecpri_spec->hdr.common.u32);

                                if (ecpri_common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA &&
                                                ecpri_mask->hdr.type0.pc_id == UINT16_MAX) {
                                        input_set |= IAVF_ECPRI_PC_RTC_ID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ECPRI,
                                                                         PC_RTC_ID);
                                }

                                rte_memcpy(hdr->buffer, ecpri_spec,
                                        sizeof(*ecpri_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Invalid pattern item.");
                        return -rte_errno;
                }
        }

        if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Protocol header layers exceed the maximum value");
                return -rte_errno;
        }

        if (!iavf_fdir_refine_input_set(input_set,
                                        input_set_mask | IAVF_INSET_ETHERTYPE,
                                        filter)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
                                   "Invalid input set");
                return -rte_errno;
        }

        filter->input_set = input_set;

        return 0;
}

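/*
 * Top-level parse entry point: match the pattern against the supported
 * list above, then fill vf->fdir.conf from the pattern and actions.
 * On success the filter is handed back through @meta for the create or
 * validate stage.
 */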
static int
iavf_fdir_parse(struct iavf_adapter *ad,
                struct iavf_pattern_match_item *array,
                uint32_t array_len,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                void **meta,
                struct rte_flow_error *error)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_fdir_conf *filter = &vf->fdir.conf;
        struct iavf_pattern_match_item *item = NULL;
        int ret;

        memset(filter, 0, sizeof(*filter));

        item = iavf_search_pattern_match_item(pattern, array, array_len, error);
        if (!item)
                return -rte_errno;

        ret = iavf_fdir_parse_pattern(ad, pattern, item->input_set_mask,
                                      error, filter);
        if (ret)
                goto error;

        ret = iavf_fdir_parse_action(ad, actions, error, filter);
        if (ret)
                goto error;

        if (meta)
                *meta = filter;

error:
        rte_free(item);
        return ret;
}

static struct iavf_flow_parser iavf_fdir_parser = {
        .engine = &iavf_fdir_engine,
        .array = iavf_fdir_pattern,
        .array_len = RTE_DIM(iavf_fdir_pattern),
        .parse_pattern_action = iavf_fdir_parse,
        .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(iavf_fdir_engine_register)
{
        iavf_register_flow_engine(&iavf_fdir_engine);
}
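
/*
 * Example usage (illustrative only, not part of the driver): once this
 * engine is registered, a queue-directed IPv4/UDP rule reaches it
 * through the generic rte_flow API.  The equivalent testpmd command
 * would be:
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 /
 *        udp src is 22 dst is 23 / end actions queue index 3 / end
 */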