/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "virtchnl.h"
#include "iavf_rxtx.h"

#define IAVF_FDIR_MAX_QREGION_SIZE 128

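/* The IPv6 Traffic Class occupies bits 20-27 of the 32-bit vtc_flow word
 * (version:4 | traffic class:8 | flow label:20), hence the 20-bit offset
 * used to build the TC mask below.
 */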
#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

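/* PDU type values carried in the GTP-U extension header:
 * 0 indicates a downlink PDU, 1 an uplink PDU.
 */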
#define IAVF_GTPU_EH_DWLINK 0
#define IAVF_GTPU_EH_UPLINK 1

#define IAVF_FDIR_INSET_ETH (\
        IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
        IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_ID)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
        IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT (\
        IAVF_INSET_IPV6_ID)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV4 (\
        IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
        IAVF_INSET_TUN_IPV4_PROTO | IAVF_INSET_TUN_IPV4_TOS | \
        IAVF_INSET_TUN_IPV4_TTL)

#define IAVF_FDIR_INSET_GTPU_IPV4_UDP (\
        IAVF_FDIR_INSET_GTPU_IPV4 | \
        IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV4_TCP (\
        IAVF_FDIR_INSET_GTPU_IPV4 | \
        IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_IPV6_GTPU (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV6 (\
        IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
        IAVF_INSET_TUN_IPV6_NEXT_HDR | IAVF_INSET_TUN_IPV6_TC | \
        IAVF_INSET_TUN_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_GTPU_IPV6_UDP (\
        IAVF_FDIR_INSET_GTPU_IPV6 | \
        IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV6_TCP (\
        IAVF_FDIR_INSET_GTPU_IPV6 | \
        IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_L2TPV3OIP (\
        IAVF_L2TPV3OIP_SESSION_ID)

#define IAVF_FDIR_INSET_ESP (\
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
        IAVF_INSET_AH_SPI)

#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_PFCP (\
        IAVF_INSET_PFCP_S_FIELD)

#define IAVF_FDIR_INSET_ECPRI (\
        IAVF_INSET_ECPRI)

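/* Supported FDIR patterns and, for each one, the input-set bits that may be
 * matched; the trailing column is unused here and left as IAVF_INSET_NONE.
 *
 * A rule is created through the rte_flow API, e.g. from testpmd:
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 / udp src is 32 / end
 *        actions queue index 2 / end
 */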
static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
        {iavf_pattern_ethertype,                 IAVF_FDIR_INSET_ETH,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4,                  IAVF_FDIR_INSET_ETH_IPV4,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp,              IAVF_FDIR_INSET_ETH_IPV4_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_tcp,              IAVF_FDIR_INSET_ETH_IPV4_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_sctp,             IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6,                  IAVF_FDIR_INSET_ETH_IPV6,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_frag_ext,         IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp,              IAVF_FDIR_INSET_ETH_IPV6_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_tcp,              IAVF_FDIR_INSET_ETH_IPV6_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_sctp,             IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu,             IAVF_FDIR_INSET_IPV4_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4,        IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4_udp,    IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,    IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6,        IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6_udp,    IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,    IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh,          IAVF_FDIR_INSET_IPV4_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4,     IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6,     IAVF_FDIR_INSET_GTPU_IPV6,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gtpu,             IAVF_FDIR_INSET_IPV6_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gtpu_eh,          IAVF_FDIR_INSET_IPV6_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_esp,              IAVF_FDIR_INSET_ESP,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_esp,              IAVF_FDIR_INSET_ESP,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ah,               IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_ah,               IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp_esp,          IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_esp,          IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_pfcp,             IAVF_FDIR_INSET_PFCP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_pfcp,             IAVF_FDIR_INSET_PFCP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ecpri,                 IAVF_FDIR_INSET_ECPRI,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ecpri,            IAVF_FDIR_INSET_ECPRI,         IAVF_INSET_NONE},
};

static struct iavf_flow_parser iavf_fdir_parser;

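/* Register the FDIR parser only when the PF has advertised the
 * VIRTCHNL_VF_OFFLOAD_FDIR_PF capability for this VF.
 */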
static int
iavf_fdir_init(struct iavf_adapter *ad)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_flow_parser *parser;

        if (!vf->vf_res)
                return -EINVAL;

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
                parser = &iavf_fdir_parser;
        else
                return -ENOTSUP;

        return iavf_register_parser(parser, ad);
}

static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
        iavf_unregister_parser(&iavf_fdir_parser, ad);
}

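/* Program the parsed rule (meta) into hardware through virtchnl and keep a
 * private copy in flow->rule so the rule can be deleted later.
 */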
static int
iavf_fdir_create(struct iavf_adapter *ad,
                struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        struct iavf_fdir_conf *rule;
        int ret;

        rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
        if (!rule) {
                rte_flow_error_set(error, ENOMEM,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to allocate memory for fdir rule");
                return -rte_errno;
        }

        ret = iavf_fdir_add(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to add filter rule.");
                goto free_entry;
        }

        if (filter->mark_flag == 1)
                iavf_fdir_rx_proc_enable(ad, 1);

        rte_memcpy(rule, filter, sizeof(*rule));
        flow->rule = rule;

        return 0;

free_entry:
        rte_free(rule);
        return -rte_errno;
}

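/* Remove the rule from hardware and release the cached copy. */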
static int
iavf_fdir_destroy(struct iavf_adapter *ad,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter;
        int ret;

        filter = (struct iavf_fdir_conf *)flow->rule;

        ret = iavf_fdir_del(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to delete filter rule.");
                return -rte_errno;
        }

        if (filter->mark_flag == 1)
                iavf_fdir_rx_proc_enable(ad, 0);

        flow->rule = NULL;
        rte_free(filter);

        return 0;
}

static int
iavf_fdir_validation(struct iavf_adapter *ad,
                __rte_unused struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        int ret;

        ret = iavf_fdir_check(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to validate filter rule.");
                return -rte_errno;
        }

        return 0;
}

static struct iavf_flow_engine iavf_fdir_engine = {
        .init = iavf_fdir_init,
        .uninit = iavf_fdir_uninit,
        .create = iavf_fdir_create,
        .destroy = iavf_fdir_destroy,
        .validation = iavf_fdir_validation,
        .type = IAVF_FLOW_ENGINE_FDIR,
};

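/* Parse an RSS action into a queue-region destination.  The region is
 * encoded as a start index plus a power-of-two size exponent, e.g. eight
 * contiguous queues starting at queue 4 yield index = 4 and
 * region = log2(8) = 3.
 */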
static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
                        struct rte_flow_error *error,
                        const struct rte_flow_action *act,
                        struct virtchnl_filter_action *filter_action)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check if the queue indexes for the queue region are contiguous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, act,
                                        "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
                rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size should be any of the following values: "
                                "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
                                "of queues does not exceed the VSI allocation.");
                return -rte_errno;
        }

        if (rss->queue_num > vf->max_rss_qregion) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size cannot be larger than the supported max RSS queue region");
                return -rte_errno;
        }

        filter_action->act_conf.queue.index = rss->queue[0];
        filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

        return 0;
}

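/* Translate the rte_flow action list into virtchnl filter actions.  A rule
 * carries at most one destination action (passthru, drop, queue or queue
 * region) and at most one mark action; a mark-only rule is completed with
 * an implicit passthru.
 */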
static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        int ret;

        int number = 0;
        struct virtchnl_filter_action *filter_action;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;

                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_DROP;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_QUEUE;
                        filter_action->act_conf.queue.index = act_q->index;

                        if (filter_action->act_conf.queue.index >=
                                ad->eth_dev->data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions, "Invalid queue for FDIR.");
                                return -rte_errno;
                        }

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_Q_REGION;

                        ret = iavf_fdir_parse_action_qregion(ad,
                                                error, actions, filter_action);
                        if (ret)
                                return ret;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        filter->mark_flag = 1;
                        mark_spec = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_MARK;
                        filter_action->act_conf.mark_id = mark_spec->id;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Invalid action.");
                        return -rte_errno;
                }
        }

        if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Number of actions exceeds the maximum value");
                return -rte_errno;
        }

        if (dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Too many mark actions");
                return -rte_errno;
        }

        if (dest_num + mark_num == 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Empty action");
                return -rte_errno;
        }

        /* A mark-only rule is equivalent to mark + passthru. */
        if (dest_num == 0) {
                filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
                filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
                filter->add_fltr.rule_cfg.action_set.count = ++number;
        }

        return 0;
}

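/* When the pattern carried no fully-masked field at all, refine the rule so
 * it still matches something: for a trailing TCP/UDP header, match on the
 * L3 next-protocol field instead (6 for TCP, 17 for UDP).
 */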
static bool
iavf_fdir_refine_input_set(const uint64_t input_set,
                           const uint64_t input_set_mask,
                           struct iavf_fdir_conf *filter)
{
        struct virtchnl_proto_hdr *hdr, *hdr_last;
        struct rte_flow_item_ipv4 ipv4_spec;
        struct rte_flow_item_ipv6 ipv6_spec;
        int last_layer;
        uint8_t proto_id;

        if (input_set & ~input_set_mask)
                return false;
        else if (input_set)
                return true;

        last_layer = filter->add_fltr.rule_cfg.proto_hdrs.count - 1;
        /* The last layer of a TCP/UDP pattern is at least 2 (ETH + L3 + L4). */
        if (last_layer < 2)
                return false;
        hdr_last = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer];
        if (hdr_last->type == VIRTCHNL_PROTO_HDR_TCP)
                proto_id = 6;
        else if (hdr_last->type == VIRTCHNL_PROTO_HDR_UDP)
                proto_id = 17;
        else
                return false;

        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer - 1];
        switch (hdr->type) {
        case VIRTCHNL_PROTO_HDR_IPV4:
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
                memset(&ipv4_spec, 0, sizeof(ipv4_spec));
                ipv4_spec.hdr.next_proto_id = proto_id;
                rte_memcpy(hdr->buffer, &ipv4_spec.hdr,
                           sizeof(ipv4_spec.hdr));
                return true;
        case VIRTCHNL_PROTO_HDR_IPV6:
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
                memset(&ipv6_spec, 0, sizeof(ipv6_spec));
                ipv6_spec.hdr.proto = proto_id;
                rte_memcpy(hdr->buffer, &ipv6_spec.hdr,
                           sizeof(ipv6_spec.hdr));
                return true;
        default:
                return false;
        }
}

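/* Insert a dummy IPv4 fragment header at the given layer, shifting the
 * existing headers up by one slot.
 */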
static void
iavf_fdir_add_fragment_hdr(struct virtchnl_proto_hdrs *hdrs, int layer)
{
        struct virtchnl_proto_hdr *hdr1;
        struct virtchnl_proto_hdr *hdr2;
        int i;

        if (layer < 0 || layer > hdrs->count)
                return;

        /* shift the existing header layers up by one */
        for (i = hdrs->count; i >= layer; i--) {
                hdr1 = &hdrs->proto_hdr[i];
                hdr2 = &hdrs->proto_hdr[i - 1];
                *hdr1 = *hdr2;
        }

        /* add the dummy fragment header */
        hdr1 = &hdrs->proto_hdr[layer];
        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, IPV4_FRAG);
        hdrs->count = ++layer;
}

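/* Walk the rte_flow pattern, translating each item into a virtchnl protocol
 * header; a field joins the input set only when its mask selects the whole
 * field.
 */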
static int
iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                        const struct rte_flow_item pattern[],
                        const uint64_t input_set_mask,
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        struct virtchnl_proto_hdrs *hdrs =
                        &filter->add_fltr.rule_cfg.proto_hdrs;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_last;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
        const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
        const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
        const struct rte_flow_item_esp *esp_spec, *esp_mask;
        const struct rte_flow_item_ah *ah_spec, *ah_mask;
        const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
        const struct rte_flow_item_ecpri *ecpri_spec, *ecpri_mask;
        const struct rte_flow_item *item = pattern;
        struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
        struct rte_ecpri_common_hdr ecpri_common;
        uint64_t input_set = IAVF_INSET_NONE;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type next_type;
        uint8_t tun_inner = 0;
        uint16_t ether_type;
        int layer = 0;

        uint8_t  ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
        };

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                item_type = item->type;

                if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    item_type ==
                                    RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Range not supported for this item type");
                        return -rte_errno;
                }

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr1 = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);

                        if (next_type == RTE_FLOW_ITEM_TYPE_END &&
                            (!eth_spec || !eth_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "NULL eth spec/mask.");
                                return -rte_errno;
                        }

                        if (eth_spec && eth_mask) {
                                if (!rte_is_zero_ether_addr(&eth_mask->src) ||
                                    !rte_is_zero_ether_addr(&eth_mask->dst)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid MAC_addr mask.");
                                        return -rte_errno;
                                }
                        }

                        if (eth_spec && eth_mask && eth_mask->type) {
                                if (eth_mask->type != RTE_BE16(0xffff)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid type mask.");
                                        return -rte_errno;
                                }

                                ether_type = rte_be_to_cpu_16(eth_spec->type);
                                if (ether_type == RTE_ETHER_TYPE_IPV4 ||
                                        ether_type == RTE_ETHER_TYPE_IPV6) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item,
                                                "Unsupported ether_type.");
                                        return -rte_errno;
                                }

                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                rte_memcpy(hdr1->buffer, eth_spec,
                                           sizeof(struct rte_ether_hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec = item->spec;
                        ipv4_last = item->last;
                        ipv4_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

                        if (!(ipv4_spec && ipv4_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_last &&
                            (ipv4_last->hdr.version_ihl ||
                             ipv4_last->hdr.type_of_service ||
                             ipv4_last->hdr.time_to_live ||
                             ipv4_last->hdr.total_length ||
                             ipv4_last->hdr.next_proto_id ||
                             ipv4_last->hdr.hdr_checksum ||
                             ipv4_last->hdr.src_addr ||
                             ipv4_last->hdr.dst_addr)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 last.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.type_of_service ==
                            UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_TOS;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 DSCP);
                        }

                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_PROTO;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 PROT);
                        }

                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV4_TTL;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 TTL);
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
                                input_set |= IAVF_INSET_IPV4_SRC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 SRC);
                        }

                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
                                input_set |= IAVF_INSET_IPV4_DST;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
                                                                 DST);
                        }

                        if (tun_inner) {
                                input_set &= ~IAVF_PROT_IPV4_OUTER;
                                input_set |= IAVF_PROT_IPV4_INNER;
                        }

                        rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
                                   sizeof(ipv4_spec->hdr));

                        hdrs->count = ++layer;

                        /* Only the "any packet id" spec is supported for
                         * fragmented IPv4: spec is 0, last is 0xffff and
                         * mask is 0xffff.
                         */
                        if (ipv4_last && ipv4_spec->hdr.packet_id == 0 &&
                            ipv4_last->hdr.packet_id == UINT16_MAX &&
                            ipv4_mask->hdr.packet_id == UINT16_MAX &&
                            ipv4_mask->hdr.fragment_offset == UINT16_MAX) {
                                /* All IPv4 fragments share the same
                                 * ethertype; if the spec covers every valid
                                 * packet id, add the ethertype to the input
                                 * set.
                                 */
                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                /* add dummy header for IPv4 Fragment */
                                iavf_fdir_add_fragment_hdr(hdrs, layer);
                        } else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

                        if (!(ipv6_spec && ipv6_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        if ((ipv6_mask->hdr.vtc_flow &
                              rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
                             == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
                                input_set |= IAVF_INSET_IPV6_TC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 TC);
                        }

                        if (ipv6_mask->hdr.proto == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV6_NEXT_HDR;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 PROT);
                        }

                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
                                input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 HOP_LIMIT);
                        }

                        if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
                                    RTE_DIM(ipv6_mask->hdr.src_addr))) {
                                input_set |= IAVF_INSET_IPV6_SRC;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 SRC);
                        }
                        if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
                                    RTE_DIM(ipv6_mask->hdr.dst_addr))) {
                                input_set |= IAVF_INSET_IPV6_DST;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
                                                                 DST);
                        }

                        if (tun_inner) {
                                input_set &= ~IAVF_PROT_IPV6_OUTER;
                                input_set |= IAVF_PROT_IPV6_INNER;
                        }

                        rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
                                   sizeof(ipv6_spec->hdr));

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
                        ipv6_frag_spec = item->spec;
                        ipv6_frag_last = item->last;
                        ipv6_frag_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6_EH_FRAG);

                        if (!(ipv6_frag_spec && ipv6_frag_mask)) {
                                hdrs->count = ++layer;
                                break;
                        }

                        /* Only the "any packet id" spec is supported for
                         * fragmented IPv6: spec is 0, last is 0xffffffff and
                         * mask is 0xffffffff.
                         */
                        if (ipv6_frag_last && ipv6_frag_spec->hdr.id == 0 &&
                            ipv6_frag_last->hdr.id == UINT32_MAX &&
                            ipv6_frag_mask->hdr.id == UINT32_MAX &&
                            ipv6_frag_mask->hdr.frag_data == UINT16_MAX) {
                                /* All IPv6 fragments share the same
                                 * ethertype; if the spec covers every valid
                                 * packet id, add the ethertype to the input
                                 * set.
                                 */
                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
                                                                 ETHERTYPE);

                                rte_memcpy(hdr->buffer, &ipv6_frag_spec->hdr,
                                           sizeof(ipv6_frag_spec->hdr));
                        } else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

                        if (udp_spec && udp_mask) {
                                if (udp_mask->hdr.dgram_len ||
                                        udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid UDP mask");
                                        return -rte_errno;
                                }

                                if (udp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_UDP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
                                }
                                if (udp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_UDP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
                                }

                                if (tun_inner) {
                                        input_set &= ~IAVF_PROT_UDP_OUTER;
                                        input_set |= IAVF_PROT_UDP_INNER;
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &udp_spec->hdr,
                                                sizeof(udp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

                        if (tcp_spec && tcp_mask) {
                                if (tcp_mask->hdr.sent_seq ||
                                        tcp_mask->hdr.recv_ack ||
                                        tcp_mask->hdr.data_off ||
                                        tcp_mask->hdr.tcp_flags ||
                                        tcp_mask->hdr.rx_win ||
                                        tcp_mask->hdr.cksum ||
                                        tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid TCP mask");
                                        return -rte_errno;
                                }

                                if (tcp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_TCP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
                                }
                                if (tcp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_TCP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
                                }

                                if (tun_inner) {
                                        input_set &= ~IAVF_PROT_TCP_OUTER;
                                        input_set |= IAVF_PROT_TCP_INNER;
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &tcp_spec->hdr,
                                                sizeof(tcp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

                        if (sctp_spec && sctp_mask) {
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid SCTP mask");
                                        return -rte_errno;
                                }

                                if (sctp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_SCTP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
                                }
                                if (sctp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_SCTP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &sctp_spec->hdr,
                                                sizeof(sctp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_GTPU:
                        gtp_spec = item->spec;
                        gtp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);

                        if (gtp_spec && gtp_mask) {
                                if (gtp_mask->v_pt_rsv_flags ||
                                        gtp_mask->msg_type ||
                                        gtp_mask->msg_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid GTP mask");
                                        return -rte_errno;
                                }

                                if (gtp_mask->teid == UINT32_MAX) {
                                        input_set |= IAVF_INSET_GTPU_TEID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
                                }

                                rte_memcpy(hdr->buffer,
                                        gtp_spec, sizeof(*gtp_spec));
                        }

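                        /* Everything that follows GTP-U is matched as the
                         * inner part of the tunnelled packet.
                         */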
                        tun_inner = 1;

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
                        gtp_psc_spec = item->spec;
                        gtp_psc_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        if (!gtp_psc_spec)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
                        else if (gtp_psc_mask && gtp_psc_mask->qfi &&
                                 !gtp_psc_mask->pdu_type)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
                        else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_UPLINK)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP);
                        else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_DWLINK)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN);

                        if (gtp_psc_spec && gtp_psc_mask) {
                                if (gtp_psc_mask->qfi == UINT8_MAX) {
                                        input_set |= IAVF_INSET_GTPU_QFI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
                                }

                                rte_memcpy(hdr->buffer, gtp_psc_spec,
                                        sizeof(*gtp_psc_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
                        l2tpv3oip_spec = item->spec;
                        l2tpv3oip_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);

                        if (l2tpv3oip_spec && l2tpv3oip_mask) {
                                if (l2tpv3oip_mask->session_id == UINT32_MAX) {
                                        input_set |= IAVF_L2TPV3OIP_SESSION_ID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
                                }

                                rte_memcpy(hdr->buffer, l2tpv3oip_spec,
                                        sizeof(*l2tpv3oip_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_ESP:
                        esp_spec = item->spec;
                        esp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

                        if (esp_spec && esp_mask) {
                                if (esp_mask->hdr.spi == UINT32_MAX) {
                                        input_set |= IAVF_INSET_ESP_SPI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
                                }

                                rte_memcpy(hdr->buffer, &esp_spec->hdr,
                                        sizeof(esp_spec->hdr));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_AH:
                        ah_spec = item->spec;
                        ah_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

                        if (ah_spec && ah_mask) {
                                if (ah_mask->spi == UINT32_MAX) {
                                        input_set |= IAVF_INSET_AH_SPI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
                                }

                                rte_memcpy(hdr->buffer, ah_spec,
                                        sizeof(*ah_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_PFCP:
                        pfcp_spec = item->spec;
                        pfcp_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);

                        if (pfcp_spec && pfcp_mask) {
                                if (pfcp_mask->s_field == UINT8_MAX) {
                                        input_set |= IAVF_INSET_PFCP_S_FIELD;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
                                }

                                rte_memcpy(hdr->buffer, pfcp_spec,
                                        sizeof(*pfcp_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_ECPRI:
                        ecpri_spec = item->spec;
                        ecpri_mask = item->mask;

                        hdr = &hdrs->proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ECPRI);

                        if (ecpri_spec && ecpri_mask) {
                                ecpri_common.u32 =
                                        rte_be_to_cpu_32(ecpri_spec->hdr.common.u32);

                                if (ecpri_common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA &&
                                                ecpri_mask->hdr.type0.pc_id == UINT16_MAX) {
                                        input_set |= IAVF_ECPRI_PC_RTC_ID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ECPRI,
                                                                         PC_RTC_ID);
                                }

                                rte_memcpy(hdr->buffer, ecpri_spec,
                                        sizeof(*ecpri_spec));
                        }

                        hdrs->count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Invalid pattern item.");
                        return -rte_errno;
                }
        }

        if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Protocol header layers exceed the maximum value");
                return -rte_errno;
        }

        if (!iavf_fdir_refine_input_set(input_set,
                                        input_set_mask | IAVF_INSET_ETHERTYPE,
                                        filter)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
                                   "Invalid input set");
                return -rte_errno;
        }

        filter->input_set = input_set;

        return 0;
}

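/* Parser entry point: match the pattern against the table above, then fill
 * in vf->fdir.conf from the pattern and action lists.
 */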
static int
iavf_fdir_parse(struct iavf_adapter *ad,
                struct iavf_pattern_match_item *array,
                uint32_t array_len,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                void **meta,
                struct rte_flow_error *error)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_fdir_conf *filter = &vf->fdir.conf;
        struct iavf_pattern_match_item *item = NULL;
        int ret;

        memset(filter, 0, sizeof(*filter));

        item = iavf_search_pattern_match_item(pattern, array, array_len, error);
        if (!item)
                return -rte_errno;

        ret = iavf_fdir_parse_pattern(ad, pattern, item->input_set_mask,
                                      error, filter);
        if (ret)
                goto error;

        ret = iavf_fdir_parse_action(ad, actions, error, filter);
        if (ret)
                goto error;

        if (meta)
                *meta = filter;

error:
        rte_free(item);
        return ret;
}

static struct iavf_flow_parser iavf_fdir_parser = {
        .engine = &iavf_fdir_engine,
        .array = iavf_fdir_pattern,
        .array_len = RTE_DIM(iavf_fdir_pattern),
        .parse_pattern_action = iavf_fdir_parse,
        .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
};

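/* Register the FDIR flow engine with the generic flow framework at
 * constructor time.
 */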
RTE_INIT(iavf_fdir_engine_register)
{
        iavf_register_flow_engine(&iavf_fdir_engine);
}