net/iavf: support GTPU inner IPv4 for flow director
drivers/net/iavf/iavf_fdir.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "virtchnl.h"
#include "iavf_rxtx.h"

#define IAVF_FDIR_MAX_QREGION_SIZE 128

#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

#define IAVF_GTPU_EH_DWLINK 0
#define IAVF_GTPU_EH_UPLINK 1

#define IAVF_FDIR_INSET_ETH (\
        IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
        IAVF_INSET_IPV4_TTL)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
        IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV4 (\
        IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
        IAVF_INSET_TUN_IPV4_PROTO | IAVF_INSET_TUN_IPV4_TOS | \
        IAVF_INSET_TUN_IPV4_TTL)

#define IAVF_FDIR_INSET_GTPU_IPV4_UDP (\
        IAVF_FDIR_INSET_GTPU_IPV4 | \
        IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV4_TCP (\
        IAVF_FDIR_INSET_GTPU_IPV4 | \
        IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_IPV6_GTPU (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_L2TPV3OIP (\
        IAVF_L2TPV3OIP_SESSION_ID)

#define IAVF_FDIR_INSET_ESP (\
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
        IAVF_INSET_AH_SPI)

#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_PFCP (\
        IAVF_INSET_PFCP_S_FIELD)

#define IAVF_FDIR_INSET_ECPRI (\
        IAVF_INSET_ECPRI)

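/*
 * Patterns supported by the FDIR engine, each paired with the input-set
 * mask that limits which header fields a rule using that pattern may
 * match on.
 */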
static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
        {iavf_pattern_ethertype,                 IAVF_FDIR_INSET_ETH,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4,                  IAVF_FDIR_INSET_ETH_IPV4,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp,              IAVF_FDIR_INSET_ETH_IPV4_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_tcp,              IAVF_FDIR_INSET_ETH_IPV4_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_sctp,             IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6,                  IAVF_FDIR_INSET_ETH_IPV6,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp,              IAVF_FDIR_INSET_ETH_IPV6_UDP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_tcp,              IAVF_FDIR_INSET_ETH_IPV6_TCP,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_sctp,             IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu,             IAVF_FDIR_INSET_IPV4_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4,        IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4_udp,    IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,    IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh,          IAVF_FDIR_INSET_IPV4_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4,     IAVF_FDIR_INSET_GTPU_IPV4,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gtpu,             IAVF_FDIR_INSET_IPV6_GTPU,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_gtpu_eh,          IAVF_FDIR_INSET_IPV6_GTPU_EH,  IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_l2tpv3,           IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_esp,              IAVF_FDIR_INSET_ESP,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_esp,              IAVF_FDIR_INSET_ESP,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ah,               IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_ah,               IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp_esp,          IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_esp,          IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_pfcp,             IAVF_FDIR_INSET_PFCP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_pfcp,             IAVF_FDIR_INSET_PFCP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ecpri,                 IAVF_FDIR_INSET_ECPRI,         IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ecpri,            IAVF_FDIR_INSET_ECPRI,         IAVF_INSET_NONE},
};

static struct iavf_flow_parser iavf_fdir_parser;

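/*
 * Register the FDIR parser with the generic flow framework, but only when
 * the PF has advertised the VIRTCHNL_VF_OFFLOAD_FDIR_PF capability.
 */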
static int
iavf_fdir_init(struct iavf_adapter *ad)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_flow_parser *parser;

        if (!vf->vf_res)
                return -EINVAL;

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
                parser = &iavf_fdir_parser;
        else
                return -ENOTSUP;

        return iavf_register_parser(parser, ad);
}

static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
        iavf_unregister_parser(&iavf_fdir_parser, ad);
}

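/*
 * Create an FDIR rule: make a private copy of the parsed filter, program
 * it on the PF over virtchnl and, if the rule carries a MARK action,
 * enable FDIR mark extraction on the Rx path.
 */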
static int
iavf_fdir_create(struct iavf_adapter *ad,
                struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        struct iavf_fdir_conf *rule;
        int ret;

        rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
        if (!rule) {
                rte_flow_error_set(error, ENOMEM,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to allocate memory for fdir rule");
                return -rte_errno;
        }

        ret = iavf_fdir_add(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to add filter rule.");
                goto free_entry;
        }

        if (filter->mark_flag == 1)
                iavf_fdir_rx_proc_enable(ad, 1);

        rte_memcpy(rule, filter, sizeof(*rule));
        flow->rule = rule;

        return 0;

free_entry:
        rte_free(rule);
        return -rte_errno;
}

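/*
 * Destroy an FDIR rule: remove it from the PF, disable FDIR mark
 * extraction if the rule used a MARK action, and free the private copy.
 */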
static int
iavf_fdir_destroy(struct iavf_adapter *ad,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter;
        int ret;

        filter = (struct iavf_fdir_conf *)flow->rule;

        ret = iavf_fdir_del(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to delete filter rule.");
                return -rte_errno;
        }

        if (filter->mark_flag == 1)
                iavf_fdir_rx_proc_enable(ad, 0);

        flow->rule = NULL;
        rte_free(filter);

        return 0;
}

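/*
 * Validate a parsed FDIR filter against the PF without programming it.
 */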
static int
iavf_fdir_validation(struct iavf_adapter *ad,
                __rte_unused struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        int ret;

        ret = iavf_fdir_check(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to validate filter rule.");
                return -rte_errno;
        }

        return 0;
}

static struct iavf_flow_engine iavf_fdir_engine = {
        .init = iavf_fdir_init,
        .uninit = iavf_fdir_uninit,
        .create = iavf_fdir_create,
        .destroy = iavf_fdir_destroy,
        .validation = iavf_fdir_validation,
        .type = IAVF_FLOW_ENGINE_FDIR,
};

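/*
 * Parse an RSS action into a queue-region filter action. The queue list
 * must be contiguous, a power of two no larger than
 * IAVF_FDIR_MAX_QREGION_SIZE, within the device's Rx queue count and
 * within the PF's maximum RSS queue-region size.
 */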
static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
                        struct rte_flow_error *error,
                        const struct rte_flow_action *act,
                        struct virtchnl_filter_action *filter_action)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check that the queue indexes for the queue region are contiguous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, act,
                                        "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
                rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size should be any of the following values: "
                                "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
                                "of queues does not exceed the VSI allocation.");
                return -rte_errno;
        }

        if (rss->queue_num > vf->max_rss_qregion) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size cannot be larger than the supported max RSS queue region");
                return -rte_errno;
        }

        filter_action->act_conf.queue.index = rss->queue[0];
        filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

        return 0;
}

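/*
 * Translate rte_flow actions into virtchnl filter actions. At most one
 * destination action (PASSTHRU/DROP/QUEUE/RSS queue region) and at most
 * one MARK action are accepted; a MARK-only rule is completed with an
 * implicit PASSTHRU.
 */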
static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        int ret;

        int number = 0;
        struct virtchnl_filter_action *filter_action;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;

                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_DROP;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_QUEUE;
                        filter_action->act_conf.queue.index = act_q->index;

                        if (filter_action->act_conf.queue.index >=
                                ad->eth_dev->data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions, "Invalid queue for FDIR.");
                                return -rte_errno;
                        }

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_Q_REGION;

                        ret = iavf_fdir_parse_action_qregion(ad,
                                                error, actions, filter_action);
                        if (ret)
                                return ret;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        filter->mark_flag = 1;
                        mark_spec = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_MARK;
                        filter_action->act_conf.mark_id = mark_spec->id;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Invalid action.");
                        return -rte_errno;
                }
        }

        if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Number of actions exceeds the maximum allowed");
                return -rte_errno;
        }

        if (dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Too many mark actions");
                return -rte_errno;
        }

        if (dest_num + mark_num == 0) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Empty action");
                return -rte_errno;
        }

        /* A mark-only rule is treated as mark + passthru. */
        if (dest_num == 0) {
                filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
                filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
                filter->add_fltr.rule_cfg.action_set.count = ++number;
        }

        return 0;
}

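/*
 * Refine an empty input set: a TCP/UDP pattern with no matched fields is
 * narrowed by matching the protocol ID of the preceding IPv4/IPv6 header,
 * so the rule still selects only TCP or UDP packets.
 */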
static bool
iavf_fdir_refine_input_set(const uint64_t input_set,
                           const uint64_t input_set_mask,
                           struct iavf_fdir_conf *filter)
{
        struct virtchnl_proto_hdr *hdr, *hdr_last;
        struct rte_flow_item_ipv4 ipv4_spec;
        struct rte_flow_item_ipv6 ipv6_spec;
        int last_layer;
        uint8_t proto_id;

        if (input_set & ~input_set_mask)
                return false;
        else if (input_set)
                return true;

        last_layer = filter->add_fltr.rule_cfg.proto_hdrs.count - 1;
        /* The last layer of a TCP/UDP pattern is at index 2 or higher. */
        if (last_layer < 2)
                return false;
        hdr_last = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer];
        if (hdr_last->type == VIRTCHNL_PROTO_HDR_TCP)
                proto_id = 6;  /* IPPROTO_TCP */
        else if (hdr_last->type == VIRTCHNL_PROTO_HDR_UDP)
                proto_id = 17; /* IPPROTO_UDP */
        else
                return false;

        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer - 1];
        switch (hdr->type) {
        case VIRTCHNL_PROTO_HDR_IPV4:
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
                memset(&ipv4_spec, 0, sizeof(ipv4_spec));
                ipv4_spec.hdr.next_proto_id = proto_id;
                rte_memcpy(hdr->buffer, &ipv4_spec.hdr,
                           sizeof(ipv4_spec.hdr));
                return true;
        case VIRTCHNL_PROTO_HDR_IPV6:
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
                memset(&ipv6_spec, 0, sizeof(ipv6_spec));
                ipv6_spec.hdr.proto = proto_id;
                rte_memcpy(hdr->buffer, &ipv6_spec.hdr,
                           sizeof(ipv6_spec.hdr));
                return true;
        default:
                return false;
        }
}

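/*
 * Walk the rte_flow pattern and translate each item into a virtchnl
 * protocol header. Fully-masked fields are accumulated into input_set;
 * once a GTP-U item is seen, later IP/UDP/TCP items are treated as inner
 * headers so the tunnel-aware input-set bits are used.
 */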
static int
iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                        const struct rte_flow_item pattern[],
                        const uint64_t input_set_mask,
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        const struct rte_flow_item *item = pattern;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
        const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
        const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
        const struct rte_flow_item_esp *esp_spec, *esp_mask;
        const struct rte_flow_item_ah *ah_spec, *ah_mask;
        const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
        const struct rte_flow_item_ecpri *ecpri_spec, *ecpri_mask;
        struct rte_ecpri_common_hdr ecpri_common;
        uint64_t input_set = IAVF_INSET_NONE;

        enum rte_flow_item_type next_type;
        uint16_t ether_type;

        u8 tun_inner = 0;
        int layer = 0;
        struct virtchnl_proto_hdr *hdr;

        uint8_t ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
        };

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Range matching is not supported");
                        return -rte_errno;
                }

                item_type = item->type;

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);

                        if (next_type == RTE_FLOW_ITEM_TYPE_END &&
                                (!eth_spec || !eth_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "NULL eth spec/mask.");
                                return -rte_errno;
                        }

                        if (eth_spec && eth_mask) {
                                if (!rte_is_zero_ether_addr(&eth_mask->src) ||
                                    !rte_is_zero_ether_addr(&eth_mask->dst)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid MAC_addr mask.");
                                        return -rte_errno;
                                }
                        }

                        if (eth_spec && eth_mask && eth_mask->type) {
                                if (eth_mask->type != RTE_BE16(0xffff)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid type mask.");
                                        return -rte_errno;
                                }

                                ether_type = rte_be_to_cpu_16(eth_spec->type);
                                if (ether_type == RTE_ETHER_TYPE_IPV4 ||
                                        ether_type == RTE_ETHER_TYPE_IPV6) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item,
                                                "Unsupported ether_type.");
                                        return -rte_errno;
                                }

                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);

                                rte_memcpy(hdr->buffer,
                                        eth_spec, sizeof(struct rte_ether_hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

                        if (ipv4_spec && ipv4_mask) {
                                if (ipv4_mask->hdr.version_ihl ||
                                        ipv4_mask->hdr.total_length ||
                                        ipv4_mask->hdr.packet_id ||
                                        ipv4_mask->hdr.fragment_offset ||
                                        ipv4_mask->hdr.hdr_checksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid IPv4 mask.");
                                        return -rte_errno;
                                }

                                if (ipv4_mask->hdr.type_of_service ==
                                                                UINT8_MAX) {
                                        input_set |= IAVF_INSET_IPV4_TOS;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
                                }
                                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
                                        input_set |= IAVF_INSET_IPV4_PROTO;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
                                }
                                if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
                                        input_set |= IAVF_INSET_IPV4_TTL;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
                                }
                                if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
                                        input_set |= IAVF_INSET_IPV4_SRC;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
                                }
                                if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
                                        input_set |= IAVF_INSET_IPV4_DST;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
                                }

                                if (tun_inner) {
                                        input_set &= ~IAVF_PROT_IPV4_OUTER;
                                        input_set |= IAVF_PROT_IPV4_INNER;
                                }

                                rte_memcpy(hdr->buffer,
                                        &ipv4_spec->hdr,
                                        sizeof(ipv4_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

                        if (ipv6_spec && ipv6_mask) {
                                if (ipv6_mask->hdr.payload_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid IPv6 mask");
                                        return -rte_errno;
                                }

                                if ((ipv6_mask->hdr.vtc_flow &
                                        rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
                                        == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
                                        input_set |= IAVF_INSET_IPV6_TC;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
                                }
                                if (ipv6_mask->hdr.proto == UINT8_MAX) {
                                        input_set |= IAVF_INSET_IPV6_NEXT_HDR;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
                                }
                                if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
                                        input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
                                }
                                if (!memcmp(ipv6_mask->hdr.src_addr,
                                        ipv6_addr_mask,
                                        RTE_DIM(ipv6_mask->hdr.src_addr))) {
                                        input_set |= IAVF_INSET_IPV6_SRC;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
                                }
                                if (!memcmp(ipv6_mask->hdr.dst_addr,
                                        ipv6_addr_mask,
                                        RTE_DIM(ipv6_mask->hdr.dst_addr))) {
                                        input_set |= IAVF_INSET_IPV6_DST;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
                                }

                                rte_memcpy(hdr->buffer,
                                        &ipv6_spec->hdr,
                                        sizeof(ipv6_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

                        if (udp_spec && udp_mask) {
                                if (udp_mask->hdr.dgram_len ||
                                        udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid UDP mask");
                                        return -rte_errno;
                                }

                                if (udp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_UDP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
                                }
                                if (udp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_UDP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
                                }

                                if (tun_inner) {
                                        input_set &= ~IAVF_PROT_UDP_OUTER;
                                        input_set |= IAVF_PROT_UDP_INNER;
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &udp_spec->hdr,
                                                sizeof(udp_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

                        if (tcp_spec && tcp_mask) {
                                if (tcp_mask->hdr.sent_seq ||
                                        tcp_mask->hdr.recv_ack ||
                                        tcp_mask->hdr.data_off ||
                                        tcp_mask->hdr.tcp_flags ||
                                        tcp_mask->hdr.rx_win ||
                                        tcp_mask->hdr.cksum ||
                                        tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid TCP mask");
                                        return -rte_errno;
                                }

                                if (tcp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_TCP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
                                }
                                if (tcp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_TCP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
                                }

                                if (tun_inner) {
                                        input_set &= ~IAVF_PROT_TCP_OUTER;
                                        input_set |= IAVF_PROT_TCP_INNER;
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &tcp_spec->hdr,
                                                sizeof(tcp_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

                        if (sctp_spec && sctp_mask) {
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid SCTP mask");
                                        return -rte_errno;
                                }

                                if (sctp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_SCTP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
                                }
                                if (sctp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_SCTP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &sctp_spec->hdr,
                                                sizeof(sctp_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_GTPU:
                        gtp_spec = item->spec;
                        gtp_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);

                        if (gtp_spec && gtp_mask) {
                                if (gtp_mask->v_pt_rsv_flags ||
                                        gtp_mask->msg_type ||
                                        gtp_mask->msg_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid GTP mask");
                                        return -rte_errno;
                                }

                                if (gtp_mask->teid == UINT32_MAX) {
                                        input_set |= IAVF_INSET_GTPU_TEID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
                                }

                                rte_memcpy(hdr->buffer,
                                        gtp_spec, sizeof(*gtp_spec));
                        }

                        /* Subsequent L3/L4 items describe the inner headers. */
                        tun_inner = 1;

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
                        gtp_psc_spec = item->spec;
                        gtp_psc_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        if (!gtp_psc_spec)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
                        else if (gtp_psc_mask && gtp_psc_mask->qfi &&
                                 !gtp_psc_mask->pdu_type)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
                        else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_UPLINK)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP);
                        else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_DWLINK)
                                VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN);

                        if (gtp_psc_spec && gtp_psc_mask) {
                                if (gtp_psc_mask->qfi == UINT8_MAX) {
                                        input_set |= IAVF_INSET_GTPU_QFI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
                                }

                                rte_memcpy(hdr->buffer, gtp_psc_spec,
                                        sizeof(*gtp_psc_spec));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
                        l2tpv3oip_spec = item->spec;
                        l2tpv3oip_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);

                        if (l2tpv3oip_spec && l2tpv3oip_mask) {
                                if (l2tpv3oip_mask->session_id == UINT32_MAX) {
                                        input_set |= IAVF_L2TPV3OIP_SESSION_ID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
                                }

                                rte_memcpy(hdr->buffer, l2tpv3oip_spec,
                                        sizeof(*l2tpv3oip_spec));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_ESP:
                        esp_spec = item->spec;
                        esp_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

                        if (esp_spec && esp_mask) {
                                if (esp_mask->hdr.spi == UINT32_MAX) {
                                        input_set |= IAVF_INSET_ESP_SPI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
                                }

                                rte_memcpy(hdr->buffer, &esp_spec->hdr,
                                        sizeof(esp_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_AH:
                        ah_spec = item->spec;
                        ah_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

                        if (ah_spec && ah_mask) {
                                if (ah_mask->spi == UINT32_MAX) {
                                        input_set |= IAVF_INSET_AH_SPI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
                                }

                                rte_memcpy(hdr->buffer, ah_spec,
                                        sizeof(*ah_spec));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_PFCP:
                        pfcp_spec = item->spec;
                        pfcp_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);

                        if (pfcp_spec && pfcp_mask) {
                                if (pfcp_mask->s_field == UINT8_MAX) {
                                        input_set |= IAVF_INSET_PFCP_S_FIELD;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
                                }

                                rte_memcpy(hdr->buffer, pfcp_spec,
                                        sizeof(*pfcp_spec));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_ECPRI:
                        ecpri_spec = item->spec;
                        ecpri_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ECPRI);

                        if (ecpri_spec && ecpri_mask) {
                                ecpri_common.u32 =
                                        rte_be_to_cpu_32(ecpri_spec->hdr.common.u32);

                                if (ecpri_common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA &&
                                                ecpri_mask->hdr.type0.pc_id == UINT16_MAX) {
                                        input_set |= IAVF_ECPRI_PC_RTC_ID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ECPRI,
                                                                         PC_RTC_ID);
                                }

                                rte_memcpy(hdr->buffer, ecpri_spec,
                                        sizeof(*ecpri_spec));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Invalid pattern item.");
                        return -rte_errno;
                }
        }

        if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Protocol header layers exceed the maximum value");
                return -rte_errno;
        }

        if (!iavf_fdir_refine_input_set(input_set, input_set_mask, filter)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
                                   "Invalid input set");
                return -rte_errno;
        }

        filter->input_set = input_set;

        return 0;
}

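/*
 * Top-level parse entry for the FDIR engine: match the pattern against
 * the supported templates, then fill vf->fdir.conf from the pattern items
 * and the actions.
 */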
static int
iavf_fdir_parse(struct iavf_adapter *ad,
                struct iavf_pattern_match_item *array,
                uint32_t array_len,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                void **meta,
                struct rte_flow_error *error)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_fdir_conf *filter = &vf->fdir.conf;
        struct iavf_pattern_match_item *item = NULL;
        int ret;

        memset(filter, 0, sizeof(*filter));

        item = iavf_search_pattern_match_item(pattern, array, array_len, error);
        if (!item)
                return -rte_errno;

        ret = iavf_fdir_parse_pattern(ad, pattern, item->input_set_mask,
                                      error, filter);
        if (ret)
                goto error;

        ret = iavf_fdir_parse_action(ad, actions, error, filter);
        if (ret)
                goto error;

        if (meta)
                *meta = filter;

error:
        rte_free(item);
        return ret;
}

static struct iavf_flow_parser iavf_fdir_parser = {
        .engine = &iavf_fdir_engine,
        .array = iavf_fdir_pattern,
        .array_len = RTE_DIM(iavf_fdir_pattern),
        .parse_pattern_action = iavf_fdir_parse,
        .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
};

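/* Register the FDIR flow engine at startup via constructor. */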
RTE_INIT(iavf_fdir_engine_register)
{
        iavf_register_flow_engine(&iavf_fdir_engine);
}