drivers/net/iavf/iavf_fdir.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */
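
/*
 * Flow director (FDIR) support for the iavf PMD.
 *
 * rte_flow patterns and actions are translated into virtchnl FDIR rule
 * messages (struct iavf_fdir_conf) and sent to the PF driver, which
 * programs the filters into hardware on behalf of the VF.
 */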

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "virtchnl.h"

#define IAVF_FDIR_MAX_QREGION_SIZE 128

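/*
 * In the IPv6 vtc_flow word the 8-bit Traffic Class sits above the
 * 20-bit Flow Label, hence the shift of 20 used to build the TC mask.
 */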
#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

#define IAVF_FDIR_INSET_ETH (\
        IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
        IAVF_INSET_IPV4_TTL)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
        IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_EH (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

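/*
 * Patterns supported by the FDIR engine, each paired with the input-set
 * bits a flow rule may match on. For example (testpmd syntax, as an
 * illustration only):
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 / end
 *        actions queue index 3 / end
 */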
static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
        {iavf_pattern_ethertype,                IAVF_FDIR_INSET_ETH,                    IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4,                 IAVF_FDIR_INSET_ETH_IPV4,               IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp,             IAVF_FDIR_INSET_ETH_IPV4_UDP,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_tcp,             IAVF_FDIR_INSET_ETH_IPV4_TCP,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_sctp,            IAVF_FDIR_INSET_ETH_IPV4_SCTP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6,                 IAVF_FDIR_INSET_ETH_IPV6,               IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp,             IAVF_FDIR_INSET_ETH_IPV6_UDP,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_tcp,             IAVF_FDIR_INSET_ETH_IPV6_TCP,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_sctp,            IAVF_FDIR_INSET_ETH_IPV6_SCTP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu,            IAVF_FDIR_INSET_GTPU,                   IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh,         IAVF_FDIR_INSET_GTPU_EH,                IAVF_INSET_NONE},
};

static struct iavf_flow_parser iavf_fdir_parser;

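/*
 * Register the FDIR parser with the generic flow framework, provided the
 * PF advertised the VIRTCHNL_VF_OFFLOAD_FDIR_PF capability.
 */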
static int
iavf_fdir_init(struct iavf_adapter *ad)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_flow_parser *parser;

        if (!vf->vf_res)
                return -EINVAL;

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
                parser = &iavf_fdir_parser;
        else
                return -ENOTSUP;

        return iavf_register_parser(parser, ad);
}

static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
        iavf_unregister_parser(&iavf_fdir_parser, ad);
}

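/*
 * Create a flow rule: send the parsed filter (meta) to the PF via
 * virtchnl, then keep a private copy attached to the rte_flow handle.
 */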
static int
iavf_fdir_create(struct iavf_adapter *ad,
                struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        struct iavf_fdir_conf *rule;
        int ret;

        rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
        if (!rule) {
                rte_flow_error_set(error, ENOMEM,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to allocate memory for fdir rule");
                return -rte_errno;
        }

        ret = iavf_fdir_add(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to add filter rule.");
                goto free_entry;
        }

        rte_memcpy(rule, filter, sizeof(*rule));
        flow->rule = rule;

        return 0;

free_entry:
        rte_free(rule);
        return -rte_errno;
}

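/*
 * Destroy a flow rule: ask the PF to delete the filter, then release the
 * private copy held by the rte_flow handle.
 */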
static int
iavf_fdir_destroy(struct iavf_adapter *ad,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter;
        int ret;

        filter = (struct iavf_fdir_conf *)flow->rule;

        ret = iavf_fdir_del(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to delete filter rule.");
                return -rte_errno;
        }

        flow->rule = NULL;
        rte_free(filter);

        return 0;
}

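/* Validate a parsed filter with the PF without programming it. */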
static int
iavf_fdir_validation(struct iavf_adapter *ad,
                __rte_unused struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        int ret;

        ret = iavf_fdir_check(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to validate filter rule.");
                return -rte_errno;
        }

        return 0;
}

static struct iavf_flow_engine iavf_fdir_engine = {
        .init = iavf_fdir_init,
        .uninit = iavf_fdir_uninit,
        .create = iavf_fdir_create,
        .destroy = iavf_fdir_destroy,
        .validation = iavf_fdir_validation,
        .type = IAVF_FLOW_ENGINE_FDIR,
};

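/*
 * Parse an RSS action into a queue-region destination. The region must
 * be a contiguous, power-of-two sized block of queues; it is encoded as
 * a start index plus log2(size). E.g. queues 8-15 yield index = 8,
 * region = 3 (rte_fls_u32(8) - 1 == 3).
 */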
static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
                        struct rte_flow_error *error,
                        const struct rte_flow_action *act,
                        struct virtchnl_filter_action *filter_action)
{
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check that the queue indexes for the queue region are continuous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, act,
                                        "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
                rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size should be one of "
                                "2, 4, 8, 16, 32, 64 or 128, and the total "
                                "number of queues must not exceed the VSI allocation.");
                return -rte_errno;
        }

        filter_action->act_conf.queue.index = rss->queue[0];
        filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

        return 0;
}

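/*
 * Walk the action list and fill the virtchnl action set. Exactly one
 * destination action (passthru, drop, queue or RSS queue region) is
 * accepted per rule.
 */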
static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        const struct rte_flow_action_queue *act_q;
        uint32_t dest_num = 0;
        int ret;

        int number = 0;
        struct virtchnl_filter_action *filter_action;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                /* bound-check before the cases below write actions[number] */
                if (number >= VIRTCHNL_MAX_NUM_ACTIONS) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                "Action numbers exceed the maximum value");
                        return -rte_errno;
                }

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;

                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_DROP;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_QUEUE;
                        filter_action->act_conf.queue.index = act_q->index;

                        if (filter_action->act_conf.queue.index >=
                                ad->eth_dev->data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions, "Invalid queue for FDIR.");
                                return -rte_errno;
                        }

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_Q_REGION;

                        ret = iavf_fdir_parse_action_qregion(ad,
                                                error, actions, filter_action);
                        if (ret)
                                return ret;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Invalid action.");
                        return -rte_errno;
                }
        }

        if (dest_num != 1) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Unsupported action combination");
                return -rte_errno;
        }

        return 0;
}

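/*
 * Walk the pattern items and fill the virtchnl protocol header stack.
 * For each supported item, fully masked fields (all-ones masks) are added
 * to the input set and flagged in the corresponding protocol header.
 */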
static int
iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                        const struct rte_flow_item pattern[],
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        const struct rte_flow_item *item = pattern;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
        const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
        uint64_t input_set = IAVF_INSET_NONE;

        enum rte_flow_item_type next_type;
        uint16_t ether_type;

        int layer = 0;
        struct virtchnl_proto_hdr *hdr;

        uint8_t  ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
        };

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Range matching is not supported");
                        return -rte_errno;
                }

                /* bound-check before the cases below write proto_hdr[layer] */
                if (layer >= VIRTCHNL_MAX_NUM_PROTO_HDRS) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Protocol header layers exceed the maximum value");
                        return -rte_errno;
                }

                item_type = item->type;

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);

                        if (next_type == RTE_FLOW_ITEM_TYPE_END &&
                                (!eth_spec || !eth_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "NULL eth spec/mask.");
                                return -rte_errno;
                        }

                        if (eth_spec && eth_mask) {
                                if (!rte_is_zero_ether_addr(&eth_mask->src) ||
                                    !rte_is_zero_ether_addr(&eth_mask->dst)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid MAC_addr mask.");
                                        return -rte_errno;
                                }
                        }

                        if (eth_spec && eth_mask && eth_mask->type) {
                                if (eth_mask->type != RTE_BE16(0xffff)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid type mask.");
                                        return -rte_errno;
                                }

                                ether_type = rte_be_to_cpu_16(eth_spec->type);
                                if (ether_type == RTE_ETHER_TYPE_IPV4 ||
                                        ether_type == RTE_ETHER_TYPE_IPV6) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item,
                                                "Unsupported ether_type.");
                                        return -rte_errno;
                                }

                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);

                                rte_memcpy(hdr->buffer,
                                        eth_spec, sizeof(*eth_spec));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

                        if (ipv4_spec && ipv4_mask) {
                                if (ipv4_mask->hdr.version_ihl ||
                                        ipv4_mask->hdr.total_length ||
                                        ipv4_mask->hdr.packet_id ||
                                        ipv4_mask->hdr.fragment_offset ||
                                        ipv4_mask->hdr.hdr_checksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid IPv4 mask.");
                                        return -rte_errno;
                                }

                                if (ipv4_mask->hdr.type_of_service ==
                                                                UINT8_MAX) {
                                        input_set |= IAVF_INSET_IPV4_TOS;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
                                }
                                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
                                        input_set |= IAVF_INSET_IPV4_PROTO;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
                                }
                                if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
                                        input_set |= IAVF_INSET_IPV4_TTL;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
                                }
                                if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
                                        input_set |= IAVF_INSET_IPV4_SRC;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
                                }
                                if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
                                        input_set |= IAVF_INSET_IPV4_DST;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
                                }

                                rte_memcpy(hdr->buffer,
                                        &ipv4_spec->hdr,
                                        sizeof(ipv4_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

                        if (ipv6_spec && ipv6_mask) {
                                if (ipv6_mask->hdr.payload_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid IPv6 mask");
                                        return -rte_errno;
                                }

                                if ((ipv6_mask->hdr.vtc_flow &
                                        rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
                                        == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
                                        input_set |= IAVF_INSET_IPV6_TC;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
                                }
                                if (ipv6_mask->hdr.proto == UINT8_MAX) {
                                        input_set |= IAVF_INSET_IPV6_NEXT_HDR;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
                                }
                                if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
                                        input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
                                }
                                if (!memcmp(ipv6_mask->hdr.src_addr,
                                        ipv6_addr_mask,
                                        RTE_DIM(ipv6_mask->hdr.src_addr))) {
                                        input_set |= IAVF_INSET_IPV6_SRC;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
                                }
                                if (!memcmp(ipv6_mask->hdr.dst_addr,
                                        ipv6_addr_mask,
                                        RTE_DIM(ipv6_mask->hdr.dst_addr))) {
                                        input_set |= IAVF_INSET_IPV6_DST;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
                                }

                                rte_memcpy(hdr->buffer,
                                        &ipv6_spec->hdr,
                                        sizeof(ipv6_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

                        if (udp_spec && udp_mask) {
                                if (udp_mask->hdr.dgram_len ||
                                        udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid UDP mask");
                                        return -rte_errno;
                                }

                                if (udp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_UDP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
                                }
                                if (udp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_UDP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                        l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &udp_spec->hdr,
                                                sizeof(udp_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

                        if (tcp_spec && tcp_mask) {
                                if (tcp_mask->hdr.sent_seq ||
                                        tcp_mask->hdr.recv_ack ||
                                        tcp_mask->hdr.data_off ||
                                        tcp_mask->hdr.tcp_flags ||
                                        tcp_mask->hdr.rx_win ||
                                        tcp_mask->hdr.cksum ||
                                        tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid TCP mask");
                                        return -rte_errno;
                                }

                                if (tcp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_TCP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
                                }
                                if (tcp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_TCP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                        l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &tcp_spec->hdr,
                                                sizeof(tcp_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

                        if (sctp_spec && sctp_mask) {
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid SCTP mask");
                                        return -rte_errno;
                                }

                                if (sctp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_SCTP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
                                }
                                if (sctp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_SCTP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                        l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &sctp_spec->hdr,
                                                sizeof(sctp_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_GTPU:
                        gtp_spec = item->spec;
                        gtp_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);

                        if (gtp_spec && gtp_mask) {
                                if (gtp_mask->v_pt_rsv_flags ||
                                        gtp_mask->msg_type ||
                                        gtp_mask->msg_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid GTP mask");
                                        return -rte_errno;
                                }

                                if (gtp_mask->teid == UINT32_MAX) {
                                        input_set |= IAVF_INSET_GTPU_TEID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
                                }

                                rte_memcpy(hdr->buffer,
                                        gtp_spec, sizeof(*gtp_spec));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
                        gtp_psc_spec = item->spec;
                        gtp_psc_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);

                        if (gtp_psc_spec && gtp_psc_mask) {
                                if (gtp_psc_mask->qfi == UINT8_MAX) {
                                        input_set |= IAVF_INSET_GTPU_QFI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
                                }

                                rte_memcpy(hdr->buffer, gtp_psc_spec,
                                        sizeof(*gtp_psc_spec));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Invalid pattern item.");
                        return -rte_errno;
                }
        }

        filter->input_set = input_set;

        return 0;
}

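/*
 * Top-level parse hook: match the pattern against the supported table,
 * parse pattern and actions into vf->fdir.conf, and check that the
 * resulting input set is covered by the matched entry.
 */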
static int
iavf_fdir_parse(struct iavf_adapter *ad,
                struct iavf_pattern_match_item *array,
                uint32_t array_len,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                void **meta,
                struct rte_flow_error *error)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_fdir_conf *filter = &vf->fdir.conf;
        struct iavf_pattern_match_item *item = NULL;
        uint64_t input_set;
        int ret;

        memset(filter, 0, sizeof(*filter));

        item = iavf_search_pattern_match_item(pattern, array, array_len, error);
        if (!item)
                return -rte_errno;

        ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
        if (ret)
                goto error;

        input_set = filter->input_set;
        if (!input_set || input_set & ~item->input_set_mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
                                "Invalid input set");
                ret = -rte_errno;
                goto error;
        }

        ret = iavf_fdir_parse_action(ad, actions, error, filter);
        if (ret)
                goto error;

        if (meta)
                *meta = filter;

error:
        rte_free(item);
        return ret;
}

static struct iavf_flow_parser iavf_fdir_parser = {
        .engine = &iavf_fdir_engine,
        .array = iavf_fdir_pattern,
        .array_len = RTE_DIM(iavf_fdir_pattern),
        .parse_pattern_action = iavf_fdir_parse,
        .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
};

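/* Constructor: register the FDIR engine with the generic flow framework. */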
RTE_INIT(iavf_fdir_engine_register)
{
        iavf_register_flow_engine(&iavf_fdir_engine);
}