net/iavf: support flow director mark action
drivers/net/iavf/iavf_fdir.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "virtchnl.h"
#include "iavf_rxtx.h"

#define IAVF_FDIR_MAX_QREGION_SIZE 128

#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

#define IAVF_FDIR_INSET_ETH (\
	IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
	IAVF_INSET_IPV4_TTL)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
	IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_EH (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_L2TPV3OIP (\
	IAVF_L2TPV3OIP_SESSION_ID)

#define IAVF_FDIR_INSET_ESP (\
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
	IAVF_INSET_AH_SPI)

#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_PFCP (\
	IAVF_INSET_PFCP_S_FIELD)

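/*
 * Table of patterns the FDIR engine can offload. Each entry maps a
 * generic flow pattern to the input-set bits the hardware can match on;
 * a rule whose matched fields fall outside the mask is rejected at
 * parse time.
 */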
static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
	{iavf_pattern_ethertype,	IAVF_FDIR_INSET_ETH,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4,		IAVF_FDIR_INSET_ETH_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp,	IAVF_FDIR_INSET_ETH_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_tcp,	IAVF_FDIR_INSET_ETH_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_sctp,	IAVF_FDIR_INSET_ETH_IPV4_SCTP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6,		IAVF_FDIR_INSET_ETH_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp,	IAVF_FDIR_INSET_ETH_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_tcp,	IAVF_FDIR_INSET_ETH_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_sctp,	IAVF_FDIR_INSET_ETH_IPV6_SCTP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu,	IAVF_FDIR_INSET_GTPU,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh,	IAVF_FDIR_INSET_GTPU_EH,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_l2tpv3,	IAVF_FDIR_INSET_L2TPV3OIP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_l2tpv3,	IAVF_FDIR_INSET_L2TPV3OIP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_esp,	IAVF_FDIR_INSET_ESP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_esp,	IAVF_FDIR_INSET_ESP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_ah,	IAVF_FDIR_INSET_AH,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_ah,	IAVF_FDIR_INSET_AH,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_FDIR_INSET_IPV4_NATT_ESP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_FDIR_INSET_IPV6_NATT_ESP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_pfcp,	IAVF_FDIR_INSET_PFCP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_pfcp,	IAVF_FDIR_INSET_PFCP,		IAVF_INSET_NONE},
};
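
/*
 * Illustrative usage (testpmd flow syntax; the port number and
 * addresses are example values, not taken from this file):
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20
 *        dst is 192.168.0.21 / end actions queue index 3 / mark id 1 / end
 *
 * This installs a flow director rule that steers matching packets to
 * Rx queue 3 and tags them with FDIR mark ID 1, which the Rx path
 * reports in mbuf->hash.fdir.hi with PKT_RX_FDIR_ID set.
 */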

static struct iavf_flow_parser iavf_fdir_parser;

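/*
 * Register the FDIR parser with the generic flow framework, but only
 * when the PF has advertised the VIRTCHNL_VF_OFFLOAD_FDIR_PF capability.
 */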
static int
iavf_fdir_init(struct iavf_adapter *ad)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct iavf_flow_parser *parser;

	if (!vf->vf_res)
		return -EINVAL;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
		parser = &iavf_fdir_parser;
	else
		return -ENOTSUP;

	return iavf_register_parser(parser, ad);
}

static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
	iavf_unregister_parser(&iavf_fdir_parser, ad);
}

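/*
 * Create stage: the rule was already parsed into *meta by
 * iavf_fdir_parse(); program it via virtchnl, enable FDIR ID
 * extraction on the Rx path if the rule carries a mark action, and
 * keep a private copy of the configuration in flow->rule.
 */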
static int
iavf_fdir_create(struct iavf_adapter *ad,
		struct rte_flow *flow,
		void *meta,
		struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter = meta;
	struct iavf_fdir_conf *rule;
	int ret;

	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
	if (!rule) {
		rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to allocate memory for fdir rule");
		return -rte_errno;
	}

	ret = iavf_fdir_add(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to add filter rule.");
		goto free_entry;
	}

	if (filter->mark_flag == 1)
		iavf_fdir_rx_proc_enable(ad, 1);

	rte_memcpy(rule, filter, sizeof(*rule));
	flow->rule = rule;

	return 0;

free_entry:
	rte_free(rule);
	return -rte_errno;
}

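/*
 * Destroy stage: remove the rule from hardware, then turn off the
 * Rx-path FDIR processing if this rule had requested mark support.
 */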
static int
iavf_fdir_destroy(struct iavf_adapter *ad,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter;
	int ret;

	filter = (struct iavf_fdir_conf *)flow->rule;

	ret = iavf_fdir_del(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to delete filter rule.");
		return -rte_errno;
	}

	if (filter->mark_flag == 1)
		iavf_fdir_rx_proc_enable(ad, 0);

	flow->rule = NULL;
	rte_free(filter);

	return 0;
}

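/*
 * Validation stage: ask the PF (via virtchnl) whether the parsed rule
 * can be programmed, without actually installing it.
 */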
static int
iavf_fdir_validation(struct iavf_adapter *ad,
		__rte_unused struct rte_flow *flow,
		void *meta,
		struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter = meta;
	int ret;

	ret = iavf_fdir_check(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to validate filter rule.");
		return -rte_errno;
	}

	return 0;
}

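/* Hook the FDIR stages into the generic flow engine framework. */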
static struct iavf_flow_engine iavf_fdir_engine = {
	.init = iavf_fdir_init,
	.uninit = iavf_fdir_uninit,
	.create = iavf_fdir_create,
	.destroy = iavf_fdir_destroy,
	.validation = iavf_fdir_validation,
	.type = IAVF_FLOW_ENGINE_FDIR,
};

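/*
 * Translate an RTE_FLOW_ACTION_TYPE_RSS action into a queue-region
 * action: the queue list must be contiguous, within the device's Rx
 * queue count, and a power-of-two length, since the region is encoded
 * as a start index plus log2 of its size.
 */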
static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
			struct rte_flow_error *error,
			const struct rte_flow_action *act,
			struct virtchnl_filter_action *filter_action)
{
	const struct rte_flow_action_rss *rss = act->conf;
	uint32_t i;

	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"Invalid action.");
		return -rte_errno;
	}

	if (rss->queue_num <= 1) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"Queue region size can't be 0 or 1.");
		return -rte_errno;
	}

	/* check that the queue region's queue indexes are contiguous */
	for (i = 0; i < rss->queue_num - 1; i++) {
		if (rss->queue[i + 1] != rss->queue[i] + 1) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, act,
					"Discontinuous queue region");
			return -rte_errno;
		}
	}

	if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"Invalid queue region indexes.");
		return -rte_errno;
	}

	if (!(rte_is_power_of_2(rss->queue_num) &&
		rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"The region size should be any of the following values: "
				"1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
				"of queues does not exceed the VSI allocation.");
		return -rte_errno;
	}

	filter_action->act_conf.queue.index = rss->queue[0];
	filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

	return 0;
}

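/*
 * Walk the action list and build the virtchnl action set. At most one
 * fate action (passthru/drop/queue/rss) and at most one mark action
 * are accepted, and at least one of the two must be present; a
 * mark-only rule is completed with an implicit passthru so that
 * matching packets still reach their normal destination.
 */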
static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error,
			struct iavf_fdir_conf *filter)
{
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	uint32_t dest_num = 0;
	uint32_t mark_num = 0;
	int ret;

	int number = 0;
	struct virtchnl_filter_action *filter_action;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_DROP;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			act_q = actions->conf;
			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_QUEUE;
			filter_action->act_conf.queue.index = act_q->index;

			if (filter_action->act_conf.queue.index >=
				ad->eth_dev->data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					actions, "Invalid queue for FDIR.");
				return -rte_errno;
			}

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_Q_REGION;

			ret = iavf_fdir_parse_action_qregion(ad,
						error, actions, filter_action);
			if (ret)
				return ret;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			mark_num++;

			filter->mark_flag = 1;
			mark_spec = actions->conf;
			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_MARK;
			filter_action->act_conf.mark_id = mark_spec->id;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Invalid action.");
			return -rte_errno;
		}
	}

	if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, actions,
			"Number of actions exceeds the maximum allowed");
		return -rte_errno;
	}

	if (dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, actions,
			"Unsupported action combination");
		return -rte_errno;
	}

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, actions,
			"Too many mark actions");
		return -rte_errno;
	}

	if (dest_num + mark_num == 0) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, actions,
			"Empty action");
		return -rte_errno;
	}

	/* Mark only is equal to mark + passthru. */
	if (dest_num == 0) {
		filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
		filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
		filter->add_fltr.rule_cfg.action_set.count = ++number;
	}

	return 0;
}

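/*
 * Walk the pattern items and translate each into a virtchnl protocol
 * header descriptor: the item type selects the header, each all-ones
 * field mask turns on the corresponding field selector, and the spec
 * bytes are copied into the header buffer. The accumulated input_set
 * is checked against the pattern table by the caller.
 */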
static int
iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
			const struct rte_flow_item pattern[],
			struct rte_flow_error *error,
			struct iavf_fdir_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
	const struct rte_flow_item_esp *esp_spec, *esp_mask;
	const struct rte_flow_item_ah *ah_spec, *ah_mask;
	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
	uint64_t input_set = IAVF_INSET_NONE;

	enum rte_flow_item_type next_type;
	uint16_t ether_type;

	int layer = 0;
	struct virtchnl_proto_hdr *hdr;

	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Range not supported");
			return -rte_errno;
		}

		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;
			next_type = (item + 1)->type;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);

			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
				(!eth_spec || !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "NULL eth spec/mask.");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				if (!rte_is_zero_ether_addr(&eth_mask->src) ||
				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM, item,
						"Invalid MAC_addr mask.");
					return -rte_errno;
				}
			}

			if (eth_spec && eth_mask && eth_mask->type) {
				if (eth_mask->type != RTE_BE16(0xffff)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Invalid type mask.");
					return -rte_errno;
				}

				ether_type = rte_be_to_cpu_16(eth_spec->type);
				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
					ether_type == RTE_ETHER_TYPE_IPV6) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Unsupported ether_type.");
					return -rte_errno;
				}

				input_set |= IAVF_INSET_ETHERTYPE;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);

				rte_memcpy(hdr->buffer,
					eth_spec, sizeof(*eth_spec));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

			if (ipv4_spec && ipv4_mask) {
				if (ipv4_mask->hdr.version_ihl ||
					ipv4_mask->hdr.total_length ||
					ipv4_mask->hdr.packet_id ||
					ipv4_mask->hdr.fragment_offset ||
					ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Invalid IPv4 mask.");
					return -rte_errno;
				}

				if (ipv4_mask->hdr.type_of_service == UINT8_MAX) {
					input_set |= IAVF_INSET_IPV4_TOS;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
				}
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
					input_set |= IAVF_INSET_IPV4_PROTO;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
				}
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
					input_set |= IAVF_INSET_IPV4_TTL;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
				}
				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
					input_set |= IAVF_INSET_IPV4_SRC;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
				}
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
					input_set |= IAVF_INSET_IPV4_DST;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
				}

				rte_memcpy(hdr->buffer,
					&ipv4_spec->hdr,
					sizeof(ipv4_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

			if (ipv6_spec && ipv6_mask) {
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Invalid IPv6 mask");
					return -rte_errno;
				}

				if ((ipv6_mask->hdr.vtc_flow &
					rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
					== rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
					input_set |= IAVF_INSET_IPV6_TC;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
				}
				if (ipv6_mask->hdr.proto == UINT8_MAX) {
					input_set |= IAVF_INSET_IPV6_NEXT_HDR;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
				}
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
					input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
				}
				if (!memcmp(ipv6_mask->hdr.src_addr,
					ipv6_addr_mask,
					RTE_DIM(ipv6_mask->hdr.src_addr))) {
					input_set |= IAVF_INSET_IPV6_SRC;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
				}
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					ipv6_addr_mask,
					RTE_DIM(ipv6_mask->hdr.dst_addr))) {
					input_set |= IAVF_INSET_IPV6_DST;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
				}

				rte_memcpy(hdr->buffer,
					&ipv6_spec->hdr,
					sizeof(ipv6_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

			if (udp_spec && udp_mask) {
				if (udp_mask->hdr.dgram_len ||
					udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM, item,
						"Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_UDP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
				}
				if (udp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_UDP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
					l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						&udp_spec->hdr,
						sizeof(udp_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

			if (tcp_spec && tcp_mask) {
				if (tcp_mask->hdr.sent_seq ||
					tcp_mask->hdr.recv_ack ||
					tcp_mask->hdr.data_off ||
					tcp_mask->hdr.tcp_flags ||
					tcp_mask->hdr.rx_win ||
					tcp_mask->hdr.cksum ||
					tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM, item,
						"Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_TCP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
				}
				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_TCP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
					l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						&tcp_spec->hdr,
						sizeof(tcp_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

			if (sctp_spec && sctp_mask) {
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM, item,
						"Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_SCTP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
				}
				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_SCTP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
					l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						&sctp_spec->hdr,
						sizeof(sctp_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_GTPU:
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);

			if (gtp_spec && gtp_mask) {
				if (gtp_mask->v_pt_rsv_flags ||
					gtp_mask->msg_type ||
					gtp_mask->msg_len) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Invalid GTP mask");
					return -rte_errno;
				}

				if (gtp_mask->teid == UINT32_MAX) {
					input_set |= IAVF_INSET_GTPU_TEID;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
				}

				rte_memcpy(hdr->buffer,
					gtp_spec, sizeof(*gtp_spec));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);

			if (gtp_psc_spec && gtp_psc_mask) {
				if (gtp_psc_mask->qfi == UINT8_MAX) {
					input_set |= IAVF_INSET_GTPU_QFI;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
				}

				rte_memcpy(hdr->buffer, gtp_psc_spec,
					sizeof(*gtp_psc_spec));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
			l2tpv3oip_spec = item->spec;
			l2tpv3oip_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);

			if (l2tpv3oip_spec && l2tpv3oip_mask) {
				if (l2tpv3oip_mask->session_id == UINT32_MAX) {
					input_set |= IAVF_L2TPV3OIP_SESSION_ID;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
				}

				rte_memcpy(hdr->buffer, l2tpv3oip_spec,
					sizeof(*l2tpv3oip_spec));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_ESP:
			esp_spec = item->spec;
			esp_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

			if (esp_spec && esp_mask) {
				if (esp_mask->hdr.spi == UINT32_MAX) {
					input_set |= IAVF_INSET_ESP_SPI;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
				}

				rte_memcpy(hdr->buffer, &esp_spec->hdr,
					sizeof(esp_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_AH:
			ah_spec = item->spec;
			ah_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

			if (ah_spec && ah_mask) {
				if (ah_mask->spi == UINT32_MAX) {
					input_set |= IAVF_INSET_AH_SPI;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
				}

				rte_memcpy(hdr->buffer, ah_spec,
					sizeof(*ah_spec));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_PFCP:
			pfcp_spec = item->spec;
			pfcp_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);

			if (pfcp_spec && pfcp_mask) {
				if (pfcp_mask->s_field == UINT8_MAX) {
					input_set |= IAVF_INSET_PFCP_S_FIELD;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
				}

				rte_memcpy(hdr->buffer, pfcp_spec,
					sizeof(*pfcp_spec));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_VOID:
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Invalid pattern item.");
			return -rte_errno;
		}
	}

	if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Protocol header layers exceed the maximum value");
		return -rte_errno;
	}

	filter->input_set = input_set;

	return 0;
}

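/*
 * Top-level parse entry point: match the pattern against the table of
 * supported patterns, fill vf->fdir.conf from the pattern and actions,
 * and verify that the matched fields fit the pattern's input-set mask.
 */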
static int
iavf_fdir_parse(struct iavf_adapter *ad,
		struct iavf_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta,
		struct rte_flow_error *error)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct iavf_fdir_conf *filter = &vf->fdir.conf;
	struct iavf_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));

	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
	if (ret)
		goto error;

	input_set = filter->input_set;
	if (!input_set || input_set & ~item->input_set_mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
				"Invalid input set");
		ret = -rte_errno;
		goto error;
	}

	ret = iavf_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		goto error;

	if (meta)
		*meta = filter;

error:
	rte_free(item);
	return ret;
}

static struct iavf_flow_parser iavf_fdir_parser = {
	.engine = &iavf_fdir_engine,
	.array = iavf_fdir_pattern,
	.array_len = RTE_DIM(iavf_fdir_pattern),
	.parse_pattern_action = iavf_fdir_parse,
	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
};

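/* Register the FDIR engine with the flow framework at startup. */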
RTE_INIT(iavf_fdir_engine_register)
{
	iavf_register_flow_engine(&iavf_fdir_engine);
}