1e50a07734d58993422914842097f65484a6b96f
[dpdk.git] / drivers / net / iavf / iavf_fdir.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_malloc.h>
16 #include <rte_tailq.h>
17
18 #include "iavf.h"
19 #include "iavf_generic_flow.h"
20 #include "virtchnl.h"
21
/* Largest queue region (number of queues) an RSS action may describe. */
#define IAVF_FDIR_MAX_QREGION_SIZE 128

/* IPv6 traffic class occupies bits 20-27 of the vtc_flow word. */
#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

/* Per-pattern input-set bitmaps: for each supported flow pattern below,
 * the set of header fields FDIR is able to match on.
 */
#define IAVF_FDIR_INSET_ETH (\
	IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
	IAVF_INSET_IPV4_TTL)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
	IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_EH (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_L2TPV3OIP (\
	IAVF_L2TPV3OIP_SESSION_ID)

#define IAVF_FDIR_INSET_ESP (\
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
	IAVF_INSET_AH_SPI)

/* NAT-T: ESP carried over UDP, so the outer IP addresses also match. */
#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_PFCP (\
	IAVF_INSET_PFCP_S_FIELD)
/* Table of flow patterns supported by the FDIR engine.  Each row pairs a
 * pattern (from iavf_generic_flow) with the input-set bitmap of fields
 * that may be matched for it; the last column is unused here and always
 * IAVF_INSET_NONE.
 */
static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp,		IAVF_FDIR_INSET_ETH_IPV4_UDP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_tcp,		IAVF_FDIR_INSET_ETH_IPV4_TCP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_sctp,		IAVF_FDIR_INSET_ETH_IPV4_SCTP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6,			IAVF_FDIR_INSET_ETH_IPV6,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp,		IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_GTPU,			IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_GTPU_EH,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_esp,		IAVF_FDIR_INSET_ESP,			IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_esp,		IAVF_FDIR_INSET_ESP,			IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_esp,		IAVF_FDIR_INSET_IPV4_NATT_ESP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_esp,		IAVF_FDIR_INSET_IPV6_NATT_ESP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_pfcp,		IAVF_FDIR_INSET_PFCP,			IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_pfcp,		IAVF_FDIR_INSET_PFCP,			IAVF_INSET_NONE},
};
121
122 static struct iavf_flow_parser iavf_fdir_parser;
123
124 static int
125 iavf_fdir_init(struct iavf_adapter *ad)
126 {
127         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
128         struct iavf_flow_parser *parser;
129
130         if (!vf->vf_res)
131                 return -EINVAL;
132
133         if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
134                 parser = &iavf_fdir_parser;
135         else
136                 return -ENOTSUP;
137
138         return iavf_register_parser(parser, ad);
139 }
140
/* Engine uninit hook: drop the FDIR parser registration. */
static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
	iavf_unregister_parser(&iavf_fdir_parser, ad);
}
146
147 static int
148 iavf_fdir_create(struct iavf_adapter *ad,
149                 struct rte_flow *flow,
150                 void *meta,
151                 struct rte_flow_error *error)
152 {
153         struct iavf_fdir_conf *filter = meta;
154         struct iavf_fdir_conf *rule;
155         int ret;
156
157         rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
158         if (!rule) {
159                 rte_flow_error_set(error, ENOMEM,
160                                 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
161                                 "Failed to allocate memory for fdir rule");
162                 return -rte_errno;
163         }
164
165         ret = iavf_fdir_add(ad, filter);
166         if (ret) {
167                 rte_flow_error_set(error, -ret,
168                                 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
169                                 "Failed to add filter rule.");
170                 goto free_entry;
171         }
172
173         rte_memcpy(rule, filter, sizeof(*rule));
174         flow->rule = rule;
175
176         return 0;
177
178 free_entry:
179         rte_free(rule);
180         return -rte_errno;
181 }
182
183 static int
184 iavf_fdir_destroy(struct iavf_adapter *ad,
185                 struct rte_flow *flow,
186                 struct rte_flow_error *error)
187 {
188         struct iavf_fdir_conf *filter;
189         int ret;
190
191         filter = (struct iavf_fdir_conf *)flow->rule;
192
193         ret = iavf_fdir_del(ad, filter);
194         if (ret) {
195                 rte_flow_error_set(error, -ret,
196                                 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
197                                 "Failed to delete filter rule.");
198                 return -rte_errno;
199         }
200
201         flow->rule = NULL;
202         rte_free(filter);
203
204         return 0;
205 }
206
207 static int
208 iavf_fdir_validation(struct iavf_adapter *ad,
209                 __rte_unused struct rte_flow *flow,
210                 void *meta,
211                 struct rte_flow_error *error)
212 {
213         struct iavf_fdir_conf *filter = meta;
214         int ret;
215
216         ret = iavf_fdir_check(ad, filter);
217         if (ret) {
218                 rte_flow_error_set(error, -ret,
219                                 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
220                                 "Failed to validate filter rule.");
221                 return -rte_errno;
222         }
223
224         return 0;
225 };
226
/* FDIR flow engine ops table handed to the generic flow framework. */
static struct iavf_flow_engine iavf_fdir_engine = {
	.init = iavf_fdir_init,
	.uninit = iavf_fdir_uninit,
	.create = iavf_fdir_create,
	.destroy = iavf_fdir_destroy,
	.validation = iavf_fdir_validation,
	.type = IAVF_FLOW_ENGINE_FDIR,
};
235
236 static int
237 iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
238                         struct rte_flow_error *error,
239                         const struct rte_flow_action *act,
240                         struct virtchnl_filter_action *filter_action)
241 {
242         const struct rte_flow_action_rss *rss = act->conf;
243         uint32_t i;
244
245         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
246                 rte_flow_error_set(error, EINVAL,
247                                 RTE_FLOW_ERROR_TYPE_ACTION, act,
248                                 "Invalid action.");
249                 return -rte_errno;
250         }
251
252         if (rss->queue_num <= 1) {
253                 rte_flow_error_set(error, EINVAL,
254                                 RTE_FLOW_ERROR_TYPE_ACTION, act,
255                                 "Queue region size can't be 0 or 1.");
256                 return -rte_errno;
257         }
258
259         /* check if queue index for queue region is continuous */
260         for (i = 0; i < rss->queue_num - 1; i++) {
261                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
262                         rte_flow_error_set(error, EINVAL,
263                                         RTE_FLOW_ERROR_TYPE_ACTION, act,
264                                         "Discontinuous queue region");
265                         return -rte_errno;
266                 }
267         }
268
269         if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
270                 rte_flow_error_set(error, EINVAL,
271                                 RTE_FLOW_ERROR_TYPE_ACTION, act,
272                                 "Invalid queue region indexes.");
273                 return -rte_errno;
274         }
275
276         if (!(rte_is_power_of_2(rss->queue_num) &&
277                 rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
278                 rte_flow_error_set(error, EINVAL,
279                                 RTE_FLOW_ERROR_TYPE_ACTION, act,
280                                 "The region size should be any of the following values:"
281                                 "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
282                                 "of queues do not exceed the VSI allocation.");
283                 return -rte_errno;
284         }
285
286         filter_action->act_conf.queue.index = rss->queue[0];
287         filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;
288
289         return 0;
290 }
291
292 static int
293 iavf_fdir_parse_action(struct iavf_adapter *ad,
294                         const struct rte_flow_action actions[],
295                         struct rte_flow_error *error,
296                         struct iavf_fdir_conf *filter)
297 {
298         const struct rte_flow_action_queue *act_q;
299         uint32_t dest_num = 0;
300         int ret;
301
302         int number = 0;
303         struct virtchnl_filter_action *filter_action;
304
305         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
306                 switch (actions->type) {
307                 case RTE_FLOW_ACTION_TYPE_VOID:
308                         break;
309
310                 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
311                         dest_num++;
312
313                         filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
314
315                         filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
316
317                         filter->add_fltr.rule_cfg.action_set.count = ++number;
318                         break;
319
320                 case RTE_FLOW_ACTION_TYPE_DROP:
321                         dest_num++;
322
323                         filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
324
325                         filter_action->type = VIRTCHNL_ACTION_DROP;
326
327                         filter->add_fltr.rule_cfg.action_set.count = ++number;
328                         break;
329
330                 case RTE_FLOW_ACTION_TYPE_QUEUE:
331                         dest_num++;
332
333                         act_q = actions->conf;
334                         filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
335
336                         filter_action->type = VIRTCHNL_ACTION_QUEUE;
337                         filter_action->act_conf.queue.index = act_q->index;
338
339                         if (filter_action->act_conf.queue.index >=
340                                 ad->eth_dev->data->nb_rx_queues) {
341                                 rte_flow_error_set(error, EINVAL,
342                                         RTE_FLOW_ERROR_TYPE_ACTION,
343                                         actions, "Invalid queue for FDIR.");
344                                 return -rte_errno;
345                         }
346
347                         filter->add_fltr.rule_cfg.action_set.count = ++number;
348                         break;
349
350                 case RTE_FLOW_ACTION_TYPE_RSS:
351                         dest_num++;
352
353                         filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
354
355                         filter_action->type = VIRTCHNL_ACTION_Q_REGION;
356
357                         ret = iavf_fdir_parse_action_qregion(ad,
358                                                 error, actions, filter_action);
359                         if (ret)
360                                 return ret;
361
362                         filter->add_fltr.rule_cfg.action_set.count = ++number;
363                         break;
364
365                 default:
366                         rte_flow_error_set(error, EINVAL,
367                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
368                                         "Invalid action.");
369                         return -rte_errno;
370                 }
371         }
372
373         if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
374                 rte_flow_error_set(error, EINVAL,
375                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
376                         "Action numbers exceed the maximum value");
377                 return -rte_errno;
378         }
379
380         if (dest_num == 0 || dest_num >= 2) {
381                 rte_flow_error_set(error, EINVAL,
382                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
383                         "Unsupported action combination");
384                 return -rte_errno;
385         }
386
387         return 0;
388 }
389
390 static int
391 iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
392                         const struct rte_flow_item pattern[],
393                         struct rte_flow_error *error,
394                         struct iavf_fdir_conf *filter)
395 {
396         const struct rte_flow_item *item = pattern;
397         enum rte_flow_item_type item_type;
398         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
399         const struct rte_flow_item_eth *eth_spec, *eth_mask;
400         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
401         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
402         const struct rte_flow_item_udp *udp_spec, *udp_mask;
403         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
404         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
405         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
406         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
407         const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
408         const struct rte_flow_item_esp *esp_spec, *esp_mask;
409         const struct rte_flow_item_ah *ah_spec, *ah_mask;
410         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
411         uint64_t input_set = IAVF_INSET_NONE;
412
413         enum rte_flow_item_type next_type;
414         uint16_t ether_type;
415
416         int layer = 0;
417         struct virtchnl_proto_hdr *hdr;
418
419         uint8_t  ipv6_addr_mask[16] = {
420                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
421                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
422         };
423
424         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
425                 if (item->last) {
426                         rte_flow_error_set(error, EINVAL,
427                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
428                                         "Not support range");
429                 }
430
431                 item_type = item->type;
432
433                 switch (item_type) {
434                 case RTE_FLOW_ITEM_TYPE_ETH:
435                         eth_spec = item->spec;
436                         eth_mask = item->mask;
437                         next_type = (item + 1)->type;
438
439                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
440
441                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
442
443                         if (next_type == RTE_FLOW_ITEM_TYPE_END &&
444                                 (!eth_spec || !eth_mask)) {
445                                 rte_flow_error_set(error, EINVAL,
446                                                 RTE_FLOW_ERROR_TYPE_ITEM,
447                                                 item, "NULL eth spec/mask.");
448                                 return -rte_errno;
449                         }
450
451                         if (eth_spec && eth_mask) {
452                                 if (!rte_is_zero_ether_addr(&eth_mask->src) ||
453                                     !rte_is_zero_ether_addr(&eth_mask->dst)) {
454                                         rte_flow_error_set(error, EINVAL,
455                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
456                                                 "Invalid MAC_addr mask.");
457                                         return -rte_errno;
458                                 }
459                         }
460
461                         if (eth_spec && eth_mask && eth_mask->type) {
462                                 if (eth_mask->type != RTE_BE16(0xffff)) {
463                                         rte_flow_error_set(error, EINVAL,
464                                                 RTE_FLOW_ERROR_TYPE_ITEM,
465                                                 item, "Invalid type mask.");
466                                         return -rte_errno;
467                                 }
468
469                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
470                                 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
471                                         ether_type == RTE_ETHER_TYPE_IPV6) {
472                                         rte_flow_error_set(error, EINVAL,
473                                                 RTE_FLOW_ERROR_TYPE_ITEM,
474                                                 item,
475                                                 "Unsupported ether_type.");
476                                         return -rte_errno;
477                                 }
478
479                                 input_set |= IAVF_INSET_ETHERTYPE;
480                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
481
482                                 rte_memcpy(hdr->buffer,
483                                         eth_spec, sizeof(*eth_spec));
484                         }
485
486                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
487                         break;
488
489                 case RTE_FLOW_ITEM_TYPE_IPV4:
490                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
491                         ipv4_spec = item->spec;
492                         ipv4_mask = item->mask;
493
494                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
495
496                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
497
498                         if (ipv4_spec && ipv4_mask) {
499                                 if (ipv4_mask->hdr.version_ihl ||
500                                         ipv4_mask->hdr.total_length ||
501                                         ipv4_mask->hdr.packet_id ||
502                                         ipv4_mask->hdr.fragment_offset ||
503                                         ipv4_mask->hdr.hdr_checksum) {
504                                         rte_flow_error_set(error, EINVAL,
505                                                 RTE_FLOW_ERROR_TYPE_ITEM,
506                                                 item, "Invalid IPv4 mask.");
507                                         return -rte_errno;
508                                 }
509
510                                 if (ipv4_mask->hdr.type_of_service ==
511                                                                 UINT8_MAX) {
512                                         input_set |= IAVF_INSET_IPV4_TOS;
513                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
514                                 }
515                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
516                                         input_set |= IAVF_INSET_IPV4_PROTO;
517                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
518                                 }
519                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
520                                         input_set |= IAVF_INSET_IPV4_TTL;
521                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
522                                 }
523                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
524                                         input_set |= IAVF_INSET_IPV4_SRC;
525                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
526                                 }
527                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
528                                         input_set |= IAVF_INSET_IPV4_DST;
529                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
530                                 }
531
532                                 rte_memcpy(hdr->buffer,
533                                         &ipv4_spec->hdr,
534                                         sizeof(ipv4_spec->hdr));
535                         }
536
537                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
538                         break;
539
540                 case RTE_FLOW_ITEM_TYPE_IPV6:
541                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
542                         ipv6_spec = item->spec;
543                         ipv6_mask = item->mask;
544
545                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
546
547                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
548
549                         if (ipv6_spec && ipv6_mask) {
550                                 if (ipv6_mask->hdr.payload_len) {
551                                         rte_flow_error_set(error, EINVAL,
552                                                 RTE_FLOW_ERROR_TYPE_ITEM,
553                                                 item, "Invalid IPv6 mask");
554                                         return -rte_errno;
555                                 }
556
557                                 if ((ipv6_mask->hdr.vtc_flow &
558                                         rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
559                                         == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
560                                         input_set |= IAVF_INSET_IPV6_TC;
561                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
562                                 }
563                                 if (ipv6_mask->hdr.proto == UINT8_MAX) {
564                                         input_set |= IAVF_INSET_IPV6_NEXT_HDR;
565                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
566                                 }
567                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
568                                         input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
569                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
570                                 }
571                                 if (!memcmp(ipv6_mask->hdr.src_addr,
572                                         ipv6_addr_mask,
573                                         RTE_DIM(ipv6_mask->hdr.src_addr))) {
574                                         input_set |= IAVF_INSET_IPV6_SRC;
575                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
576                                 }
577                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
578                                         ipv6_addr_mask,
579                                         RTE_DIM(ipv6_mask->hdr.dst_addr))) {
580                                         input_set |= IAVF_INSET_IPV6_DST;
581                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
582                                 }
583
584                                 rte_memcpy(hdr->buffer,
585                                         &ipv6_spec->hdr,
586                                         sizeof(ipv6_spec->hdr));
587                         }
588
589                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
590                         break;
591
592                 case RTE_FLOW_ITEM_TYPE_UDP:
593                         udp_spec = item->spec;
594                         udp_mask = item->mask;
595
596                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
597
598                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
599
600                         if (udp_spec && udp_mask) {
601                                 if (udp_mask->hdr.dgram_len ||
602                                         udp_mask->hdr.dgram_cksum) {
603                                         rte_flow_error_set(error, EINVAL,
604                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
605                                                 "Invalid UDP mask");
606                                         return -rte_errno;
607                                 }
608
609                                 if (udp_mask->hdr.src_port == UINT16_MAX) {
610                                         input_set |= IAVF_INSET_UDP_SRC_PORT;
611                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
612                                 }
613                                 if (udp_mask->hdr.dst_port == UINT16_MAX) {
614                                         input_set |= IAVF_INSET_UDP_DST_PORT;
615                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
616                                 }
617
618                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
619                                         rte_memcpy(hdr->buffer,
620                                                 &udp_spec->hdr,
621                                                 sizeof(udp_spec->hdr));
622                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
623                                         rte_memcpy(hdr->buffer,
624                                                 &udp_spec->hdr,
625                                                 sizeof(udp_spec->hdr));
626                         }
627
628                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
629                         break;
630
631                 case RTE_FLOW_ITEM_TYPE_TCP:
632                         tcp_spec = item->spec;
633                         tcp_mask = item->mask;
634
635                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
636
637                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
638
639                         if (tcp_spec && tcp_mask) {
640                                 if (tcp_mask->hdr.sent_seq ||
641                                         tcp_mask->hdr.recv_ack ||
642                                         tcp_mask->hdr.data_off ||
643                                         tcp_mask->hdr.tcp_flags ||
644                                         tcp_mask->hdr.rx_win ||
645                                         tcp_mask->hdr.cksum ||
646                                         tcp_mask->hdr.tcp_urp) {
647                                         rte_flow_error_set(error, EINVAL,
648                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
649                                                 "Invalid TCP mask");
650                                         return -rte_errno;
651                                 }
652
653                                 if (tcp_mask->hdr.src_port == UINT16_MAX) {
654                                         input_set |= IAVF_INSET_TCP_SRC_PORT;
655                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
656                                 }
657                                 if (tcp_mask->hdr.dst_port == UINT16_MAX) {
658                                         input_set |= IAVF_INSET_TCP_DST_PORT;
659                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
660                                 }
661
662                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
663                                         rte_memcpy(hdr->buffer,
664                                                 &tcp_spec->hdr,
665                                                 sizeof(tcp_spec->hdr));
666                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
667                                         rte_memcpy(hdr->buffer,
668                                                 &tcp_spec->hdr,
669                                                 sizeof(tcp_spec->hdr));
670                         }
671
672                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
673                         break;
674
675                 case RTE_FLOW_ITEM_TYPE_SCTP:
676                         sctp_spec = item->spec;
677                         sctp_mask = item->mask;
678
679                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
680
681                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
682
683                         if (sctp_spec && sctp_mask) {
684                                 if (sctp_mask->hdr.cksum) {
685                                         rte_flow_error_set(error, EINVAL,
686                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
687                                                 "Invalid UDP mask");
688                                         return -rte_errno;
689                                 }
690
691                                 if (sctp_mask->hdr.src_port == UINT16_MAX) {
692                                         input_set |= IAVF_INSET_SCTP_SRC_PORT;
693                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
694                                 }
695                                 if (sctp_mask->hdr.dst_port == UINT16_MAX) {
696                                         input_set |= IAVF_INSET_SCTP_DST_PORT;
697                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
698                                 }
699
700                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
701                                         rte_memcpy(hdr->buffer,
702                                                 &sctp_spec->hdr,
703                                                 sizeof(sctp_spec->hdr));
704                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
705                                         rte_memcpy(hdr->buffer,
706                                                 &sctp_spec->hdr,
707                                                 sizeof(sctp_spec->hdr));
708                         }
709
710                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
711                         break;
712
713                 case RTE_FLOW_ITEM_TYPE_GTPU:
714                         gtp_spec = item->spec;
715                         gtp_mask = item->mask;
716
717                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
718
719                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
720
721                         if (gtp_spec && gtp_mask) {
722                                 if (gtp_mask->v_pt_rsv_flags ||
723                                         gtp_mask->msg_type ||
724                                         gtp_mask->msg_len) {
725                                         rte_flow_error_set(error, EINVAL,
726                                                 RTE_FLOW_ERROR_TYPE_ITEM,
727                                                 item, "Invalid GTP mask");
728                                         return -rte_errno;
729                                 }
730
731                                 if (gtp_mask->teid == UINT32_MAX) {
732                                         input_set |= IAVF_INSET_GTPU_TEID;
733                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
734                                 }
735
736                                 rte_memcpy(hdr->buffer,
737                                         gtp_spec, sizeof(*gtp_spec));
738                         }
739
740                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
741                         break;
742
743                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
744                         gtp_psc_spec = item->spec;
745                         gtp_psc_mask = item->mask;
746
747                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
748
749                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
750
751                         if (gtp_psc_spec && gtp_psc_mask) {
752                                 if (gtp_psc_mask->qfi == UINT8_MAX) {
753                                         input_set |= IAVF_INSET_GTPU_QFI;
754                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
755                                 }
756
757                                 rte_memcpy(hdr->buffer, gtp_psc_spec,
758                                         sizeof(*gtp_psc_spec));
759                         }
760
761                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
762                         break;
763
764                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
765                         l2tpv3oip_spec = item->spec;
766                         l2tpv3oip_mask = item->mask;
767
768                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
769
770                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
771
772                         if (l2tpv3oip_spec && l2tpv3oip_mask) {
773                                 if (l2tpv3oip_mask->session_id == UINT32_MAX) {
774                                         input_set |= IAVF_L2TPV3OIP_SESSION_ID;
775                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
776                                 }
777
778                                 rte_memcpy(hdr->buffer, l2tpv3oip_spec,
779                                         sizeof(*l2tpv3oip_spec));
780                         }
781
782                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
783                         break;
784
785                 case RTE_FLOW_ITEM_TYPE_ESP:
786                         esp_spec = item->spec;
787                         esp_mask = item->mask;
788
789                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
790
791                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
792
793                         if (esp_spec && esp_mask) {
794                                 if (esp_mask->hdr.spi == UINT32_MAX) {
795                                         input_set |= IAVF_INSET_ESP_SPI;
796                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
797                                 }
798
799                                 rte_memcpy(hdr->buffer, &esp_spec->hdr,
800                                         sizeof(esp_spec->hdr));
801                         }
802
803                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
804                         break;
805
806                 case RTE_FLOW_ITEM_TYPE_AH:
807                         ah_spec = item->spec;
808                         ah_mask = item->mask;
809
810                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
811
812                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
813
814                         if (ah_spec && ah_mask) {
815                                 if (ah_mask->spi == UINT32_MAX) {
816                                         input_set |= IAVF_INSET_AH_SPI;
817                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
818                                 }
819
820                                 rte_memcpy(hdr->buffer, ah_spec,
821                                         sizeof(*ah_spec));
822                         }
823
824                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
825                         break;
826
827                 case RTE_FLOW_ITEM_TYPE_PFCP:
828                         pfcp_spec = item->spec;
829                         pfcp_mask = item->mask;
830
831                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
832
833                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
834
835                         if (pfcp_spec && pfcp_mask) {
836                                 if (pfcp_mask->s_field == UINT8_MAX) {
837                                         input_set |= IAVF_INSET_PFCP_S_FIELD;
838                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
839                                 }
840
841                                 rte_memcpy(hdr->buffer, pfcp_spec,
842                                         sizeof(*pfcp_spec));
843                         }
844
845                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
846                         break;
847
848                 case RTE_FLOW_ITEM_TYPE_VOID:
849                         break;
850
851                 default:
852                         rte_flow_error_set(error, EINVAL,
853                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
854                                         "Invalid pattern item.");
855                         return -rte_errno;
856                 }
857         }
858
859         if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
860                 rte_flow_error_set(error, EINVAL,
861                         RTE_FLOW_ERROR_TYPE_ITEM, item,
862                         "Protocol header layers exceed the maximum value");
863                 return -rte_errno;
864         }
865
866         filter->input_set = input_set;
867
868         return 0;
869 }
870
871 static int
872 iavf_fdir_parse(struct iavf_adapter *ad,
873                 struct iavf_pattern_match_item *array,
874                 uint32_t array_len,
875                 const struct rte_flow_item pattern[],
876                 const struct rte_flow_action actions[],
877                 void **meta,
878                 struct rte_flow_error *error)
879 {
880         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
881         struct iavf_fdir_conf *filter = &vf->fdir.conf;
882         struct iavf_pattern_match_item *item = NULL;
883         uint64_t input_set;
884         int ret;
885
886         memset(filter, 0, sizeof(*filter));
887
888         item = iavf_search_pattern_match_item(pattern, array, array_len, error);
889         if (!item)
890                 return -rte_errno;
891
892         ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
893         if (ret)
894                 goto error;
895
896         input_set = filter->input_set;
897         if (!input_set || input_set & ~item->input_set_mask) {
898                 rte_flow_error_set(error, EINVAL,
899                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
900                                 "Invalid input set");
901                 ret = -rte_errno;
902                 goto error;
903         }
904
905         ret = iavf_fdir_parse_action(ad, actions, error, filter);
906         if (ret)
907                 goto error;
908
909         if (meta)
910                 *meta = filter;
911
912 error:
913         rte_free(item);
914         return ret;
915 }
916
917 static struct iavf_flow_parser iavf_fdir_parser = {
918         .engine = &iavf_fdir_engine,
919         .array = iavf_fdir_pattern,
920         .array_len = RTE_DIM(iavf_fdir_pattern),
921         .parse_pattern_action = iavf_fdir_parse,
922         .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
923 };
924
/* Constructor (runs at shared-object load, before main): register the
 * FDIR flow engine with the generic iavf flow framework.
 */
RTE_INIT(iavf_fdir_engine_register)
{
	iavf_register_flow_engine(&iavf_fdir_engine);
}