net/iavf: support flow director L2TPv3 and IPsec
[dpdk.git] drivers/net/iavf/iavf_fdir.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */
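
/* Flow director (FDIR) rule support for the iavf PMD.
 *
 * rte_flow patterns and actions are parsed into virtchnl FDIR messages
 * and programmed on the PF on behalf of the VF.  Besides plain
 * L2/L3/L4 patterns, this covers GTP-U, L2TPv3 over IP and the IPsec
 * protocols ESP, AH and NAT-T ESP.
 *
 * Illustrative rules in testpmd flow syntax (the port and queue numbers
 * here are made-up examples, not defaults):
 *
 *   flow create 0 ingress pattern eth / ipv4 / l2tpv3oip session_id is 1
 *        / end actions queue index 3 / end
 *   flow create 0 ingress pattern eth / ipv4 / esp spi is 1 / end
 *        actions queue index 3 / end
 */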

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "virtchnl.h"

#define IAVF_FDIR_MAX_QREGION_SIZE 128

#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

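/* Input-set masks: one IAVF_FDIR_INSET_* bitmap per supported pattern,
 * naming every field a rule for that pattern is allowed to match on.
 */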
#define IAVF_FDIR_INSET_ETH (\
        IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
        IAVF_INSET_IPV4_TTL)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
        IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
        IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_EH (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_L2TPV3OIP (\
        IAVF_L2TPV3OIP_SESSION_ID)

#define IAVF_FDIR_INSET_ESP (\
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
        IAVF_INSET_AH_SPI)

#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_ESP_SPI)

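/* Supported patterns, each paired with the input-set mask it may use. */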
static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
        {iavf_pattern_ethertype,                IAVF_FDIR_INSET_ETH,                    IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4,                 IAVF_FDIR_INSET_ETH_IPV4,               IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp,             IAVF_FDIR_INSET_ETH_IPV4_UDP,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_tcp,             IAVF_FDIR_INSET_ETH_IPV4_TCP,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_sctp,            IAVF_FDIR_INSET_ETH_IPV4_SCTP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6,                 IAVF_FDIR_INSET_ETH_IPV6,               IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp,             IAVF_FDIR_INSET_ETH_IPV6_UDP,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_tcp,             IAVF_FDIR_INSET_ETH_IPV6_TCP,           IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_sctp,            IAVF_FDIR_INSET_ETH_IPV6_SCTP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu,            IAVF_FDIR_INSET_GTPU,                   IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_gtpu_eh,         IAVF_FDIR_INSET_GTPU_EH,                IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_l2tpv3,          IAVF_FDIR_INSET_L2TPV3OIP,              IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_l2tpv3,          IAVF_FDIR_INSET_L2TPV3OIP,              IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_esp,             IAVF_FDIR_INSET_ESP,                    IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_esp,             IAVF_FDIR_INSET_ESP,                    IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_ah,              IAVF_FDIR_INSET_AH,                     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_ah,              IAVF_FDIR_INSET_AH,                     IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp_esp,         IAVF_FDIR_INSET_IPV4_NATT_ESP,          IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp_esp,         IAVF_FDIR_INSET_IPV6_NATT_ESP,          IAVF_INSET_NONE},
};

static struct iavf_flow_parser iavf_fdir_parser;

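/* Register the FDIR parser, provided the PF advertised the
 * VIRTCHNL_VF_OFFLOAD_FDIR_PF capability for this VF.
 */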
static int
iavf_fdir_init(struct iavf_adapter *ad)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_flow_parser *parser;

        if (!vf->vf_res)
                return -EINVAL;

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
                parser = &iavf_fdir_parser;
        else
                return -ENOTSUP;

        return iavf_register_parser(parser, ad);
}

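/* Unregister the FDIR parser when the adapter is torn down. */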
static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
        iavf_unregister_parser(&iavf_fdir_parser, ad);
}

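/* Program the parsed rule (passed in via @meta) on the PF and attach a
 * private copy of it to the rte_flow handle.
 */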
static int
iavf_fdir_create(struct iavf_adapter *ad,
                struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        struct iavf_fdir_conf *rule;
        int ret;

        rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
        if (!rule) {
                rte_flow_error_set(error, ENOMEM,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to allocate memory for fdir rule");
                return -rte_errno;
        }

        ret = iavf_fdir_add(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to add filter rule.");
                goto free_entry;
        }

        rte_memcpy(rule, filter, sizeof(*rule));
        flow->rule = rule;

        return 0;

free_entry:
        rte_free(rule);
        return -rte_errno;
}

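/* Remove the rule from the PF and free the private copy held by the
 * rte_flow handle.
 */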
static int
iavf_fdir_destroy(struct iavf_adapter *ad,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter;
        int ret;

        filter = (struct iavf_fdir_conf *)flow->rule;

        ret = iavf_fdir_del(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to delete filter rule.");
                return -rte_errno;
        }

        flow->rule = NULL;
        rte_free(filter);

        return 0;
}

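/* Ask the PF to validate a parsed rule without programming it. */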
static int
iavf_fdir_validation(struct iavf_adapter *ad,
                __rte_unused struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        struct iavf_fdir_conf *filter = meta;
        int ret;

        ret = iavf_fdir_check(ad, filter);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "Failed to validate filter rule.");
                return -rte_errno;
        }

        return 0;
}

static struct iavf_flow_engine iavf_fdir_engine = {
        .init = iavf_fdir_init,
        .uninit = iavf_fdir_uninit,
        .create = iavf_fdir_create,
        .destroy = iavf_fdir_destroy,
        .validation = iavf_fdir_validation,
        .type = IAVF_FLOW_ENGINE_FDIR,
};

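/* Translate an RSS action into a queue-region destination.  The queues
 * must be contiguous, within the device Rx queue range, and a
 * power-of-two count of at most IAVF_FDIR_MAX_QREGION_SIZE.  The region
 * is encoded as a start index plus log2 of its size: e.g. queues 8-11
 * yield index = 8 and region = rte_fls_u32(4) - 1 = 2.
 */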
static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
                        struct rte_flow_error *error,
                        const struct rte_flow_action *act,
                        struct virtchnl_filter_action *filter_action)
{
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check that the queue indexes for the queue region are contiguous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, act,
                                        "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
                rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, act,
                                "The region size must be a power of two "
                                "between 2 and 128, and the total number of "
                                "queues must not exceed the VSI allocation.");
                return -rte_errno;
        }

        filter_action->act_conf.queue.index = rss->queue[0];
        filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

        return 0;
}

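/* Walk the action list and fill the virtchnl action set.  Exactly one
 * destination action (passthru, drop, queue or RSS queue region) is
 * accepted per rule.
 */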
static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        const struct rte_flow_action_queue *act_q;
        uint32_t dest_num = 0;
        int ret;

        int number = 0;
        struct virtchnl_filter_action *filter_action;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;

                /* Check the bound before indexing into the virtchnl action
                 * array below, so an oversized action list cannot write
                 * past the end of it.
                 */
                if (number >= VIRTCHNL_MAX_NUM_ACTIONS) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                "Action numbers exceed the maximum value");
                        return -rte_errno;
                }

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_DROP;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_QUEUE;
                        filter_action->act_conf.queue.index = act_q->index;

                        if (filter_action->act_conf.queue.index >=
                                ad->eth_dev->data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions, "Invalid queue for FDIR.");
                                return -rte_errno;
                        }

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

                        filter_action->type = VIRTCHNL_ACTION_Q_REGION;

                        ret = iavf_fdir_parse_action_qregion(ad,
                                                error, actions, filter_action);
                        if (ret)
                                return ret;

                        filter->add_fltr.rule_cfg.action_set.count = ++number;
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Invalid action.");
                        return -rte_errno;
                }
        }

        if (dest_num != 1) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                        "Unsupported action combination");
                return -rte_errno;
        }

        return 0;
}

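/* Walk the pattern items and build the virtchnl protocol header list.
 * Each non-VOID item consumes one proto_hdr slot: fully-masked fields
 * are recorded in the input set and the item spec is copied into the
 * header buffer for the PF to match on.
 */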
static int
iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                        const struct rte_flow_item pattern[],
                        struct rte_flow_error *error,
                        struct iavf_fdir_conf *filter)
{
        const struct rte_flow_item *item = pattern;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
        const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
        const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
        const struct rte_flow_item_esp *esp_spec, *esp_mask;
        const struct rte_flow_item_ah *ah_spec, *ah_mask;
        uint64_t input_set = IAVF_INSET_NONE;

        enum rte_flow_item_type next_type;
        uint16_t ether_type;

        int layer = 0;
        struct virtchnl_proto_hdr *hdr;

        uint8_t ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
        };

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Range matching is not supported");
                        return -rte_errno;
                }

                item_type = item->type;

                /* Every item below (except VOID) consumes one proto_hdr
                 * slot; check the bound before indexing into the array.
                 */
                if (item_type != RTE_FLOW_ITEM_TYPE_VOID &&
                    layer >= VIRTCHNL_MAX_NUM_PROTO_HDRS) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Protocol header layers exceed the maximum value");
                        return -rte_errno;
                }

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        next_type = (item + 1)->type;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);

                        if (next_type == RTE_FLOW_ITEM_TYPE_END &&
                                (!eth_spec || !eth_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "NULL eth spec/mask.");
                                return -rte_errno;
                        }

                        if (eth_spec && eth_mask) {
                                if (!rte_is_zero_ether_addr(&eth_mask->src) ||
                                    !rte_is_zero_ether_addr(&eth_mask->dst)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid MAC_addr mask.");
                                        return -rte_errno;
                                }
                        }

                        if (eth_spec && eth_mask && eth_mask->type) {
                                if (eth_mask->type != RTE_BE16(0xffff)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid type mask.");
                                        return -rte_errno;
                                }

                                ether_type = rte_be_to_cpu_16(eth_spec->type);
                                if (ether_type == RTE_ETHER_TYPE_IPV4 ||
                                        ether_type == RTE_ETHER_TYPE_IPV6) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item,
                                                "Unsupported ether_type.");
                                        return -rte_errno;
                                }

                                input_set |= IAVF_INSET_ETHERTYPE;
                                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);

                                rte_memcpy(hdr->buffer,
                                        eth_spec, sizeof(*eth_spec));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

                        if (ipv4_spec && ipv4_mask) {
                                if (ipv4_mask->hdr.version_ihl ||
                                        ipv4_mask->hdr.total_length ||
                                        ipv4_mask->hdr.packet_id ||
                                        ipv4_mask->hdr.fragment_offset ||
                                        ipv4_mask->hdr.hdr_checksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid IPv4 mask.");
                                        return -rte_errno;
                                }

                                if (ipv4_mask->hdr.type_of_service ==
                                                                UINT8_MAX) {
                                        input_set |= IAVF_INSET_IPV4_TOS;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
                                }
                                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
                                        input_set |= IAVF_INSET_IPV4_PROTO;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
                                }
                                if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
                                        input_set |= IAVF_INSET_IPV4_TTL;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
                                }
                                if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
                                        input_set |= IAVF_INSET_IPV4_SRC;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
                                }
                                if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
                                        input_set |= IAVF_INSET_IPV4_DST;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
                                }

                                rte_memcpy(hdr->buffer,
                                        &ipv4_spec->hdr,
                                        sizeof(ipv4_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

                        if (ipv6_spec && ipv6_mask) {
                                if (ipv6_mask->hdr.payload_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid IPv6 mask");
                                        return -rte_errno;
                                }

                                if ((ipv6_mask->hdr.vtc_flow &
                                        rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
                                        == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
                                        input_set |= IAVF_INSET_IPV6_TC;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
                                }
                                if (ipv6_mask->hdr.proto == UINT8_MAX) {
                                        input_set |= IAVF_INSET_IPV6_NEXT_HDR;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
                                }
                                if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
                                        input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
                                }
                                if (!memcmp(ipv6_mask->hdr.src_addr,
                                        ipv6_addr_mask,
                                        RTE_DIM(ipv6_mask->hdr.src_addr))) {
                                        input_set |= IAVF_INSET_IPV6_SRC;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
                                }
                                if (!memcmp(ipv6_mask->hdr.dst_addr,
                                        ipv6_addr_mask,
                                        RTE_DIM(ipv6_mask->hdr.dst_addr))) {
                                        input_set |= IAVF_INSET_IPV6_DST;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
                                }

                                rte_memcpy(hdr->buffer,
                                        &ipv6_spec->hdr,
                                        sizeof(ipv6_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

                        if (udp_spec && udp_mask) {
                                if (udp_mask->hdr.dgram_len ||
                                        udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid UDP mask");
                                        return -rte_errno;
                                }

                                if (udp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_UDP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
                                }
                                if (udp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_UDP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                        l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &udp_spec->hdr,
                                                sizeof(udp_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

                        if (tcp_spec && tcp_mask) {
                                if (tcp_mask->hdr.sent_seq ||
                                        tcp_mask->hdr.recv_ack ||
                                        tcp_mask->hdr.data_off ||
                                        tcp_mask->hdr.tcp_flags ||
                                        tcp_mask->hdr.rx_win ||
                                        tcp_mask->hdr.cksum ||
                                        tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid TCP mask");
                                        return -rte_errno;
                                }

                                if (tcp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_TCP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
                                }
                                if (tcp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_TCP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                        l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &tcp_spec->hdr,
                                                sizeof(tcp_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

                        if (sctp_spec && sctp_mask) {
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                                "Invalid SCTP mask");
                                        return -rte_errno;
                                }

                                if (sctp_mask->hdr.src_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_SCTP_SRC_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
                                }
                                if (sctp_mask->hdr.dst_port == UINT16_MAX) {
                                        input_set |= IAVF_INSET_SCTP_DST_PORT;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
                                }

                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                        l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                        rte_memcpy(hdr->buffer,
                                                &sctp_spec->hdr,
                                                sizeof(sctp_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_GTPU:
                        gtp_spec = item->spec;
                        gtp_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);

                        if (gtp_spec && gtp_mask) {
                                if (gtp_mask->v_pt_rsv_flags ||
                                        gtp_mask->msg_type ||
                                        gtp_mask->msg_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "Invalid GTP mask");
                                        return -rte_errno;
                                }

                                if (gtp_mask->teid == UINT32_MAX) {
                                        input_set |= IAVF_INSET_GTPU_TEID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
                                }

                                rte_memcpy(hdr->buffer,
                                        gtp_spec, sizeof(*gtp_spec));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
                        gtp_psc_spec = item->spec;
                        gtp_psc_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);

                        if (gtp_psc_spec && gtp_psc_mask) {
                                if (gtp_psc_mask->qfi == UINT8_MAX) {
                                        input_set |= IAVF_INSET_GTPU_QFI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
                                }

                                rte_memcpy(hdr->buffer, gtp_psc_spec,
                                        sizeof(*gtp_psc_spec));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
                        l2tpv3oip_spec = item->spec;
                        l2tpv3oip_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);

                        if (l2tpv3oip_spec && l2tpv3oip_mask) {
                                if (l2tpv3oip_mask->session_id == UINT32_MAX) {
                                        input_set |= IAVF_L2TPV3OIP_SESSION_ID;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
                                }

                                rte_memcpy(hdr->buffer, l2tpv3oip_spec,
                                        sizeof(*l2tpv3oip_spec));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_ESP:
                        esp_spec = item->spec;
                        esp_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

                        if (esp_spec && esp_mask) {
                                if (esp_mask->hdr.spi == UINT32_MAX) {
                                        input_set |= IAVF_INSET_ESP_SPI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
                                }

                                rte_memcpy(hdr->buffer, &esp_spec->hdr,
                                        sizeof(esp_spec->hdr));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_AH:
                        ah_spec = item->spec;
                        ah_mask = item->mask;

                        hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

                        if (ah_spec && ah_mask) {
                                if (ah_mask->spi == UINT32_MAX) {
                                        input_set |= IAVF_INSET_AH_SPI;
                                        VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
                                }

                                rte_memcpy(hdr->buffer, ah_spec,
                                        sizeof(*ah_spec));
                        }

                        filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
                        break;

                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Invalid pattern item.");
                        return -rte_errno;
                }
        }

        filter->input_set = input_set;

        return 0;
}

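/* Parser entry point: match the pattern against the supported table,
 * then parse the pattern and actions into vf->fdir.conf.  On success
 * the parsed filter is handed back through @meta.
 */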
static int
iavf_fdir_parse(struct iavf_adapter *ad,
                struct iavf_pattern_match_item *array,
                uint32_t array_len,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                void **meta,
                struct rte_flow_error *error)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_fdir_conf *filter = &vf->fdir.conf;
        struct iavf_pattern_match_item *item = NULL;
        uint64_t input_set;
        int ret;

        memset(filter, 0, sizeof(*filter));

        item = iavf_search_pattern_match_item(pattern, array, array_len, error);
        if (!item)
                return -rte_errno;

        ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
        if (ret)
                goto error;

        input_set = filter->input_set;
        if (!input_set || input_set & ~item->input_set_mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
                                "Invalid input set");
                ret = -rte_errno;
                goto error;
        }

        ret = iavf_fdir_parse_action(ad, actions, error, filter);
        if (ret)
                goto error;

        if (meta)
                *meta = filter;

error:
        rte_free(item);
        return ret;
}

static struct iavf_flow_parser iavf_fdir_parser = {
        .engine = &iavf_fdir_engine,
        .array = iavf_fdir_pattern,
        .array_len = RTE_DIM(iavf_fdir_pattern),
        .parse_pattern_action = iavf_fdir_parse,
        .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
};

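/* Register the FDIR engine with the generic flow framework at startup. */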
RTE_INIT(iavf_fdir_engine_register)
{
        iavf_register_flow_engine(&iavf_fdir_engine);
}