net/iavf: support flow director basic rule
[dpdk.git] / drivers / net / iavf / iavf_fdir.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_malloc.h>
16 #include <rte_tailq.h>
17
18 #include "iavf.h"
19 #include "iavf_generic_flow.h"
20 #include "virtchnl.h"
21
/* Maximum number of queues a single RSS queue-region action may span. */
#define IAVF_FDIR_MAX_QREGION_SIZE 128

/* IPv6 Traffic Class field location inside the big-endian vtc_flow word:
 * bits 27:20 once byte-swapped to CPU order.
 */
#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

/* Input-set masks: the fields each supported pattern is allowed to match.
 * These are checked against the parsed pattern in iavf_fdir_parse().
 */
#define IAVF_FDIR_INSET_ETH (\
	IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
	IAVF_INSET_IPV4_TTL)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
	IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
69
70 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
71         {iavf_pattern_ethertype,                IAVF_FDIR_INSET_ETH,                    IAVF_INSET_NONE},
72         {iavf_pattern_eth_ipv4,                 IAVF_FDIR_INSET_ETH_IPV4,               IAVF_INSET_NONE},
73         {iavf_pattern_eth_ipv4_udp,             IAVF_FDIR_INSET_ETH_IPV4_UDP,           IAVF_INSET_NONE},
74         {iavf_pattern_eth_ipv4_tcp,             IAVF_FDIR_INSET_ETH_IPV4_TCP,           IAVF_INSET_NONE},
75         {iavf_pattern_eth_ipv4_sctp,            IAVF_FDIR_INSET_ETH_IPV4_SCTP,          IAVF_INSET_NONE},
76         {iavf_pattern_eth_ipv6,                 IAVF_FDIR_INSET_ETH_IPV6,               IAVF_INSET_NONE},
77         {iavf_pattern_eth_ipv6_udp,             IAVF_FDIR_INSET_ETH_IPV6_UDP,           IAVF_INSET_NONE},
78         {iavf_pattern_eth_ipv6_tcp,             IAVF_FDIR_INSET_ETH_IPV6_TCP,           IAVF_INSET_NONE},
79         {iavf_pattern_eth_ipv6_sctp,            IAVF_FDIR_INSET_ETH_IPV6_SCTP,          IAVF_INSET_NONE},
80 };
81
/* Forward declaration; the parser is fully defined at the end of the file. */
static struct iavf_flow_parser iavf_fdir_parser;
83
84 static int
85 iavf_fdir_init(struct iavf_adapter *ad)
86 {
87         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
88         struct iavf_flow_parser *parser;
89
90         if (!vf->vf_res)
91                 return -EINVAL;
92
93         if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
94                 parser = &iavf_fdir_parser;
95         else
96                 return -ENOTSUP;
97
98         return iavf_register_parser(parser, ad);
99 }
100
101 static void
102 iavf_fdir_uninit(struct iavf_adapter *ad)
103 {
104         iavf_unregister_parser(&iavf_fdir_parser, ad);
105 }
106
107 static int
108 iavf_fdir_create(struct iavf_adapter *ad,
109                 struct rte_flow *flow,
110                 void *meta,
111                 struct rte_flow_error *error)
112 {
113         struct iavf_fdir_conf *filter = meta;
114         struct iavf_fdir_conf *rule;
115         int ret;
116
117         rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
118         if (!rule) {
119                 rte_flow_error_set(error, ENOMEM,
120                                 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
121                                 "Failed to allocate memory for fdir rule");
122                 return -rte_errno;
123         }
124
125         ret = iavf_fdir_add(ad, filter);
126         if (ret) {
127                 rte_flow_error_set(error, -ret,
128                                 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
129                                 "Failed to add filter rule.");
130                 goto free_entry;
131         }
132
133         rte_memcpy(rule, filter, sizeof(*rule));
134         flow->rule = rule;
135
136         return 0;
137
138 free_entry:
139         rte_free(rule);
140         return -rte_errno;
141 }
142
143 static int
144 iavf_fdir_destroy(struct iavf_adapter *ad,
145                 struct rte_flow *flow,
146                 struct rte_flow_error *error)
147 {
148         struct iavf_fdir_conf *filter;
149         int ret;
150
151         filter = (struct iavf_fdir_conf *)flow->rule;
152
153         ret = iavf_fdir_del(ad, filter);
154         if (ret) {
155                 rte_flow_error_set(error, -ret,
156                                 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
157                                 "Failed to delete filter rule.");
158                 return -rte_errno;
159         }
160
161         flow->rule = NULL;
162         rte_free(filter);
163
164         return 0;
165 }
166
167 static int
168 iavf_fdir_validation(struct iavf_adapter *ad,
169                 __rte_unused struct rte_flow *flow,
170                 void *meta,
171                 struct rte_flow_error *error)
172 {
173         struct iavf_fdir_conf *filter = meta;
174         int ret;
175
176         ret = iavf_fdir_check(ad, filter);
177         if (ret) {
178                 rte_flow_error_set(error, -ret,
179                                 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
180                                 "Failed to validate filter rule.");
181                 return -rte_errno;
182         }
183
184         return 0;
185 };
186
187 static struct iavf_flow_engine iavf_fdir_engine = {
188         .init = iavf_fdir_init,
189         .uninit = iavf_fdir_uninit,
190         .create = iavf_fdir_create,
191         .destroy = iavf_fdir_destroy,
192         .validation = iavf_fdir_validation,
193         .type = IAVF_FLOW_ENGINE_FDIR,
194 };
195
196 static int
197 iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
198                         struct rte_flow_error *error,
199                         const struct rte_flow_action *act,
200                         struct virtchnl_filter_action *filter_action)
201 {
202         const struct rte_flow_action_rss *rss = act->conf;
203         uint32_t i;
204
205         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
206                 rte_flow_error_set(error, EINVAL,
207                                 RTE_FLOW_ERROR_TYPE_ACTION, act,
208                                 "Invalid action.");
209                 return -rte_errno;
210         }
211
212         if (rss->queue_num <= 1) {
213                 rte_flow_error_set(error, EINVAL,
214                                 RTE_FLOW_ERROR_TYPE_ACTION, act,
215                                 "Queue region size can't be 0 or 1.");
216                 return -rte_errno;
217         }
218
219         /* check if queue index for queue region is continuous */
220         for (i = 0; i < rss->queue_num - 1; i++) {
221                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
222                         rte_flow_error_set(error, EINVAL,
223                                         RTE_FLOW_ERROR_TYPE_ACTION, act,
224                                         "Discontinuous queue region");
225                         return -rte_errno;
226                 }
227         }
228
229         if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
230                 rte_flow_error_set(error, EINVAL,
231                                 RTE_FLOW_ERROR_TYPE_ACTION, act,
232                                 "Invalid queue region indexes.");
233                 return -rte_errno;
234         }
235
236         if (!(rte_is_power_of_2(rss->queue_num) &&
237                 rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
238                 rte_flow_error_set(error, EINVAL,
239                                 RTE_FLOW_ERROR_TYPE_ACTION, act,
240                                 "The region size should be any of the following values:"
241                                 "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
242                                 "of queues do not exceed the VSI allocation.");
243                 return -rte_errno;
244         }
245
246         filter_action->act_conf.queue.index = rss->queue[0];
247         filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;
248
249         return 0;
250 }
251
252 static int
253 iavf_fdir_parse_action(struct iavf_adapter *ad,
254                         const struct rte_flow_action actions[],
255                         struct rte_flow_error *error,
256                         struct iavf_fdir_conf *filter)
257 {
258         const struct rte_flow_action_queue *act_q;
259         uint32_t dest_num = 0;
260         int ret;
261
262         int number = 0;
263         struct virtchnl_filter_action *filter_action;
264
265         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
266                 switch (actions->type) {
267                 case RTE_FLOW_ACTION_TYPE_VOID:
268                         break;
269
270                 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
271                         dest_num++;
272
273                         filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
274
275                         filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
276
277                         filter->add_fltr.rule_cfg.action_set.count = ++number;
278                         break;
279
280                 case RTE_FLOW_ACTION_TYPE_DROP:
281                         dest_num++;
282
283                         filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
284
285                         filter_action->type = VIRTCHNL_ACTION_DROP;
286
287                         filter->add_fltr.rule_cfg.action_set.count = ++number;
288                         break;
289
290                 case RTE_FLOW_ACTION_TYPE_QUEUE:
291                         dest_num++;
292
293                         act_q = actions->conf;
294                         filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
295
296                         filter_action->type = VIRTCHNL_ACTION_QUEUE;
297                         filter_action->act_conf.queue.index = act_q->index;
298
299                         if (filter_action->act_conf.queue.index >=
300                                 ad->eth_dev->data->nb_rx_queues) {
301                                 rte_flow_error_set(error, EINVAL,
302                                         RTE_FLOW_ERROR_TYPE_ACTION,
303                                         actions, "Invalid queue for FDIR.");
304                                 return -rte_errno;
305                         }
306
307                         filter->add_fltr.rule_cfg.action_set.count = ++number;
308                         break;
309
310                 case RTE_FLOW_ACTION_TYPE_RSS:
311                         dest_num++;
312
313                         filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
314
315                         filter_action->type = VIRTCHNL_ACTION_Q_REGION;
316
317                         ret = iavf_fdir_parse_action_qregion(ad,
318                                                 error, actions, filter_action);
319                         if (ret)
320                                 return ret;
321
322                         filter->add_fltr.rule_cfg.action_set.count = ++number;
323                         break;
324
325                 default:
326                         rte_flow_error_set(error, EINVAL,
327                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
328                                         "Invalid action.");
329                         return -rte_errno;
330                 }
331         }
332
333         if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
334                 rte_flow_error_set(error, EINVAL,
335                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
336                         "Action numbers exceed the maximum value");
337                 return -rte_errno;
338         }
339
340         if (dest_num == 0 || dest_num >= 2) {
341                 rte_flow_error_set(error, EINVAL,
342                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
343                         "Unsupported action combination");
344                 return -rte_errno;
345         }
346
347         return 0;
348 }
349
350 static int
351 iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
352                         const struct rte_flow_item pattern[],
353                         struct rte_flow_error *error,
354                         struct iavf_fdir_conf *filter)
355 {
356         const struct rte_flow_item *item = pattern;
357         enum rte_flow_item_type item_type;
358         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
359         const struct rte_flow_item_eth *eth_spec, *eth_mask;
360         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
361         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
362         const struct rte_flow_item_udp *udp_spec, *udp_mask;
363         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
364         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
365         uint64_t input_set = IAVF_INSET_NONE;
366
367         enum rte_flow_item_type next_type;
368         uint16_t ether_type;
369
370         int layer = 0;
371         struct virtchnl_proto_hdr *hdr;
372
373         uint8_t  ipv6_addr_mask[16] = {
374                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
375                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
376         };
377
378         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
379                 if (item->last) {
380                         rte_flow_error_set(error, EINVAL,
381                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
382                                         "Not support range");
383                 }
384
385                 item_type = item->type;
386
387                 switch (item_type) {
388                 case RTE_FLOW_ITEM_TYPE_ETH:
389                         eth_spec = item->spec;
390                         eth_mask = item->mask;
391                         next_type = (item + 1)->type;
392
393                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
394
395                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
396
397                         if (next_type == RTE_FLOW_ITEM_TYPE_END &&
398                                 (!eth_spec || !eth_mask)) {
399                                 rte_flow_error_set(error, EINVAL,
400                                                 RTE_FLOW_ERROR_TYPE_ITEM,
401                                                 item, "NULL eth spec/mask.");
402                                 return -rte_errno;
403                         }
404
405                         if (eth_spec && eth_mask) {
406                                 if (!rte_is_zero_ether_addr(&eth_mask->src) ||
407                                     !rte_is_zero_ether_addr(&eth_mask->dst)) {
408                                         rte_flow_error_set(error, EINVAL,
409                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
410                                                 "Invalid MAC_addr mask.");
411                                         return -rte_errno;
412                                 }
413                         }
414
415                         if (eth_spec && eth_mask && eth_mask->type) {
416                                 if (eth_mask->type != RTE_BE16(0xffff)) {
417                                         rte_flow_error_set(error, EINVAL,
418                                                 RTE_FLOW_ERROR_TYPE_ITEM,
419                                                 item, "Invalid type mask.");
420                                         return -rte_errno;
421                                 }
422
423                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
424                                 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
425                                         ether_type == RTE_ETHER_TYPE_IPV6) {
426                                         rte_flow_error_set(error, EINVAL,
427                                                 RTE_FLOW_ERROR_TYPE_ITEM,
428                                                 item,
429                                                 "Unsupported ether_type.");
430                                         return -rte_errno;
431                                 }
432
433                                 input_set |= IAVF_INSET_ETHERTYPE;
434                                 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
435
436                                 rte_memcpy(hdr->buffer,
437                                         eth_spec, sizeof(*eth_spec));
438                         }
439
440                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
441                         break;
442
443                 case RTE_FLOW_ITEM_TYPE_IPV4:
444                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
445                         ipv4_spec = item->spec;
446                         ipv4_mask = item->mask;
447
448                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
449
450                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
451
452                         if (ipv4_spec && ipv4_mask) {
453                                 if (ipv4_mask->hdr.version_ihl ||
454                                         ipv4_mask->hdr.total_length ||
455                                         ipv4_mask->hdr.packet_id ||
456                                         ipv4_mask->hdr.fragment_offset ||
457                                         ipv4_mask->hdr.hdr_checksum) {
458                                         rte_flow_error_set(error, EINVAL,
459                                                 RTE_FLOW_ERROR_TYPE_ITEM,
460                                                 item, "Invalid IPv4 mask.");
461                                         return -rte_errno;
462                                 }
463
464                                 if (ipv4_mask->hdr.type_of_service ==
465                                                                 UINT8_MAX) {
466                                         input_set |= IAVF_INSET_IPV4_TOS;
467                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
468                                 }
469                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
470                                         input_set |= IAVF_INSET_IPV4_PROTO;
471                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
472                                 }
473                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
474                                         input_set |= IAVF_INSET_IPV4_TTL;
475                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
476                                 }
477                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
478                                         input_set |= IAVF_INSET_IPV4_SRC;
479                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
480                                 }
481                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
482                                         input_set |= IAVF_INSET_IPV4_DST;
483                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
484                                 }
485
486                                 rte_memcpy(hdr->buffer,
487                                         &ipv4_spec->hdr,
488                                         sizeof(ipv4_spec->hdr));
489                         }
490
491                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
492                         break;
493
494                 case RTE_FLOW_ITEM_TYPE_IPV6:
495                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
496                         ipv6_spec = item->spec;
497                         ipv6_mask = item->mask;
498
499                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
500
501                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
502
503                         if (ipv6_spec && ipv6_mask) {
504                                 if (ipv6_mask->hdr.payload_len) {
505                                         rte_flow_error_set(error, EINVAL,
506                                                 RTE_FLOW_ERROR_TYPE_ITEM,
507                                                 item, "Invalid IPv6 mask");
508                                         return -rte_errno;
509                                 }
510
511                                 if ((ipv6_mask->hdr.vtc_flow &
512                                         rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
513                                         == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
514                                         input_set |= IAVF_INSET_IPV6_TC;
515                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
516                                 }
517                                 if (ipv6_mask->hdr.proto == UINT8_MAX) {
518                                         input_set |= IAVF_INSET_IPV6_NEXT_HDR;
519                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
520                                 }
521                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
522                                         input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
523                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
524                                 }
525                                 if (!memcmp(ipv6_mask->hdr.src_addr,
526                                         ipv6_addr_mask,
527                                         RTE_DIM(ipv6_mask->hdr.src_addr))) {
528                                         input_set |= IAVF_INSET_IPV6_SRC;
529                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
530                                 }
531                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
532                                         ipv6_addr_mask,
533                                         RTE_DIM(ipv6_mask->hdr.dst_addr))) {
534                                         input_set |= IAVF_INSET_IPV6_DST;
535                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
536                                 }
537
538                                 rte_memcpy(hdr->buffer,
539                                         &ipv6_spec->hdr,
540                                         sizeof(ipv6_spec->hdr));
541                         }
542
543                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
544                         break;
545
546                 case RTE_FLOW_ITEM_TYPE_UDP:
547                         udp_spec = item->spec;
548                         udp_mask = item->mask;
549
550                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
551
552                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
553
554                         if (udp_spec && udp_mask) {
555                                 if (udp_mask->hdr.dgram_len ||
556                                         udp_mask->hdr.dgram_cksum) {
557                                         rte_flow_error_set(error, EINVAL,
558                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
559                                                 "Invalid UDP mask");
560                                         return -rte_errno;
561                                 }
562
563                                 if (udp_mask->hdr.src_port == UINT16_MAX) {
564                                         input_set |= IAVF_INSET_UDP_SRC_PORT;
565                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
566                                 }
567                                 if (udp_mask->hdr.dst_port == UINT16_MAX) {
568                                         input_set |= IAVF_INSET_UDP_DST_PORT;
569                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
570                                 }
571
572                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
573                                         rte_memcpy(hdr->buffer,
574                                                 &udp_spec->hdr,
575                                                 sizeof(udp_spec->hdr));
576                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
577                                         rte_memcpy(hdr->buffer,
578                                                 &udp_spec->hdr,
579                                                 sizeof(udp_spec->hdr));
580                         }
581
582                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
583                         break;
584
585                 case RTE_FLOW_ITEM_TYPE_TCP:
586                         tcp_spec = item->spec;
587                         tcp_mask = item->mask;
588
589                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
590
591                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
592
593                         if (tcp_spec && tcp_mask) {
594                                 if (tcp_mask->hdr.sent_seq ||
595                                         tcp_mask->hdr.recv_ack ||
596                                         tcp_mask->hdr.data_off ||
597                                         tcp_mask->hdr.tcp_flags ||
598                                         tcp_mask->hdr.rx_win ||
599                                         tcp_mask->hdr.cksum ||
600                                         tcp_mask->hdr.tcp_urp) {
601                                         rte_flow_error_set(error, EINVAL,
602                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
603                                                 "Invalid TCP mask");
604                                         return -rte_errno;
605                                 }
606
607                                 if (tcp_mask->hdr.src_port == UINT16_MAX) {
608                                         input_set |= IAVF_INSET_TCP_SRC_PORT;
609                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
610                                 }
611                                 if (tcp_mask->hdr.dst_port == UINT16_MAX) {
612                                         input_set |= IAVF_INSET_TCP_DST_PORT;
613                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
614                                 }
615
616                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
617                                         rte_memcpy(hdr->buffer,
618                                                 &tcp_spec->hdr,
619                                                 sizeof(tcp_spec->hdr));
620                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
621                                         rte_memcpy(hdr->buffer,
622                                                 &tcp_spec->hdr,
623                                                 sizeof(tcp_spec->hdr));
624                         }
625
626                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
627                         break;
628
629                 case RTE_FLOW_ITEM_TYPE_SCTP:
630                         sctp_spec = item->spec;
631                         sctp_mask = item->mask;
632
633                         hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
634
635                         VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
636
637                         if (sctp_spec && sctp_mask) {
638                                 if (sctp_mask->hdr.cksum) {
639                                         rte_flow_error_set(error, EINVAL,
640                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
641                                                 "Invalid UDP mask");
642                                         return -rte_errno;
643                                 }
644
645                                 if (sctp_mask->hdr.src_port == UINT16_MAX) {
646                                         input_set |= IAVF_INSET_SCTP_SRC_PORT;
647                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
648                                 }
649                                 if (sctp_mask->hdr.dst_port == UINT16_MAX) {
650                                         input_set |= IAVF_INSET_SCTP_DST_PORT;
651                                         VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
652                                 }
653
654                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
655                                         rte_memcpy(hdr->buffer,
656                                                 &sctp_spec->hdr,
657                                                 sizeof(sctp_spec->hdr));
658                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
659                                         rte_memcpy(hdr->buffer,
660                                                 &sctp_spec->hdr,
661                                                 sizeof(sctp_spec->hdr));
662                         }
663
664                         filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
665                         break;
666
667                 case RTE_FLOW_ITEM_TYPE_VOID:
668                         break;
669
670                 default:
671                         rte_flow_error_set(error, EINVAL,
672                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
673                                         "Invalid pattern item.");
674                         return -rte_errno;
675                 }
676         }
677
678         if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
679                 rte_flow_error_set(error, EINVAL,
680                         RTE_FLOW_ERROR_TYPE_ITEM, item,
681                         "Protocol header layers exceed the maximum value");
682                 return -rte_errno;
683         }
684
685         filter->input_set = input_set;
686
687         return 0;
688 }
689
690 static int
691 iavf_fdir_parse(struct iavf_adapter *ad,
692                 struct iavf_pattern_match_item *array,
693                 uint32_t array_len,
694                 const struct rte_flow_item pattern[],
695                 const struct rte_flow_action actions[],
696                 void **meta,
697                 struct rte_flow_error *error)
698 {
699         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
700         struct iavf_fdir_conf *filter = &vf->fdir.conf;
701         struct iavf_pattern_match_item *item = NULL;
702         uint64_t input_set;
703         int ret;
704
705         memset(filter, 0, sizeof(*filter));
706
707         item = iavf_search_pattern_match_item(pattern, array, array_len, error);
708         if (!item)
709                 return -rte_errno;
710
711         ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
712         if (ret)
713                 goto error;
714
715         input_set = filter->input_set;
716         if (!input_set || input_set & ~item->input_set_mask) {
717                 rte_flow_error_set(error, EINVAL,
718                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
719                                 "Invalid input set");
720                 ret = -rte_errno;
721                 goto error;
722         }
723
724         ret = iavf_fdir_parse_action(ad, actions, error, filter);
725         if (ret)
726                 goto error;
727
728         if (meta)
729                 *meta = filter;
730
731 error:
732         rte_free(item);
733         return ret;
734 }
735
736 static struct iavf_flow_parser iavf_fdir_parser = {
737         .engine = &iavf_fdir_engine,
738         .array = iavf_fdir_pattern,
739         .array_len = RTE_DIM(iavf_fdir_pattern),
740         .parse_pattern_action = iavf_fdir_parse,
741         .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
742 };
743
/* Constructor: register the FDIR engine with the generic flow framework
 * when the shared object is loaded.
 */
RTE_INIT(iavf_fdir_engine_register)
{
	iavf_register_flow_engine(&iavf_fdir_engine);
}