1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_malloc.h>
16 #include <rte_tailq.h>
19 #include "iavf_generic_flow.h"
21 #include "iavf_rxtx.h"
/*
 * FDIR limits and IPv6 traffic-class extraction constants.
 * NOTE(review): this chunk is missing source lines; several #define
 * bodies below end in a backslash continuation whose remainder is not
 * visible here — confirm against the full file before editing.
 */
23 #define IAVF_FDIR_MAX_QREGION_SIZE 128
25 #define IAVF_FDIR_IPV6_TC_OFFSET 20
26 #define IAVF_IPV6_TC_MASK (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
/*
 * Per-pattern input-set masks: the set of header fields a flow rule is
 * allowed to match on for each supported pattern (consumed by the
 * iavf_fdir_pattern[] table below).
 */
28 #define IAVF_FDIR_INSET_ETH (\
31 #define IAVF_FDIR_INSET_ETH_IPV4 (\
32 IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
33 IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
36 #define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
37 IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
38 IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
39 IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
41 #define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
42 IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
43 IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
44 IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
46 #define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
47 IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
48 IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
49 IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
51 #define IAVF_FDIR_INSET_ETH_IPV6 (\
52 IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
53 IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
54 IAVF_INSET_IPV6_HOP_LIMIT)
56 #define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
57 IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
58 IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
59 IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
61 #define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
62 IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
63 IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
64 IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
66 #define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
67 IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
68 IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
69 IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
/* Tunnel (GTP-U) and security/session protocol input sets. */
71 #define IAVF_FDIR_INSET_GTPU (\
72 IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
75 #define IAVF_FDIR_INSET_GTPU_EH (\
76 IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
77 IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
79 #define IAVF_FDIR_INSET_L2TPV3OIP (\
80 IAVF_L2TPV3OIP_SESSION_ID)
82 #define IAVF_FDIR_INSET_ESP (\
85 #define IAVF_FDIR_INSET_AH (\
88 #define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
89 IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
92 #define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
93 IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
96 #define IAVF_FDIR_INSET_PFCP (\
97 IAVF_INSET_PFCP_S_FIELD)
/*
 * Pattern-match table: one row per supported rte_flow pattern, pairing
 * the pattern with the input-set mask of fields a rule may match on.
 * The third column (outer input set) is unused (IAVF_INSET_NONE).
 * NOTE(review): the closing "};" is elided from this chunk.
 */
99 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
100 {iavf_pattern_ethertype, IAVF_FDIR_INSET_ETH, IAVF_INSET_NONE},
101 {iavf_pattern_eth_ipv4, IAVF_FDIR_INSET_ETH_IPV4, IAVF_INSET_NONE},
102 {iavf_pattern_eth_ipv4_udp, IAVF_FDIR_INSET_ETH_IPV4_UDP, IAVF_INSET_NONE},
103 {iavf_pattern_eth_ipv4_tcp, IAVF_FDIR_INSET_ETH_IPV4_TCP, IAVF_INSET_NONE},
104 {iavf_pattern_eth_ipv4_sctp, IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE},
105 {iavf_pattern_eth_ipv6, IAVF_FDIR_INSET_ETH_IPV6, IAVF_INSET_NONE},
106 {iavf_pattern_eth_ipv6_udp, IAVF_FDIR_INSET_ETH_IPV6_UDP, IAVF_INSET_NONE},
107 {iavf_pattern_eth_ipv6_tcp, IAVF_FDIR_INSET_ETH_IPV6_TCP, IAVF_INSET_NONE},
108 {iavf_pattern_eth_ipv6_sctp, IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE},
109 {iavf_pattern_eth_ipv4_gtpu, IAVF_FDIR_INSET_GTPU, IAVF_INSET_NONE},
110 {iavf_pattern_eth_ipv4_gtpu_eh, IAVF_FDIR_INSET_GTPU_EH, IAVF_INSET_NONE},
111 {iavf_pattern_eth_ipv4_l2tpv3, IAVF_FDIR_INSET_L2TPV3OIP, IAVF_INSET_NONE},
112 {iavf_pattern_eth_ipv6_l2tpv3, IAVF_FDIR_INSET_L2TPV3OIP, IAVF_INSET_NONE},
113 {iavf_pattern_eth_ipv4_esp, IAVF_FDIR_INSET_ESP, IAVF_INSET_NONE},
114 {iavf_pattern_eth_ipv6_esp, IAVF_FDIR_INSET_ESP, IAVF_INSET_NONE},
115 {iavf_pattern_eth_ipv4_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE},
116 {iavf_pattern_eth_ipv6_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE},
117 {iavf_pattern_eth_ipv4_udp_esp, IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE},
118 {iavf_pattern_eth_ipv6_udp_esp, IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE},
119 {iavf_pattern_eth_ipv4_pfcp, IAVF_FDIR_INSET_PFCP, IAVF_INSET_NONE},
120 {iavf_pattern_eth_ipv6_pfcp, IAVF_FDIR_INSET_PFCP, IAVF_INSET_NONE},
/* Forward declaration; the parser is defined at the bottom of the file. */
123 static struct iavf_flow_parser iavf_fdir_parser;
/*
 * Engine init hook: register the FDIR parser for this adapter if the PF
 * advertised the FDIR offload capability.
 * NOTE(review): the branch taken when the capability bit is absent is
 * elided from this chunk — presumably it returns an error so that
 * "parser" is never read uninitialized; confirm against the full file.
 */
126 iavf_fdir_init(struct iavf_adapter *ad)
128 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
129 struct iavf_flow_parser *parser;
/* Only register when the PF reports VIRTCHNL_VF_OFFLOAD_FDIR_PF. */
134 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
135 parser = &iavf_fdir_parser;
139 return iavf_register_parser(parser, ad);
/* Engine uninit hook: drop the FDIR parser registration for this adapter. */
143 iavf_fdir_uninit(struct iavf_adapter *ad)
145 iavf_unregister_parser(&iavf_fdir_parser, ad);
/*
 * Engine create hook: program the parsed FDIR filter ("meta") into the
 * hardware via virtchnl and keep a private copy attached to the flow.
 * NOTE(review): the error-path lines (free/goto/return and the
 * assignment of the rule to flow->rule) are elided from this chunk —
 * the visible rte_flow_error_set() calls are presumably followed by an
 * early exit; confirm against the full file.
 */
149 iavf_fdir_create(struct iavf_adapter *ad,
150 struct rte_flow *flow,
152 struct rte_flow_error *error)
154 struct iavf_fdir_conf *filter = meta;
155 struct iavf_fdir_conf *rule;
/* Zeroed copy that outlives the parse-time filter in vf->fdir.conf. */
158 rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
160 rte_flow_error_set(error, ENOMEM,
161 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
162 "Failed to allocate memory for fdir rule");
/* Send the add-filter request to the PF. */
166 ret = iavf_fdir_add(ad, filter);
168 rte_flow_error_set(error, -ret,
169 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
170 "Failed to add filter rule.");
/* A MARK action was parsed: enable FDIR mark extraction on the Rx path. */
174 if (filter->mark_flag == 1)
175 iavf_fdir_rx_proc_enable(ad, 1);
177 rte_memcpy(rule, filter, sizeof(*rule));
/*
 * Engine destroy hook: remove the filter attached to "flow" from the
 * hardware and, if it used a MARK action, disable FDIR mark extraction.
 * NOTE(review): the early-return after the error set and the freeing of
 * flow->rule are elided from this chunk — confirm against the full file.
 */
188 iavf_fdir_destroy(struct iavf_adapter *ad,
189 struct rte_flow *flow,
190 struct rte_flow_error *error)
192 struct iavf_fdir_conf *filter;
195 filter = (struct iavf_fdir_conf *)flow->rule;
/* Send the delete-filter request to the PF. */
197 ret = iavf_fdir_del(ad, filter);
199 rte_flow_error_set(error, -ret,
200 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
201 "Failed to delete filter rule.");
/* Mirror of the enable in iavf_fdir_create(). */
205 if (filter->mark_flag == 1)
206 iavf_fdir_rx_proc_enable(ad, 0);
/*
 * Engine validation hook: ask the PF to check the parsed filter ("meta")
 * without programming it. Returns 0 on success; on failure the visible
 * path fills *error (the return statements are elided from this chunk).
 */
215 iavf_fdir_validation(struct iavf_adapter *ad,
216 __rte_unused struct rte_flow *flow,
218 struct rte_flow_error *error)
220 struct iavf_fdir_conf *filter = meta;
223 ret = iavf_fdir_check(ad, filter);
225 rte_flow_error_set(error, -ret,
226 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
227 "Failed to validate filter rule.");
/*
 * FDIR flow-engine vtable registered with the generic flow framework
 * (see RTE_INIT at the bottom of the file).
 */
234 static struct iavf_flow_engine iavf_fdir_engine = {
235 .init = iavf_fdir_init,
236 .uninit = iavf_fdir_uninit,
237 .create = iavf_fdir_create,
238 .destroy = iavf_fdir_destroy,
239 .validation = iavf_fdir_validation,
240 .type = IAVF_FLOW_ENGINE_FDIR,
/*
 * Validate an RSS action used as a "queue region" destination and fill
 * the virtchnl action: region start index + log2(region size).
 * Constraints checked: queue_num > 1, queue indexes contiguous and in
 * range, and queue_num a power of two <= IAVF_FDIR_MAX_QREGION_SIZE.
 * NOTE(review): the early returns after each rte_flow_error_set() are
 * elided from this chunk.
 */
244 iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
245 struct rte_flow_error *error,
246 const struct rte_flow_action *act,
247 struct virtchnl_filter_action *filter_action)
249 const struct rte_flow_action_rss *rss = act->conf;
252 if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
253 rte_flow_error_set(error, EINVAL,
254 RTE_FLOW_ERROR_TYPE_ACTION, act,
259 if (rss->queue_num <= 1) {
260 rte_flow_error_set(error, EINVAL,
261 RTE_FLOW_ERROR_TYPE_ACTION, act,
262 "Queue region size can't be 0 or 1.");
266 /* check if queue index for queue region is continuous */
267 for (i = 0; i < rss->queue_num - 1; i++) {
268 if (rss->queue[i + 1] != rss->queue[i] + 1) {
269 rte_flow_error_set(error, EINVAL,
270 RTE_FLOW_ERROR_TYPE_ACTION, act,
271 "Discontinuous queue region");
/* Contiguity implies checking only the last index against nb_rx_queues. */
276 if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
277 rte_flow_error_set(error, EINVAL,
278 RTE_FLOW_ERROR_TYPE_ACTION, act,
279 "Invalid queue region indexes.");
283 if (!(rte_is_power_of_2(rss->queue_num) &&
284 rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
285 rte_flow_error_set(error, EINVAL,
286 RTE_FLOW_ERROR_TYPE_ACTION, act,
287 "The region size should be any of the following values:"
288 "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
289 "of queues do not exceed the VSI allocation.");
/* region is encoded as log2 of the queue count (queue_num is a power of 2). */
293 filter_action->act_conf.queue.index = rss->queue[0];
294 filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;
/*
 * Translate the rte_flow action list into virtchnl filter actions inside
 * "filter". Exactly one destination action (PASSTHRU/DROP/QUEUE/RSS
 * region) plus at most one MARK is accepted; dest_num/mark_num count
 * them for the combination checks at the end. A MARK without an
 * explicit destination is completed with an implicit PASSTHRU.
 * NOTE(review): the dest_num/mark_num increments, "break" statements and
 * the early returns after rte_flow_error_set() are elided from this
 * chunk — confirm against the full file.
 */
300 iavf_fdir_parse_action(struct iavf_adapter *ad,
301 const struct rte_flow_action actions[],
302 struct rte_flow_error *error,
303 struct iavf_fdir_conf *filter)
305 const struct rte_flow_action_queue *act_q;
306 const struct rte_flow_action_mark *mark_spec = NULL;
307 uint32_t dest_num = 0;
308 uint32_t mark_num = 0;
312 struct virtchnl_filter_action *filter_action;
314 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
315 switch (actions->type) {
316 case RTE_FLOW_ACTION_TYPE_VOID:
319 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
322 filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
324 filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
326 filter->add_fltr.rule_cfg.action_set.count = ++number;
329 case RTE_FLOW_ACTION_TYPE_DROP:
332 filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
334 filter_action->type = VIRTCHNL_ACTION_DROP;
336 filter->add_fltr.rule_cfg.action_set.count = ++number;
339 case RTE_FLOW_ACTION_TYPE_QUEUE:
342 act_q = actions->conf;
343 filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
345 filter_action->type = VIRTCHNL_ACTION_QUEUE;
346 filter_action->act_conf.queue.index = act_q->index;
/* Target queue must exist on this port. */
348 if (filter_action->act_conf.queue.index >=
349 ad->eth_dev->data->nb_rx_queues) {
350 rte_flow_error_set(error, EINVAL,
351 RTE_FLOW_ERROR_TYPE_ACTION,
352 actions, "Invalid queue for FDIR.");
356 filter->add_fltr.rule_cfg.action_set.count = ++number;
/* RSS here means "spread over a contiguous queue region". */
359 case RTE_FLOW_ACTION_TYPE_RSS:
362 filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
364 filter_action->type = VIRTCHNL_ACTION_Q_REGION;
366 ret = iavf_fdir_parse_action_qregion(ad,
367 error, actions, filter_action);
371 filter->add_fltr.rule_cfg.action_set.count = ++number;
374 case RTE_FLOW_ACTION_TYPE_MARK:
377 filter->mark_flag = 1;
378 mark_spec = actions->conf;
379 filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
381 filter_action->type = VIRTCHNL_ACTION_MARK;
382 filter_action->act_conf.mark_id = mark_spec->id;
384 filter->add_fltr.rule_cfg.action_set.count = ++number;
388 rte_flow_error_set(error, EINVAL,
389 RTE_FLOW_ERROR_TYPE_ACTION, actions,
/* Post-loop sanity checks on the accumulated action counts. */
395 if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
396 rte_flow_error_set(error, EINVAL,
397 RTE_FLOW_ERROR_TYPE_ACTION, actions,
398 "Action numbers exceed the maximum value");
403 rte_flow_error_set(error, EINVAL,
404 RTE_FLOW_ERROR_TYPE_ACTION, actions,
405 "Unsupported action combination");
410 rte_flow_error_set(error, EINVAL,
411 RTE_FLOW_ERROR_TYPE_ACTION, actions,
412 "Too many mark actions");
416 if (dest_num + mark_num == 0) {
417 rte_flow_error_set(error, EINVAL,
418 RTE_FLOW_ERROR_TYPE_ACTION, actions,
423 /* Mark only is equal to mark + passthru. */
425 filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
426 filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
427 filter->add_fltr.rule_cfg.action_set.count = ++number;
/*
 * Translate the rte_flow item list into a virtchnl protocol-header chain
 * inside "filter". For every item a proto_hdr slot is filled with the
 * header type, a bitmap of fully-masked fields (recorded both in the
 * virtchnl header and in the local input_set accumulator), and a raw
 * copy of the spec in hdr->buffer. Only all-ones ("exact match") field
 * masks are honored; partial masks on validated fields are rejected.
 * "l3" remembers the last L3 item so L4 copies can pick the right spec.
 * filter->input_set is checked against the pattern table by the caller.
 * NOTE(review): "break" statements, some closing braces and the early
 * returns after rte_flow_error_set() are elided from this chunk —
 * confirm against the full file before editing.
 */
434 iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
435 const struct rte_flow_item pattern[],
436 struct rte_flow_error *error,
437 struct iavf_fdir_conf *filter)
439 const struct rte_flow_item *item = pattern;
440 enum rte_flow_item_type item_type;
441 enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
442 const struct rte_flow_item_eth *eth_spec, *eth_mask;
443 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
444 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
445 const struct rte_flow_item_udp *udp_spec, *udp_mask;
446 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
447 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
448 const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
449 const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
450 const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
451 const struct rte_flow_item_esp *esp_spec, *esp_mask;
452 const struct rte_flow_item_ah *ah_spec, *ah_mask;
453 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
454 uint64_t input_set = IAVF_INSET_NONE;
456 enum rte_flow_item_type next_type;
460 struct virtchnl_proto_hdr *hdr;
/* All-ones IPv6 address used to detect exact-match src/dst masks. */
462 uint8_t ipv6_addr_mask[16] = {
463 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
464 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
467 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
/* Range matching ("last") is not supported by FDIR. */
469 rte_flow_error_set(error, EINVAL,
470 RTE_FLOW_ERROR_TYPE_ITEM, item,
471 "Not support range");
474 item_type = item->type;
477 case RTE_FLOW_ITEM_TYPE_ETH:
478 eth_spec = item->spec;
479 eth_mask = item->mask;
480 next_type = (item + 1)->type;
482 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
484 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
/* A bare ethertype pattern must carry spec and mask. */
486 if (next_type == RTE_FLOW_ITEM_TYPE_END &&
487 (!eth_spec || !eth_mask)) {
488 rte_flow_error_set(error, EINVAL,
489 RTE_FLOW_ERROR_TYPE_ITEM,
490 item, "NULL eth spec/mask.");
/* Matching on MAC addresses is not supported, only on ethertype. */
494 if (eth_spec && eth_mask) {
495 if (!rte_is_zero_ether_addr(&eth_mask->src) ||
496 !rte_is_zero_ether_addr(&eth_mask->dst)) {
497 rte_flow_error_set(error, EINVAL,
498 RTE_FLOW_ERROR_TYPE_ITEM, item,
499 "Invalid MAC_addr mask.");
504 if (eth_spec && eth_mask && eth_mask->type) {
505 if (eth_mask->type != RTE_BE16(0xffff)) {
506 rte_flow_error_set(error, EINVAL,
507 RTE_FLOW_ERROR_TYPE_ITEM,
508 item, "Invalid type mask.");
/* IPv4/IPv6 ethertypes must be expressed via the IPv4/IPv6 items. */
512 ether_type = rte_be_to_cpu_16(eth_spec->type);
513 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
514 ether_type == RTE_ETHER_TYPE_IPV6) {
515 rte_flow_error_set(error, EINVAL,
516 RTE_FLOW_ERROR_TYPE_ITEM,
518 "Unsupported ether_type.");
522 input_set |= IAVF_INSET_ETHERTYPE;
523 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
525 rte_memcpy(hdr->buffer,
526 eth_spec, sizeof(*eth_spec));
529 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
532 case RTE_FLOW_ITEM_TYPE_IPV4:
533 l3 = RTE_FLOW_ITEM_TYPE_IPV4;
534 ipv4_spec = item->spec;
535 ipv4_mask = item->mask;
537 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
539 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
541 if (ipv4_spec && ipv4_mask) {
/* Only TOS/proto/TTL/src/dst may be matched; reject other masks. */
542 if (ipv4_mask->hdr.version_ihl ||
543 ipv4_mask->hdr.total_length ||
544 ipv4_mask->hdr.packet_id ||
545 ipv4_mask->hdr.fragment_offset ||
546 ipv4_mask->hdr.hdr_checksum) {
547 rte_flow_error_set(error, EINVAL,
548 RTE_FLOW_ERROR_TYPE_ITEM,
549 item, "Invalid IPv4 mask.");
553 if (ipv4_mask->hdr.type_of_service ==
555 input_set |= IAVF_INSET_IPV4_TOS;
556 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
558 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
559 input_set |= IAVF_INSET_IPV4_PROTO;
560 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
562 if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
563 input_set |= IAVF_INSET_IPV4_TTL;
564 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
566 if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
567 input_set |= IAVF_INSET_IPV4_SRC;
568 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
570 if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
571 input_set |= IAVF_INSET_IPV4_DST;
572 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
575 rte_memcpy(hdr->buffer,
577 sizeof(ipv4_spec->hdr));
580 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
583 case RTE_FLOW_ITEM_TYPE_IPV6:
584 l3 = RTE_FLOW_ITEM_TYPE_IPV6;
585 ipv6_spec = item->spec;
586 ipv6_mask = item->mask;
588 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
590 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
592 if (ipv6_spec && ipv6_mask) {
593 if (ipv6_mask->hdr.payload_len) {
594 rte_flow_error_set(error, EINVAL,
595 RTE_FLOW_ERROR_TYPE_ITEM,
596 item, "Invalid IPv6 mask");
/* TC lives inside vtc_flow; require the full TC sub-field masked. */
600 if ((ipv6_mask->hdr.vtc_flow &
601 rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
602 == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
603 input_set |= IAVF_INSET_IPV6_TC;
604 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
606 if (ipv6_mask->hdr.proto == UINT8_MAX) {
607 input_set |= IAVF_INSET_IPV6_NEXT_HDR;
608 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
610 if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
611 input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
612 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
614 if (!memcmp(ipv6_mask->hdr.src_addr,
616 RTE_DIM(ipv6_mask->hdr.src_addr))) {
617 input_set |= IAVF_INSET_IPV6_SRC;
618 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
620 if (!memcmp(ipv6_mask->hdr.dst_addr,
622 RTE_DIM(ipv6_mask->hdr.dst_addr))) {
623 input_set |= IAVF_INSET_IPV6_DST;
624 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
627 rte_memcpy(hdr->buffer,
629 sizeof(ipv6_spec->hdr));
632 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
635 case RTE_FLOW_ITEM_TYPE_UDP:
636 udp_spec = item->spec;
637 udp_mask = item->mask;
639 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
641 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
643 if (udp_spec && udp_mask) {
644 if (udp_mask->hdr.dgram_len ||
645 udp_mask->hdr.dgram_cksum) {
646 rte_flow_error_set(error, EINVAL,
647 RTE_FLOW_ERROR_TYPE_ITEM, item,
652 if (udp_mask->hdr.src_port == UINT16_MAX) {
653 input_set |= IAVF_INSET_UDP_SRC_PORT;
654 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
656 if (udp_mask->hdr.dst_port == UINT16_MAX) {
657 input_set |= IAVF_INSET_UDP_DST_PORT;
658 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
/* Copy the L4 spec only once an L3 item established the family. */
661 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
662 rte_memcpy(hdr->buffer,
664 sizeof(udp_spec->hdr));
665 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
666 rte_memcpy(hdr->buffer,
668 sizeof(udp_spec->hdr));
671 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
674 case RTE_FLOW_ITEM_TYPE_TCP:
675 tcp_spec = item->spec;
676 tcp_mask = item->mask;
678 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
680 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
682 if (tcp_spec && tcp_mask) {
/* Only src/dst ports may be matched for TCP. */
683 if (tcp_mask->hdr.sent_seq ||
684 tcp_mask->hdr.recv_ack ||
685 tcp_mask->hdr.data_off ||
686 tcp_mask->hdr.tcp_flags ||
687 tcp_mask->hdr.rx_win ||
688 tcp_mask->hdr.cksum ||
689 tcp_mask->hdr.tcp_urp) {
690 rte_flow_error_set(error, EINVAL,
691 RTE_FLOW_ERROR_TYPE_ITEM, item,
696 if (tcp_mask->hdr.src_port == UINT16_MAX) {
697 input_set |= IAVF_INSET_TCP_SRC_PORT;
698 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
700 if (tcp_mask->hdr.dst_port == UINT16_MAX) {
701 input_set |= IAVF_INSET_TCP_DST_PORT;
702 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
705 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
706 rte_memcpy(hdr->buffer,
708 sizeof(tcp_spec->hdr));
709 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
710 rte_memcpy(hdr->buffer,
712 sizeof(tcp_spec->hdr));
715 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
718 case RTE_FLOW_ITEM_TYPE_SCTP:
719 sctp_spec = item->spec;
720 sctp_mask = item->mask;
722 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
724 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
726 if (sctp_spec && sctp_mask) {
727 if (sctp_mask->hdr.cksum) {
728 rte_flow_error_set(error, EINVAL,
729 RTE_FLOW_ERROR_TYPE_ITEM, item,
734 if (sctp_mask->hdr.src_port == UINT16_MAX) {
735 input_set |= IAVF_INSET_SCTP_SRC_PORT;
736 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
738 if (sctp_mask->hdr.dst_port == UINT16_MAX) {
739 input_set |= IAVF_INSET_SCTP_DST_PORT;
740 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
743 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
744 rte_memcpy(hdr->buffer,
746 sizeof(sctp_spec->hdr));
747 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
748 rte_memcpy(hdr->buffer,
750 sizeof(sctp_spec->hdr));
753 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
756 case RTE_FLOW_ITEM_TYPE_GTPU:
757 gtp_spec = item->spec;
758 gtp_mask = item->mask;
760 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
762 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
764 if (gtp_spec && gtp_mask) {
/* Only the TEID may be matched for GTP-U. */
765 if (gtp_mask->v_pt_rsv_flags ||
766 gtp_mask->msg_type ||
768 rte_flow_error_set(error, EINVAL,
769 RTE_FLOW_ERROR_TYPE_ITEM,
770 item, "Invalid GTP mask");
774 if (gtp_mask->teid == UINT32_MAX) {
775 input_set |= IAVF_INSET_GTPU_TEID;
776 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
779 rte_memcpy(hdr->buffer,
780 gtp_spec, sizeof(*gtp_spec));
783 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
786 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
787 gtp_psc_spec = item->spec;
788 gtp_psc_mask = item->mask;
790 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
792 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
794 if (gtp_psc_spec && gtp_psc_mask) {
795 if (gtp_psc_mask->qfi == UINT8_MAX) {
796 input_set |= IAVF_INSET_GTPU_QFI;
797 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
800 rte_memcpy(hdr->buffer, gtp_psc_spec,
801 sizeof(*gtp_psc_spec));
804 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
807 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
808 l2tpv3oip_spec = item->spec;
809 l2tpv3oip_mask = item->mask;
811 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
813 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
815 if (l2tpv3oip_spec && l2tpv3oip_mask) {
816 if (l2tpv3oip_mask->session_id == UINT32_MAX) {
817 input_set |= IAVF_L2TPV3OIP_SESSION_ID;
818 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
821 rte_memcpy(hdr->buffer, l2tpv3oip_spec,
822 sizeof(*l2tpv3oip_spec));
825 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
828 case RTE_FLOW_ITEM_TYPE_ESP:
829 esp_spec = item->spec;
830 esp_mask = item->mask;
832 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
834 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
836 if (esp_spec && esp_mask) {
837 if (esp_mask->hdr.spi == UINT32_MAX) {
838 input_set |= IAVF_INSET_ESP_SPI;
839 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
842 rte_memcpy(hdr->buffer, &esp_spec->hdr,
843 sizeof(esp_spec->hdr));
846 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
849 case RTE_FLOW_ITEM_TYPE_AH:
850 ah_spec = item->spec;
851 ah_mask = item->mask;
853 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
855 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
857 if (ah_spec && ah_mask) {
858 if (ah_mask->spi == UINT32_MAX) {
859 input_set |= IAVF_INSET_AH_SPI;
860 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
863 rte_memcpy(hdr->buffer, ah_spec,
867 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
870 case RTE_FLOW_ITEM_TYPE_PFCP:
871 pfcp_spec = item->spec;
872 pfcp_mask = item->mask;
874 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
876 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
878 if (pfcp_spec && pfcp_mask) {
879 if (pfcp_mask->s_field == UINT8_MAX) {
880 input_set |= IAVF_INSET_PFCP_S_FIELD;
881 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
884 rte_memcpy(hdr->buffer, pfcp_spec,
888 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
891 case RTE_FLOW_ITEM_TYPE_VOID:
895 rte_flow_error_set(error, EINVAL,
896 RTE_FLOW_ERROR_TYPE_ITEM, item,
897 "Invalid pattern item.");
/* The virtchnl message has a fixed-size proto_hdr array. */
902 if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
903 rte_flow_error_set(error, EINVAL,
904 RTE_FLOW_ERROR_TYPE_ITEM, item,
905 "Protocol header layers exceed the maximum value");
909 filter->input_set = input_set;
/*
 * Top-level parse entry (parse_pattern_action): match the pattern
 * against the engine's table, fill vf->fdir.conf from the pattern and
 * actions, and verify the collected input set is non-empty and a subset
 * of the matched table entry's input_set_mask.
 * NOTE(review): the early returns after each step and the final
 * meta/return handling are elided from this chunk. The parse result
 * lives in the shared vf->fdir.conf — presumably copied by the create
 * hook before the next parse; confirm against the full file.
 */
915 iavf_fdir_parse(struct iavf_adapter *ad,
916 struct iavf_pattern_match_item *array,
918 const struct rte_flow_item pattern[],
919 const struct rte_flow_action actions[],
921 struct rte_flow_error *error)
923 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
924 struct iavf_fdir_conf *filter = &vf->fdir.conf;
925 struct iavf_pattern_match_item *item = NULL;
929 memset(filter, 0, sizeof(*filter));
931 item = iavf_search_pattern_match_item(pattern, array, array_len, error);
935 ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
/* Every matched field must be permitted by the pattern's mask. */
939 input_set = filter->input_set;
940 if (!input_set || input_set & ~item->input_set_mask) {
941 rte_flow_error_set(error, EINVAL,
942 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
943 "Invalid input set");
948 ret = iavf_fdir_parse_action(ad, actions, error, filter);
/*
 * Parser definition bound to the FDIR engine; runs at the distributor
 * stage of the generic flow framework.
 */
960 static struct iavf_flow_parser iavf_fdir_parser = {
961 .engine = &iavf_fdir_engine,
962 .array = iavf_fdir_pattern,
963 .array_len = RTE_DIM(iavf_fdir_pattern),
964 .parse_pattern_action = iavf_fdir_parse,
965 .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
/* Constructor: register the FDIR engine with the flow framework at load time. */
968 RTE_INIT(iavf_fdir_engine_register)
970 iavf_register_flow_engine(&iavf_fdir_engine);