/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "iavf_rxtx.h"
#define IAVF_FDIR_MAX_QREGION_SIZE 128

#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
#define IAVF_FDIR_INSET_ETH (\
	IAVF_INSET_ETHERTYPE)
#define IAVF_FDIR_INSET_ETH_IPV4 (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
	IAVF_INSET_IPV4_TTL)
#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
	IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
#define IAVF_FDIR_INSET_IPV4_GTPU (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_GTPU_TEID)
#define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
#define IAVF_FDIR_INSET_IPV6_GTPU (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_GTPU_TEID)
#define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_L2TPV3OIP (\
	IAVF_L2TPV3OIP_SESSION_ID)
#define IAVF_FDIR_INSET_ESP (\
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
	IAVF_INSET_AH_SPI)
#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_ESP_SPI)
#define IAVF_FDIR_INSET_PFCP (\
	IAVF_INSET_PFCP_S_FIELD)
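
/* Patterns supported by the FDIR engine. Each entry pairs an rte_flow
 * pattern list with the input-set bits that may be programmed for it;
 * any field outside the mask is rejected at parse time.
 */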
static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
	{iavf_pattern_ethertype,	IAVF_FDIR_INSET_ETH,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4,		IAVF_FDIR_INSET_ETH_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp,	IAVF_FDIR_INSET_ETH_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_tcp,	IAVF_FDIR_INSET_ETH_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_sctp,	IAVF_FDIR_INSET_ETH_IPV4_SCTP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6,		IAVF_FDIR_INSET_ETH_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp,	IAVF_FDIR_INSET_ETH_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_tcp,	IAVF_FDIR_INSET_ETH_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_sctp,	IAVF_FDIR_INSET_ETH_IPV6_SCTP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu,	IAVF_FDIR_INSET_IPV4_GTPU,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh,	IAVF_FDIR_INSET_IPV4_GTPU_EH,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gtpu,	IAVF_FDIR_INSET_IPV6_GTPU,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gtpu_eh,	IAVF_FDIR_INSET_IPV6_GTPU_EH,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_l2tpv3,	IAVF_FDIR_INSET_L2TPV3OIP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_l2tpv3,	IAVF_FDIR_INSET_L2TPV3OIP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_esp,	IAVF_FDIR_INSET_ESP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_esp,	IAVF_FDIR_INSET_ESP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_ah,	IAVF_FDIR_INSET_AH,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_ah,	IAVF_FDIR_INSET_AH,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_FDIR_INSET_IPV4_NATT_ESP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_FDIR_INSET_IPV6_NATT_ESP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_pfcp,	IAVF_FDIR_INSET_PFCP,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_pfcp,	IAVF_FDIR_INSET_PFCP,		IAVF_INSET_NONE},
};
static struct iavf_flow_parser iavf_fdir_parser;
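
/* Register the FDIR parser with the generic flow framework, but only
 * when the PF has granted the VIRTCHNL_VF_OFFLOAD_FDIR_PF capability.
 */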
static int
iavf_fdir_init(struct iavf_adapter *ad)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct iavf_flow_parser *parser;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
		parser = &iavf_fdir_parser;
	else
		return -ENOTSUP;

	return iavf_register_parser(parser, ad);
}
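
/* Unregister the FDIR parser when the port is torn down. */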
static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
	iavf_unregister_parser(&iavf_fdir_parser, ad);
}
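
/* Create a flow rule: program the parsed filter through virtchnl
 * (iavf_fdir_add), enable the FDIR Rx mark handling when a MARK action
 * is present, and keep a private copy of the filter in flow->rule.
 */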
static int
iavf_fdir_create(struct iavf_adapter *ad,
		struct rte_flow *flow,
		void *meta,
		struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter = meta;
	struct iavf_fdir_conf *rule;
	int ret;

	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
	if (!rule) {
		rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to allocate memory for fdir rule");
		return -rte_errno;
	}

	ret = iavf_fdir_add(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to add filter rule.");
		rte_free(rule);
		return -rte_errno;
	}

	if (filter->mark_flag == 1)
		iavf_fdir_rx_proc_enable(ad, 1);

	rte_memcpy(rule, filter, sizeof(*rule));
	flow->rule = rule;

	return 0;
}
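
/* Destroy a flow rule: remove the filter from hardware and release the
 * private copy referenced by flow->rule.
 */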
static int
iavf_fdir_destroy(struct iavf_adapter *ad,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter;
	int ret;

	filter = (struct iavf_fdir_conf *)flow->rule;

	ret = iavf_fdir_del(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to delete filter rule.");
		return -rte_errno;
	}

	if (filter->mark_flag == 1)
		iavf_fdir_rx_proc_enable(ad, 0);

	flow->rule = NULL;
	rte_free(filter);

	return 0;
}
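
/* Validate a parsed filter against the PF without programming it. */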
static int
iavf_fdir_validation(struct iavf_adapter *ad,
		__rte_unused struct rte_flow *flow,
		void *meta,
		struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter = meta;
	int ret;

	ret = iavf_fdir_check(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to validate filter rule.");
		return -rte_errno;
	}

	return 0;
}
static struct iavf_flow_engine iavf_fdir_engine = {
	.init = iavf_fdir_init,
	.uninit = iavf_fdir_uninit,
	.create = iavf_fdir_create,
	.destroy = iavf_fdir_destroy,
	.validation = iavf_fdir_validation,
	.type = IAVF_FLOW_ENGINE_FDIR,
};
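
/* Translate an RSS action into a queue-region destination. The queues
 * must be contiguous, within the device Rx queue count, and the region
 * size must be a power of two no larger than IAVF_FDIR_MAX_QREGION_SIZE;
 * the region is encoded as log2(queue_num) alongside the first queue index.
 */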
static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
			struct rte_flow_error *error,
			const struct rte_flow_action *act,
			struct virtchnl_filter_action *filter_action)
{
	const struct rte_flow_action_rss *rss = act->conf;
	uint32_t i;

	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"Invalid action.");
		return -rte_errno;
	}

	if (rss->queue_num <= 1) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"Queue region size can't be 0 or 1.");
		return -rte_errno;
	}

	/* check if queue index for queue region is continuous */
	for (i = 0; i < rss->queue_num - 1; i++) {
		if (rss->queue[i + 1] != rss->queue[i] + 1) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, act,
					"Discontinuous queue region");
			return -rte_errno;
		}
	}

	if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"Invalid queue region indexes.");
		return -rte_errno;
	}

	if (!(rte_is_power_of_2(rss->queue_num) &&
	      rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"The region size should be any of the following values:"
				"1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
				"of queues does not exceed the VSI allocation.");
		return -rte_errno;
	}

	filter_action->act_conf.queue.index = rss->queue[0];
	filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

	return 0;
}
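
/* Walk the action list and build the virtchnl action set. Exactly one
 * destination action (PASSTHRU/DROP/QUEUE/RSS queue region) and at most
 * one MARK action are accepted; a MARK-only rule is completed with an
 * implicit PASSTHRU destination.
 */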
static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error,
			struct iavf_fdir_conf *filter)
{
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	uint32_t dest_num = 0;
	uint32_t mark_num = 0;
	int number = 0;
	int ret;

	struct virtchnl_filter_action *filter_action;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
			filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
			filter_action->type = VIRTCHNL_ACTION_DROP;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			act_q = actions->conf;
			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
			filter_action->type = VIRTCHNL_ACTION_QUEUE;
			filter_action->act_conf.queue.index = act_q->index;

			if (filter_action->act_conf.queue.index >=
				ad->eth_dev->data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					actions, "Invalid queue for FDIR.");
				return -rte_errno;
			}

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
			filter_action->type = VIRTCHNL_ACTION_Q_REGION;

			ret = iavf_fdir_parse_action_qregion(ad,
						error, actions, filter_action);
			if (ret)
				return ret;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			mark_num++;

			filter->mark_flag = 1;
			mark_spec = actions->conf;
			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
			filter_action->type = VIRTCHNL_ACTION_MARK;
			filter_action->act_conf.mark_id = mark_spec->id;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Invalid action.");
			return -rte_errno;
		}
	}

	if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, actions,
			"Action numbers exceed the maximum value");
		return -rte_errno;
	}

	if (dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, actions,
			"Unsupported action combination");
		return -rte_errno;
	}

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, actions,
			"Too many mark actions");
		return -rte_errno;
	}

	if (dest_num + mark_num == 0) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, actions,
			"Empty action");
		return -rte_errno;
	}

	/* Mark only is equal to mark + passthru. */
	if (dest_num == 0) {
		filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
		filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
		filter->add_fltr.rule_cfg.action_set.count = ++number;
	}

	return 0;
}
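
/* Walk the pattern items and build the virtchnl protocol header list.
 * For every item, fully-masked fields are added to the input set and the
 * matching spec bytes are copied into the header buffer sent to the PF.
 */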
static int
iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
			const struct rte_flow_item pattern[],
			struct rte_flow_error *error,
			struct iavf_fdir_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
	const struct rte_flow_item_esp *esp_spec, *esp_mask;
	const struct rte_flow_item_ah *ah_spec, *ah_mask;
	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
	uint64_t input_set = IAVF_INSET_NONE;
	enum rte_flow_item_type next_type;
	uint16_t ether_type;
	int layer = 0;
	struct virtchnl_proto_hdr *hdr;

	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Range not supported");
			return -rte_errno;
		}

		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;
			next_type = (item + 1)->type;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);

			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
				(!eth_spec || !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "NULL eth spec/mask.");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				if (!rte_is_zero_ether_addr(&eth_mask->src) ||
				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM, item,
						"Invalid MAC_addr mask.");
					return -rte_errno;
				}
			}

			if (eth_spec && eth_mask && eth_mask->type) {
				if (eth_mask->type != RTE_BE16(0xffff)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Invalid type mask.");
					return -rte_errno;
				}

				ether_type = rte_be_to_cpu_16(eth_spec->type);
				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
					ether_type == RTE_ETHER_TYPE_IPV6) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Unsupported ether_type.");
					return -rte_errno;
				}

				input_set |= IAVF_INSET_ETHERTYPE;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);

				rte_memcpy(hdr->buffer,
					eth_spec, sizeof(*eth_spec));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

			if (ipv4_spec && ipv4_mask) {
				if (ipv4_mask->hdr.version_ihl ||
					ipv4_mask->hdr.total_length ||
					ipv4_mask->hdr.packet_id ||
					ipv4_mask->hdr.fragment_offset ||
					ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Invalid IPv4 mask.");
					return -rte_errno;
				}

				if (ipv4_mask->hdr.type_of_service ==
					UINT8_MAX) {
					input_set |= IAVF_INSET_IPV4_TOS;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
				}

				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
					input_set |= IAVF_INSET_IPV4_PROTO;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
				}

				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
					input_set |= IAVF_INSET_IPV4_TTL;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
				}

				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
					input_set |= IAVF_INSET_IPV4_SRC;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
				}

				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
					input_set |= IAVF_INSET_IPV4_DST;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
				}

				rte_memcpy(hdr->buffer,
					&ipv4_spec->hdr,
					sizeof(ipv4_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

			if (ipv6_spec && ipv6_mask) {
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Invalid IPv6 mask");
					return -rte_errno;
				}

				if ((ipv6_mask->hdr.vtc_flow &
					rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
					== rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
					input_set |= IAVF_INSET_IPV6_TC;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
				}

				if (ipv6_mask->hdr.proto == UINT8_MAX) {
					input_set |= IAVF_INSET_IPV6_NEXT_HDR;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
				}

				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
					input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
				}

				if (!memcmp(ipv6_mask->hdr.src_addr,
					ipv6_addr_mask,
					RTE_DIM(ipv6_mask->hdr.src_addr))) {
					input_set |= IAVF_INSET_IPV6_SRC;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
				}

				if (!memcmp(ipv6_mask->hdr.dst_addr,
					ipv6_addr_mask,
					RTE_DIM(ipv6_mask->hdr.dst_addr))) {
					input_set |= IAVF_INSET_IPV6_DST;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
				}

				rte_memcpy(hdr->buffer,
					&ipv6_spec->hdr,
					sizeof(ipv6_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

			if (udp_spec && udp_mask) {
				if (udp_mask->hdr.dgram_len ||
					udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM, item,
						"Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_UDP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
				}

				if (udp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_UDP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
					rte_memcpy(hdr->buffer,
						&udp_spec->hdr,
						sizeof(udp_spec->hdr));
				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						&udp_spec->hdr,
						sizeof(udp_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

			if (tcp_spec && tcp_mask) {
				if (tcp_mask->hdr.sent_seq ||
					tcp_mask->hdr.recv_ack ||
					tcp_mask->hdr.data_off ||
					tcp_mask->hdr.tcp_flags ||
					tcp_mask->hdr.rx_win ||
					tcp_mask->hdr.cksum ||
					tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM, item,
						"Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_TCP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
				}

				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_TCP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
					rte_memcpy(hdr->buffer,
						&tcp_spec->hdr,
						sizeof(tcp_spec->hdr));
				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						&tcp_spec->hdr,
						sizeof(tcp_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

			if (sctp_spec && sctp_mask) {
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM, item,
						"Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_SCTP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
				}

				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_SCTP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
					rte_memcpy(hdr->buffer,
						&sctp_spec->hdr,
						sizeof(sctp_spec->hdr));
				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						&sctp_spec->hdr,
						sizeof(sctp_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_GTPU:
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);

			if (gtp_spec && gtp_mask) {
				if (gtp_mask->v_pt_rsv_flags ||
					gtp_mask->msg_type ||
					gtp_mask->msg_len) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Invalid GTP mask");
					return -rte_errno;
				}

				if (gtp_mask->teid == UINT32_MAX) {
					input_set |= IAVF_INSET_GTPU_TEID;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
				}

				rte_memcpy(hdr->buffer,
					gtp_spec, sizeof(*gtp_spec));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);

			if (gtp_psc_spec && gtp_psc_mask) {
				if (gtp_psc_mask->qfi == UINT8_MAX) {
					input_set |= IAVF_INSET_GTPU_QFI;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
				}

				rte_memcpy(hdr->buffer, gtp_psc_spec,
					sizeof(*gtp_psc_spec));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
			l2tpv3oip_spec = item->spec;
			l2tpv3oip_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);

			if (l2tpv3oip_spec && l2tpv3oip_mask) {
				if (l2tpv3oip_mask->session_id == UINT32_MAX) {
					input_set |= IAVF_L2TPV3OIP_SESSION_ID;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
				}

				rte_memcpy(hdr->buffer, l2tpv3oip_spec,
					sizeof(*l2tpv3oip_spec));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_ESP:
			esp_spec = item->spec;
			esp_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

			if (esp_spec && esp_mask) {
				if (esp_mask->hdr.spi == UINT32_MAX) {
					input_set |= IAVF_INSET_ESP_SPI;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
				}

				rte_memcpy(hdr->buffer, &esp_spec->hdr,
					sizeof(esp_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_AH:
			ah_spec = item->spec;
			ah_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

			if (ah_spec && ah_mask) {
				if (ah_mask->spi == UINT32_MAX) {
					input_set |= IAVF_INSET_AH_SPI;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
				}

				rte_memcpy(hdr->buffer, ah_spec,
					sizeof(*ah_spec));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_PFCP:
			pfcp_spec = item->spec;
			pfcp_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);

			if (pfcp_spec && pfcp_mask) {
				if (pfcp_mask->s_field == UINT8_MAX) {
					input_set |= IAVF_INSET_PFCP_S_FIELD;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
				}

				rte_memcpy(hdr->buffer, pfcp_spec,
					sizeof(*pfcp_spec));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_VOID:
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Invalid pattern item.");
			return -rte_errno;
		}
	}

	if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Protocol header layers exceed the maximum value");
		return -rte_errno;
	}

	filter->input_set = input_set;

	return 0;
}
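
/* Top-level parse entry: match the pattern against the supported table,
 * fill the filter from the pattern and actions, and reject any input set
 * not covered by the matched entry.
 */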
static int
iavf_fdir_parse(struct iavf_adapter *ad,
		struct iavf_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta,
		struct rte_flow_error *error)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct iavf_fdir_conf *filter = &vf->fdir.conf;
	struct iavf_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));

	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
	if (ret)
		goto error;

	input_set = filter->input_set;
	if (!input_set || input_set & ~item->input_set_mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
				"Invalid input set");
		ret = -rte_errno;
		goto error;
	}

	ret = iavf_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		goto error;

	if (meta)
		*meta = filter;

error:
	rte_free(item);
	return ret;
}
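
/* FDIR parser registered for the distributor stage of the flow framework. */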
static struct iavf_flow_parser iavf_fdir_parser = {
	.engine = &iavf_fdir_engine,
	.array = iavf_fdir_pattern,
	.array_len = RTE_DIM(iavf_fdir_pattern),
	.parse_pattern_action = iavf_fdir_parse,
	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
};
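
/* Register the FDIR engine with the flow framework at constructor time. */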
RTE_INIT(iavf_fdir_engine_register)
{
	iavf_register_flow_engine(&iavf_fdir_engine);
}