1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_malloc.h>
16 #include <rte_tailq.h>
19 #include "iavf_generic_flow.h"
21 #include "iavf_rxtx.h"
23 #define IAVF_FDIR_MAX_QREGION_SIZE 128
25 #define IAVF_FDIR_IPV6_TC_OFFSET 20
26 #define IAVF_IPV6_TC_MASK (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
28 #define IAVF_FDIR_INSET_ETH (\
31 #define IAVF_FDIR_INSET_ETH_IPV4 (\
32 IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
33 IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
36 #define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
37 IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
38 IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
39 IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
41 #define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
42 IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
43 IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
44 IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
46 #define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
47 IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
48 IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
49 IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
51 #define IAVF_FDIR_INSET_ETH_IPV6 (\
52 IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
53 IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
54 IAVF_INSET_IPV6_HOP_LIMIT)
56 #define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
57 IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
58 IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
59 IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
61 #define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
62 IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
63 IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
64 IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
66 #define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
67 IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
68 IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
69 IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
71 #define IAVF_FDIR_INSET_IPV4_GTPU (\
72 IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
75 #define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
76 IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
77 IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
79 #define IAVF_FDIR_INSET_IPV6_GTPU (\
80 IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
83 #define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
84 IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
85 IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
87 #define IAVF_FDIR_INSET_L2TPV3OIP (\
88 IAVF_L2TPV3OIP_SESSION_ID)
90 #define IAVF_FDIR_INSET_ESP (\
93 #define IAVF_FDIR_INSET_AH (\
96 #define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
97 IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
100 #define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
101 IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
104 #define IAVF_FDIR_INSET_PFCP (\
105 IAVF_INSET_PFCP_S_FIELD)
107 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
108 {iavf_pattern_ethertype, IAVF_FDIR_INSET_ETH, IAVF_INSET_NONE},
109 {iavf_pattern_eth_ipv4, IAVF_FDIR_INSET_ETH_IPV4, IAVF_INSET_NONE},
110 {iavf_pattern_eth_ipv4_udp, IAVF_FDIR_INSET_ETH_IPV4_UDP, IAVF_INSET_NONE},
111 {iavf_pattern_eth_ipv4_tcp, IAVF_FDIR_INSET_ETH_IPV4_TCP, IAVF_INSET_NONE},
112 {iavf_pattern_eth_ipv4_sctp, IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE},
113 {iavf_pattern_eth_ipv6, IAVF_FDIR_INSET_ETH_IPV6, IAVF_INSET_NONE},
114 {iavf_pattern_eth_ipv6_udp, IAVF_FDIR_INSET_ETH_IPV6_UDP, IAVF_INSET_NONE},
115 {iavf_pattern_eth_ipv6_tcp, IAVF_FDIR_INSET_ETH_IPV6_TCP, IAVF_INSET_NONE},
116 {iavf_pattern_eth_ipv6_sctp, IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE},
117 {iavf_pattern_eth_ipv4_gtpu, IAVF_FDIR_INSET_IPV4_GTPU, IAVF_INSET_NONE},
118 {iavf_pattern_eth_ipv4_gtpu_eh, IAVF_FDIR_INSET_IPV4_GTPU_EH, IAVF_INSET_NONE},
119 {iavf_pattern_eth_ipv6_gtpu, IAVF_FDIR_INSET_IPV6_GTPU, IAVF_INSET_NONE},
120 {iavf_pattern_eth_ipv6_gtpu_eh, IAVF_FDIR_INSET_IPV6_GTPU_EH, IAVF_INSET_NONE},
121 {iavf_pattern_eth_ipv4_l2tpv3, IAVF_FDIR_INSET_L2TPV3OIP, IAVF_INSET_NONE},
122 {iavf_pattern_eth_ipv6_l2tpv3, IAVF_FDIR_INSET_L2TPV3OIP, IAVF_INSET_NONE},
123 {iavf_pattern_eth_ipv4_esp, IAVF_FDIR_INSET_ESP, IAVF_INSET_NONE},
124 {iavf_pattern_eth_ipv6_esp, IAVF_FDIR_INSET_ESP, IAVF_INSET_NONE},
125 {iavf_pattern_eth_ipv4_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE},
126 {iavf_pattern_eth_ipv6_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE},
127 {iavf_pattern_eth_ipv4_udp_esp, IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE},
128 {iavf_pattern_eth_ipv6_udp_esp, IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE},
129 {iavf_pattern_eth_ipv4_pfcp, IAVF_FDIR_INSET_PFCP, IAVF_INSET_NONE},
130 {iavf_pattern_eth_ipv6_pfcp, IAVF_FDIR_INSET_PFCP, IAVF_INSET_NONE},
133 static struct iavf_flow_parser iavf_fdir_parser;
136 iavf_fdir_init(struct iavf_adapter *ad)
138 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
139 struct iavf_flow_parser *parser;
144 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
145 parser = &iavf_fdir_parser;
149 return iavf_register_parser(parser, ad);
153 iavf_fdir_uninit(struct iavf_adapter *ad)
155 iavf_unregister_parser(&iavf_fdir_parser, ad);
159 iavf_fdir_create(struct iavf_adapter *ad,
160 struct rte_flow *flow,
162 struct rte_flow_error *error)
164 struct iavf_fdir_conf *filter = meta;
165 struct iavf_fdir_conf *rule;
168 rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
170 rte_flow_error_set(error, ENOMEM,
171 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
172 "Failed to allocate memory for fdir rule");
176 ret = iavf_fdir_add(ad, filter);
178 rte_flow_error_set(error, -ret,
179 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
180 "Failed to add filter rule.");
184 if (filter->mark_flag == 1)
185 iavf_fdir_rx_proc_enable(ad, 1);
187 rte_memcpy(rule, filter, sizeof(*rule));
198 iavf_fdir_destroy(struct iavf_adapter *ad,
199 struct rte_flow *flow,
200 struct rte_flow_error *error)
202 struct iavf_fdir_conf *filter;
205 filter = (struct iavf_fdir_conf *)flow->rule;
207 ret = iavf_fdir_del(ad, filter);
209 rte_flow_error_set(error, -ret,
210 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
211 "Failed to delete filter rule.");
215 if (filter->mark_flag == 1)
216 iavf_fdir_rx_proc_enable(ad, 0);
225 iavf_fdir_validation(struct iavf_adapter *ad,
226 __rte_unused struct rte_flow *flow,
228 struct rte_flow_error *error)
230 struct iavf_fdir_conf *filter = meta;
233 ret = iavf_fdir_check(ad, filter);
235 rte_flow_error_set(error, -ret,
236 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
237 "Failed to validate filter rule.");
244 static struct iavf_flow_engine iavf_fdir_engine = {
245 .init = iavf_fdir_init,
246 .uninit = iavf_fdir_uninit,
247 .create = iavf_fdir_create,
248 .destroy = iavf_fdir_destroy,
249 .validation = iavf_fdir_validation,
250 .type = IAVF_FLOW_ENGINE_FDIR,
254 iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
255 struct rte_flow_error *error,
256 const struct rte_flow_action *act,
257 struct virtchnl_filter_action *filter_action)
259 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
260 const struct rte_flow_action_rss *rss = act->conf;
263 if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
264 rte_flow_error_set(error, EINVAL,
265 RTE_FLOW_ERROR_TYPE_ACTION, act,
270 if (rss->queue_num <= 1) {
271 rte_flow_error_set(error, EINVAL,
272 RTE_FLOW_ERROR_TYPE_ACTION, act,
273 "Queue region size can't be 0 or 1.");
277 /* check if queue index for queue region is continuous */
278 for (i = 0; i < rss->queue_num - 1; i++) {
279 if (rss->queue[i + 1] != rss->queue[i] + 1) {
280 rte_flow_error_set(error, EINVAL,
281 RTE_FLOW_ERROR_TYPE_ACTION, act,
282 "Discontinuous queue region");
287 if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
288 rte_flow_error_set(error, EINVAL,
289 RTE_FLOW_ERROR_TYPE_ACTION, act,
290 "Invalid queue region indexes.");
294 if (!(rte_is_power_of_2(rss->queue_num) &&
295 rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
296 rte_flow_error_set(error, EINVAL,
297 RTE_FLOW_ERROR_TYPE_ACTION, act,
298 "The region size should be any of the following values:"
299 "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
300 "of queues do not exceed the VSI allocation.");
304 if (rss->queue_num > vf->max_rss_qregion) {
305 rte_flow_error_set(error, EINVAL,
306 RTE_FLOW_ERROR_TYPE_ACTION, act,
307 "The region size cannot be large than the supported max RSS queue region");
311 filter_action->act_conf.queue.index = rss->queue[0];
312 filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;
318 iavf_fdir_parse_action(struct iavf_adapter *ad,
319 const struct rte_flow_action actions[],
320 struct rte_flow_error *error,
321 struct iavf_fdir_conf *filter)
323 const struct rte_flow_action_queue *act_q;
324 const struct rte_flow_action_mark *mark_spec = NULL;
325 uint32_t dest_num = 0;
326 uint32_t mark_num = 0;
330 struct virtchnl_filter_action *filter_action;
332 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
333 switch (actions->type) {
334 case RTE_FLOW_ACTION_TYPE_VOID:
337 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
340 filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
342 filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
344 filter->add_fltr.rule_cfg.action_set.count = ++number;
347 case RTE_FLOW_ACTION_TYPE_DROP:
350 filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
352 filter_action->type = VIRTCHNL_ACTION_DROP;
354 filter->add_fltr.rule_cfg.action_set.count = ++number;
357 case RTE_FLOW_ACTION_TYPE_QUEUE:
360 act_q = actions->conf;
361 filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
363 filter_action->type = VIRTCHNL_ACTION_QUEUE;
364 filter_action->act_conf.queue.index = act_q->index;
366 if (filter_action->act_conf.queue.index >=
367 ad->eth_dev->data->nb_rx_queues) {
368 rte_flow_error_set(error, EINVAL,
369 RTE_FLOW_ERROR_TYPE_ACTION,
370 actions, "Invalid queue for FDIR.");
374 filter->add_fltr.rule_cfg.action_set.count = ++number;
377 case RTE_FLOW_ACTION_TYPE_RSS:
380 filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
382 filter_action->type = VIRTCHNL_ACTION_Q_REGION;
384 ret = iavf_fdir_parse_action_qregion(ad,
385 error, actions, filter_action);
389 filter->add_fltr.rule_cfg.action_set.count = ++number;
392 case RTE_FLOW_ACTION_TYPE_MARK:
395 filter->mark_flag = 1;
396 mark_spec = actions->conf;
397 filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
399 filter_action->type = VIRTCHNL_ACTION_MARK;
400 filter_action->act_conf.mark_id = mark_spec->id;
402 filter->add_fltr.rule_cfg.action_set.count = ++number;
406 rte_flow_error_set(error, EINVAL,
407 RTE_FLOW_ERROR_TYPE_ACTION, actions,
413 if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
414 rte_flow_error_set(error, EINVAL,
415 RTE_FLOW_ERROR_TYPE_ACTION, actions,
416 "Action numbers exceed the maximum value");
421 rte_flow_error_set(error, EINVAL,
422 RTE_FLOW_ERROR_TYPE_ACTION, actions,
423 "Unsupported action combination");
428 rte_flow_error_set(error, EINVAL,
429 RTE_FLOW_ERROR_TYPE_ACTION, actions,
430 "Too many mark actions");
434 if (dest_num + mark_num == 0) {
435 rte_flow_error_set(error, EINVAL,
436 RTE_FLOW_ERROR_TYPE_ACTION, actions,
441 /* Mark only is equal to mark + passthru. */
443 filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
444 filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
445 filter->add_fltr.rule_cfg.action_set.count = ++number;
452 iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
453 const struct rte_flow_item pattern[],
454 struct rte_flow_error *error,
455 struct iavf_fdir_conf *filter)
457 const struct rte_flow_item *item = pattern;
458 enum rte_flow_item_type item_type;
459 enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
460 const struct rte_flow_item_eth *eth_spec, *eth_mask;
461 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
462 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
463 const struct rte_flow_item_udp *udp_spec, *udp_mask;
464 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
465 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
466 const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
467 const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
468 const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
469 const struct rte_flow_item_esp *esp_spec, *esp_mask;
470 const struct rte_flow_item_ah *ah_spec, *ah_mask;
471 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
472 uint64_t input_set = IAVF_INSET_NONE;
474 enum rte_flow_item_type next_type;
478 struct virtchnl_proto_hdr *hdr;
480 uint8_t ipv6_addr_mask[16] = {
481 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
482 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
485 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
487 rte_flow_error_set(error, EINVAL,
488 RTE_FLOW_ERROR_TYPE_ITEM, item,
489 "Not support range");
492 item_type = item->type;
495 case RTE_FLOW_ITEM_TYPE_ETH:
496 eth_spec = item->spec;
497 eth_mask = item->mask;
498 next_type = (item + 1)->type;
500 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
502 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
504 if (next_type == RTE_FLOW_ITEM_TYPE_END &&
505 (!eth_spec || !eth_mask)) {
506 rte_flow_error_set(error, EINVAL,
507 RTE_FLOW_ERROR_TYPE_ITEM,
508 item, "NULL eth spec/mask.");
512 if (eth_spec && eth_mask) {
513 if (!rte_is_zero_ether_addr(ð_mask->src) ||
514 !rte_is_zero_ether_addr(ð_mask->dst)) {
515 rte_flow_error_set(error, EINVAL,
516 RTE_FLOW_ERROR_TYPE_ITEM, item,
517 "Invalid MAC_addr mask.");
522 if (eth_spec && eth_mask && eth_mask->type) {
523 if (eth_mask->type != RTE_BE16(0xffff)) {
524 rte_flow_error_set(error, EINVAL,
525 RTE_FLOW_ERROR_TYPE_ITEM,
526 item, "Invalid type mask.");
530 ether_type = rte_be_to_cpu_16(eth_spec->type);
531 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
532 ether_type == RTE_ETHER_TYPE_IPV6) {
533 rte_flow_error_set(error, EINVAL,
534 RTE_FLOW_ERROR_TYPE_ITEM,
536 "Unsupported ether_type.");
540 input_set |= IAVF_INSET_ETHERTYPE;
541 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
543 rte_memcpy(hdr->buffer,
544 eth_spec, sizeof(*eth_spec));
547 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
550 case RTE_FLOW_ITEM_TYPE_IPV4:
551 l3 = RTE_FLOW_ITEM_TYPE_IPV4;
552 ipv4_spec = item->spec;
553 ipv4_mask = item->mask;
555 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
557 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
559 if (ipv4_spec && ipv4_mask) {
560 if (ipv4_mask->hdr.version_ihl ||
561 ipv4_mask->hdr.total_length ||
562 ipv4_mask->hdr.packet_id ||
563 ipv4_mask->hdr.fragment_offset ||
564 ipv4_mask->hdr.hdr_checksum) {
565 rte_flow_error_set(error, EINVAL,
566 RTE_FLOW_ERROR_TYPE_ITEM,
567 item, "Invalid IPv4 mask.");
571 if (ipv4_mask->hdr.type_of_service ==
573 input_set |= IAVF_INSET_IPV4_TOS;
574 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
576 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
577 input_set |= IAVF_INSET_IPV4_PROTO;
578 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
580 if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
581 input_set |= IAVF_INSET_IPV4_TTL;
582 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
584 if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
585 input_set |= IAVF_INSET_IPV4_SRC;
586 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
588 if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
589 input_set |= IAVF_INSET_IPV4_DST;
590 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
593 rte_memcpy(hdr->buffer,
595 sizeof(ipv4_spec->hdr));
598 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
601 case RTE_FLOW_ITEM_TYPE_IPV6:
602 l3 = RTE_FLOW_ITEM_TYPE_IPV6;
603 ipv6_spec = item->spec;
604 ipv6_mask = item->mask;
606 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
608 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
610 if (ipv6_spec && ipv6_mask) {
611 if (ipv6_mask->hdr.payload_len) {
612 rte_flow_error_set(error, EINVAL,
613 RTE_FLOW_ERROR_TYPE_ITEM,
614 item, "Invalid IPv6 mask");
618 if ((ipv6_mask->hdr.vtc_flow &
619 rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
620 == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
621 input_set |= IAVF_INSET_IPV6_TC;
622 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
624 if (ipv6_mask->hdr.proto == UINT8_MAX) {
625 input_set |= IAVF_INSET_IPV6_NEXT_HDR;
626 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
628 if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
629 input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
630 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
632 if (!memcmp(ipv6_mask->hdr.src_addr,
634 RTE_DIM(ipv6_mask->hdr.src_addr))) {
635 input_set |= IAVF_INSET_IPV6_SRC;
636 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
638 if (!memcmp(ipv6_mask->hdr.dst_addr,
640 RTE_DIM(ipv6_mask->hdr.dst_addr))) {
641 input_set |= IAVF_INSET_IPV6_DST;
642 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
645 rte_memcpy(hdr->buffer,
647 sizeof(ipv6_spec->hdr));
650 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
653 case RTE_FLOW_ITEM_TYPE_UDP:
654 udp_spec = item->spec;
655 udp_mask = item->mask;
657 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
659 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
661 if (udp_spec && udp_mask) {
662 if (udp_mask->hdr.dgram_len ||
663 udp_mask->hdr.dgram_cksum) {
664 rte_flow_error_set(error, EINVAL,
665 RTE_FLOW_ERROR_TYPE_ITEM, item,
670 if (udp_mask->hdr.src_port == UINT16_MAX) {
671 input_set |= IAVF_INSET_UDP_SRC_PORT;
672 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
674 if (udp_mask->hdr.dst_port == UINT16_MAX) {
675 input_set |= IAVF_INSET_UDP_DST_PORT;
676 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
679 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
680 rte_memcpy(hdr->buffer,
682 sizeof(udp_spec->hdr));
683 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
684 rte_memcpy(hdr->buffer,
686 sizeof(udp_spec->hdr));
689 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
692 case RTE_FLOW_ITEM_TYPE_TCP:
693 tcp_spec = item->spec;
694 tcp_mask = item->mask;
696 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
698 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
700 if (tcp_spec && tcp_mask) {
701 if (tcp_mask->hdr.sent_seq ||
702 tcp_mask->hdr.recv_ack ||
703 tcp_mask->hdr.data_off ||
704 tcp_mask->hdr.tcp_flags ||
705 tcp_mask->hdr.rx_win ||
706 tcp_mask->hdr.cksum ||
707 tcp_mask->hdr.tcp_urp) {
708 rte_flow_error_set(error, EINVAL,
709 RTE_FLOW_ERROR_TYPE_ITEM, item,
714 if (tcp_mask->hdr.src_port == UINT16_MAX) {
715 input_set |= IAVF_INSET_TCP_SRC_PORT;
716 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
718 if (tcp_mask->hdr.dst_port == UINT16_MAX) {
719 input_set |= IAVF_INSET_TCP_DST_PORT;
720 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
723 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
724 rte_memcpy(hdr->buffer,
726 sizeof(tcp_spec->hdr));
727 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
728 rte_memcpy(hdr->buffer,
730 sizeof(tcp_spec->hdr));
733 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
736 case RTE_FLOW_ITEM_TYPE_SCTP:
737 sctp_spec = item->spec;
738 sctp_mask = item->mask;
740 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
742 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
744 if (sctp_spec && sctp_mask) {
745 if (sctp_mask->hdr.cksum) {
746 rte_flow_error_set(error, EINVAL,
747 RTE_FLOW_ERROR_TYPE_ITEM, item,
752 if (sctp_mask->hdr.src_port == UINT16_MAX) {
753 input_set |= IAVF_INSET_SCTP_SRC_PORT;
754 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
756 if (sctp_mask->hdr.dst_port == UINT16_MAX) {
757 input_set |= IAVF_INSET_SCTP_DST_PORT;
758 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
761 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
762 rte_memcpy(hdr->buffer,
764 sizeof(sctp_spec->hdr));
765 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
766 rte_memcpy(hdr->buffer,
768 sizeof(sctp_spec->hdr));
771 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
774 case RTE_FLOW_ITEM_TYPE_GTPU:
775 gtp_spec = item->spec;
776 gtp_mask = item->mask;
778 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
780 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
782 if (gtp_spec && gtp_mask) {
783 if (gtp_mask->v_pt_rsv_flags ||
784 gtp_mask->msg_type ||
786 rte_flow_error_set(error, EINVAL,
787 RTE_FLOW_ERROR_TYPE_ITEM,
788 item, "Invalid GTP mask");
792 if (gtp_mask->teid == UINT32_MAX) {
793 input_set |= IAVF_INSET_GTPU_TEID;
794 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
797 rte_memcpy(hdr->buffer,
798 gtp_spec, sizeof(*gtp_spec));
801 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
804 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
805 gtp_psc_spec = item->spec;
806 gtp_psc_mask = item->mask;
808 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
810 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
812 if (gtp_psc_spec && gtp_psc_mask) {
813 if (gtp_psc_mask->qfi == UINT8_MAX) {
814 input_set |= IAVF_INSET_GTPU_QFI;
815 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
818 rte_memcpy(hdr->buffer, gtp_psc_spec,
819 sizeof(*gtp_psc_spec));
822 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
825 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
826 l2tpv3oip_spec = item->spec;
827 l2tpv3oip_mask = item->mask;
829 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
831 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
833 if (l2tpv3oip_spec && l2tpv3oip_mask) {
834 if (l2tpv3oip_mask->session_id == UINT32_MAX) {
835 input_set |= IAVF_L2TPV3OIP_SESSION_ID;
836 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
839 rte_memcpy(hdr->buffer, l2tpv3oip_spec,
840 sizeof(*l2tpv3oip_spec));
843 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
846 case RTE_FLOW_ITEM_TYPE_ESP:
847 esp_spec = item->spec;
848 esp_mask = item->mask;
850 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
852 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
854 if (esp_spec && esp_mask) {
855 if (esp_mask->hdr.spi == UINT32_MAX) {
856 input_set |= IAVF_INSET_ESP_SPI;
857 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
860 rte_memcpy(hdr->buffer, &esp_spec->hdr,
861 sizeof(esp_spec->hdr));
864 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
867 case RTE_FLOW_ITEM_TYPE_AH:
868 ah_spec = item->spec;
869 ah_mask = item->mask;
871 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
873 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
875 if (ah_spec && ah_mask) {
876 if (ah_mask->spi == UINT32_MAX) {
877 input_set |= IAVF_INSET_AH_SPI;
878 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
881 rte_memcpy(hdr->buffer, ah_spec,
885 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
888 case RTE_FLOW_ITEM_TYPE_PFCP:
889 pfcp_spec = item->spec;
890 pfcp_mask = item->mask;
892 hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
894 VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
896 if (pfcp_spec && pfcp_mask) {
897 if (pfcp_mask->s_field == UINT8_MAX) {
898 input_set |= IAVF_INSET_PFCP_S_FIELD;
899 VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
902 rte_memcpy(hdr->buffer, pfcp_spec,
906 filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
909 case RTE_FLOW_ITEM_TYPE_VOID:
913 rte_flow_error_set(error, EINVAL,
914 RTE_FLOW_ERROR_TYPE_ITEM, item,
915 "Invalid pattern item.");
920 if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
921 rte_flow_error_set(error, EINVAL,
922 RTE_FLOW_ERROR_TYPE_ITEM, item,
923 "Protocol header layers exceed the maximum value");
927 filter->input_set = input_set;
933 iavf_fdir_parse(struct iavf_adapter *ad,
934 struct iavf_pattern_match_item *array,
936 const struct rte_flow_item pattern[],
937 const struct rte_flow_action actions[],
939 struct rte_flow_error *error)
941 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
942 struct iavf_fdir_conf *filter = &vf->fdir.conf;
943 struct iavf_pattern_match_item *item = NULL;
947 memset(filter, 0, sizeof(*filter));
949 item = iavf_search_pattern_match_item(pattern, array, array_len, error);
953 ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
957 input_set = filter->input_set;
958 if (!input_set || input_set & ~item->input_set_mask) {
959 rte_flow_error_set(error, EINVAL,
960 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
961 "Invalid input set");
966 ret = iavf_fdir_parse_action(ad, actions, error, filter);
978 static struct iavf_flow_parser iavf_fdir_parser = {
979 .engine = &iavf_fdir_engine,
980 .array = iavf_fdir_pattern,
981 .array_len = RTE_DIM(iavf_fdir_pattern),
982 .parse_pattern_action = iavf_fdir_parse,
983 .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
986 RTE_INIT(iavf_fdir_engine_register)
988 iavf_register_flow_engine(&iavf_fdir_engine);