/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "virtchnl.h"

#define IAVF_FDIR_MAX_QREGION_SIZE 128

#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
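
/*
 * Per-pattern input-set masks. Each IAVF_FDIR_INSET_* value ORs together
 * the header fields a flow rule may match on for that pattern;
 * iavf_fdir_parse() rejects a rule whose parsed input set contains any
 * bit outside the mask registered for the matched pattern.
 */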

#define IAVF_FDIR_INSET_ETH (\
    IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
    IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
    IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
    IAVF_INSET_IPV4_TTL)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
    IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
    IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
    IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
    IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
    IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
    IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
    IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
    IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
    IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
    IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
    IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
    IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
    IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
    IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
    IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
    IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
    IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
    IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
    IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
    IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
    IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU (\
    IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
    IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_EH (\
    IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
    IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_L2TPV3OIP (\
    IAVF_L2TPV3OIP_SESSION_ID)

#define IAVF_FDIR_INSET_ESP (\
    IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
    IAVF_INSET_AH_SPI)

#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
    IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
    IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
    IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
    IAVF_INSET_ESP_SPI)
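
/*
 * Supported FDIR patterns. Each entry pairs a pattern list (defined in
 * iavf_generic_flow.c) with the input-set mask accepted for it.
 */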

static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
    {iavf_pattern_ethertype,        IAVF_FDIR_INSET_ETH,           IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv4,         IAVF_FDIR_INSET_ETH_IPV4,      IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv4_udp,     IAVF_FDIR_INSET_ETH_IPV4_UDP,  IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv4_tcp,     IAVF_FDIR_INSET_ETH_IPV4_TCP,  IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv4_sctp,    IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv6,         IAVF_FDIR_INSET_ETH_IPV6,      IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv6_udp,     IAVF_FDIR_INSET_ETH_IPV6_UDP,  IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv6_tcp,     IAVF_FDIR_INSET_ETH_IPV6_TCP,  IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv6_sctp,    IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv4_gtpu,    IAVF_FDIR_INSET_GTPU,          IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv4_gtpu_eh, IAVF_FDIR_INSET_GTPU_EH,       IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv4_l2tpv3,  IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv6_l2tpv3,  IAVF_FDIR_INSET_L2TPV3OIP,     IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv4_esp,     IAVF_FDIR_INSET_ESP,           IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv6_esp,     IAVF_FDIR_INSET_ESP,           IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv4_ah,      IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv6_ah,      IAVF_FDIR_INSET_AH,            IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv4_udp_esp, IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE},
    {iavf_pattern_eth_ipv6_udp_esp, IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE},
};

static struct iavf_flow_parser iavf_fdir_parser;
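
/*
 * Register the FDIR parser with the generic flow framework, but only if
 * the PF advertised VIRTCHNL_VF_OFFLOAD_FDIR_PF support for this VF.
 */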
static int
iavf_fdir_init(struct iavf_adapter *ad)
{
    struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
    struct iavf_flow_parser *parser;

    if (!vf->vf_res)
        return -EINVAL;

    if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
        parser = &iavf_fdir_parser;
    else
        return -ENOTSUP;

    return iavf_register_parser(parser, ad);
}
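
/* Unregister the FDIR parser when the adapter is torn down. */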
static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
    iavf_unregister_parser(&iavf_fdir_parser, ad);
}
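
/*
 * Create a flow rule: program the filter via virtchnl (iavf_fdir_add)
 * and, on success, keep a private copy of the parsed configuration in
 * flow->rule so iavf_fdir_destroy() can replay it to the PF for deletion.
 */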
static int
iavf_fdir_create(struct iavf_adapter *ad,
        struct rte_flow *flow,
        void *meta,
        struct rte_flow_error *error)
{
    struct iavf_fdir_conf *filter = meta;
    struct iavf_fdir_conf *rule;
    int ret;

    rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
    if (!rule) {
        rte_flow_error_set(error, ENOMEM,
                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                "Failed to allocate memory for fdir rule");
        return -rte_errno;
    }

    ret = iavf_fdir_add(ad, filter);
    if (ret) {
        rte_flow_error_set(error, -ret,
                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                "Failed to add filter rule.");
        goto free_entry;
    }

    rte_memcpy(rule, filter, sizeof(*rule));
    flow->rule = rule;

    return 0;

free_entry:
    rte_free(rule);
    return -rte_errno;
}
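
/*
 * Destroy a flow rule: ask the PF to delete the filter, then release
 * the private copy saved by iavf_fdir_create().
 */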
static int
iavf_fdir_destroy(struct iavf_adapter *ad,
        struct rte_flow *flow,
        struct rte_flow_error *error)
{
    struct iavf_fdir_conf *filter;
    int ret;

    filter = (struct iavf_fdir_conf *)flow->rule;

    ret = iavf_fdir_del(ad, filter);
    if (ret) {
        rte_flow_error_set(error, -ret,
                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                "Failed to delete filter rule.");
        return -rte_errno;
    }

    flow->rule = NULL;
    rte_free(filter);

    return 0;
}
static int
iavf_fdir_validation(struct iavf_adapter *ad,
        __rte_unused struct rte_flow *flow,
        void *meta,
        struct rte_flow_error *error)
{
    struct iavf_fdir_conf *filter = meta;
    int ret;

    ret = iavf_fdir_check(ad, filter);
    if (ret) {
        rte_flow_error_set(error, -ret,
                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                "Failed to validate filter rule.");
        return -rte_errno;
    }

    return 0;
}
static struct iavf_flow_engine iavf_fdir_engine = {
    .init = iavf_fdir_init,
    .uninit = iavf_fdir_uninit,
    .create = iavf_fdir_create,
    .destroy = iavf_fdir_destroy,
    .validation = iavf_fdir_validation,
    .type = IAVF_FLOW_ENGINE_FDIR,
};
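
/*
 * Translate an RTE_FLOW_ACTION_TYPE_RSS action into a virtchnl queue-region
 * action. The queue list must be contiguous, fall within the device's Rx
 * queue range, and have a power-of-2 length of at most
 * IAVF_FDIR_MAX_QREGION_SIZE. The region is encoded as a first-queue index
 * plus log2 of its size; for example, queues {4, 5, 6, 7} yield
 * index = 4 and region = rte_fls_u32(4) - 1 = 2.
 */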
static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
        struct rte_flow_error *error,
        const struct rte_flow_action *act,
        struct virtchnl_filter_action *filter_action)
{
    const struct rte_flow_action_rss *rss = act->conf;
    uint32_t i;

    if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION, act,
                "Invalid action.");
        return -rte_errno;
    }

    if (rss->queue_num <= 1) {
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION, act,
                "Queue region size can't be 0 or 1.");
        return -rte_errno;
    }

    /* check if queue indexes for queue region are continuous */
    for (i = 0; i < rss->queue_num - 1; i++) {
        if (rss->queue[i + 1] != rss->queue[i] + 1) {
            rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION, act,
                    "Discontinuous queue region");
            return -rte_errno;
        }
    }

    if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION, act,
                "Invalid queue region indexes.");
        return -rte_errno;
    }

    if (!(rte_is_power_of_2(rss->queue_num) &&
          rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION, act,
                "The region size should be any of the following values: "
                "2, 4, 8, 16, 32, 64, 128, as long as the total number "
                "of queues does not exceed the VSI allocation.");
        return -rte_errno;
    }

    filter_action->act_conf.queue.index = rss->queue[0];
    filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

    return 0;
}
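
/*
 * Translate the rte_flow action list into the virtchnl action set.
 * Exactly one destination action (PASSTHRU, DROP, QUEUE or an RSS queue
 * region) must be present; VOID actions are skipped, anything else is
 * rejected.
 */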
static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
        const struct rte_flow_action actions[],
        struct rte_flow_error *error,
        struct iavf_fdir_conf *filter)
{
    const struct rte_flow_action_queue *act_q;
    uint32_t dest_num = 0;
    int ret;

    int number = 0;
    struct virtchnl_filter_action *filter_action;

    for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
        switch (actions->type) {
        case RTE_FLOW_ACTION_TYPE_VOID:
            break;

        case RTE_FLOW_ACTION_TYPE_PASSTHRU:
            dest_num++;

            filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

            filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

            filter->add_fltr.rule_cfg.action_set.count = ++number;
            break;

        case RTE_FLOW_ACTION_TYPE_DROP:
            dest_num++;

            filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

            filter_action->type = VIRTCHNL_ACTION_DROP;

            filter->add_fltr.rule_cfg.action_set.count = ++number;
            break;

        case RTE_FLOW_ACTION_TYPE_QUEUE:
            dest_num++;

            act_q = actions->conf;
            filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

            filter_action->type = VIRTCHNL_ACTION_QUEUE;
            filter_action->act_conf.queue.index = act_q->index;

            if (filter_action->act_conf.queue.index >=
                ad->eth_dev->data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        actions, "Invalid queue for FDIR.");
                return -rte_errno;
            }

            filter->add_fltr.rule_cfg.action_set.count = ++number;
            break;

        case RTE_FLOW_ACTION_TYPE_RSS:
            dest_num++;

            filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

            filter_action->type = VIRTCHNL_ACTION_Q_REGION;

            ret = iavf_fdir_parse_action_qregion(ad,
                    error, actions, filter_action);
            if (ret)
                return ret;

            filter->add_fltr.rule_cfg.action_set.count = ++number;
            break;

        default:
            rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION, actions,
                    "Invalid action.");
            return -rte_errno;
        }
    }

    if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION, actions,
                "Action numbers exceed the maximum value");
        return -rte_errno;
    }

    if (dest_num == 0 || dest_num >= 2) {
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION, actions,
                "Unsupported action combination");
        return -rte_errno;
    }

    return 0;
}
static int
iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
        const struct rte_flow_item pattern[],
        struct rte_flow_error *error,
        struct iavf_fdir_conf *filter)
{
    const struct rte_flow_item *item = pattern;
    enum rte_flow_item_type item_type;
    enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
    const struct rte_flow_item_eth *eth_spec, *eth_mask;
    const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
    const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
    const struct rte_flow_item_udp *udp_spec, *udp_mask;
    const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
    const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
    const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
    const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
    const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
    const struct rte_flow_item_esp *esp_spec, *esp_mask;
    const struct rte_flow_item_ah *ah_spec, *ah_mask;
    uint64_t input_set = IAVF_INSET_NONE;
    enum rte_flow_item_type next_type;
    uint16_t ether_type;

    int layer = 0;
    struct virtchnl_proto_hdr *hdr;

    uint8_t ipv6_addr_mask[16] = {
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
    };

    for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM, item,
                    "Range not supported");
            return -rte_errno;
        }

        item_type = item->type;

        switch (item_type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
            eth_spec = item->spec;
            eth_mask = item->mask;
            next_type = (item + 1)->type;

            hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

            VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);

            if (next_type == RTE_FLOW_ITEM_TYPE_END &&
                (!eth_spec || !eth_mask)) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "NULL eth spec/mask.");
                return -rte_errno;
            }

            if (eth_spec && eth_mask) {
                if (!rte_is_zero_ether_addr(&eth_mask->src) ||
                    !rte_is_zero_ether_addr(&eth_mask->dst)) {
                    rte_flow_error_set(error, EINVAL,
                            RTE_FLOW_ERROR_TYPE_ITEM, item,
                            "Invalid MAC_addr mask.");
                    return -rte_errno;
                }
            }

            if (eth_spec && eth_mask && eth_mask->type) {
                if (eth_mask->type != RTE_BE16(0xffff)) {
                    rte_flow_error_set(error, EINVAL,
                            RTE_FLOW_ERROR_TYPE_ITEM,
                            item, "Invalid type mask.");
                    return -rte_errno;
                }

                ether_type = rte_be_to_cpu_16(eth_spec->type);
                if (ether_type == RTE_ETHER_TYPE_IPV4 ||
                    ether_type == RTE_ETHER_TYPE_IPV6) {
                    rte_flow_error_set(error, EINVAL,
                            RTE_FLOW_ERROR_TYPE_ITEM,
                            item,
                            "Unsupported ether_type.");
                    return -rte_errno;
                }

                input_set |= IAVF_INSET_ETHERTYPE;
                VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);

                rte_memcpy(hdr->buffer,
                        eth_spec, sizeof(*eth_spec));
            }

            filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
            break;

        case RTE_FLOW_ITEM_TYPE_IPV4:
            l3 = RTE_FLOW_ITEM_TYPE_IPV4;
            ipv4_spec = item->spec;
            ipv4_mask = item->mask;

            hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

            VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

            if (ipv4_spec && ipv4_mask) {
                if (ipv4_mask->hdr.version_ihl ||
                    ipv4_mask->hdr.total_length ||
                    ipv4_mask->hdr.packet_id ||
                    ipv4_mask->hdr.fragment_offset ||
                    ipv4_mask->hdr.hdr_checksum) {
                    rte_flow_error_set(error, EINVAL,
                            RTE_FLOW_ERROR_TYPE_ITEM,
                            item, "Invalid IPv4 mask.");
                    return -rte_errno;
                }

                if (ipv4_mask->hdr.type_of_service == UINT8_MAX) {
                    input_set |= IAVF_INSET_IPV4_TOS;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
                }

                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
                    input_set |= IAVF_INSET_IPV4_PROTO;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
                }

                if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
                    input_set |= IAVF_INSET_IPV4_TTL;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
                }

                if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
                    input_set |= IAVF_INSET_IPV4_SRC;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
                }

                if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
                    input_set |= IAVF_INSET_IPV4_DST;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
                }

                rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
                        sizeof(ipv4_spec->hdr));
            }

            filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
            break;

        case RTE_FLOW_ITEM_TYPE_IPV6:
            l3 = RTE_FLOW_ITEM_TYPE_IPV6;
            ipv6_spec = item->spec;
            ipv6_mask = item->mask;

            hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

            VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

            if (ipv6_spec && ipv6_mask) {
                if (ipv6_mask->hdr.payload_len) {
                    rte_flow_error_set(error, EINVAL,
                            RTE_FLOW_ERROR_TYPE_ITEM,
                            item, "Invalid IPv6 mask");
                    return -rte_errno;
                }

                if ((ipv6_mask->hdr.vtc_flow &
                     rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
                    == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
                    input_set |= IAVF_INSET_IPV6_TC;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
                }

                if (ipv6_mask->hdr.proto == UINT8_MAX) {
                    input_set |= IAVF_INSET_IPV6_NEXT_HDR;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
                }

                if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
                    input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
                }

                if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
                        RTE_DIM(ipv6_mask->hdr.src_addr))) {
                    input_set |= IAVF_INSET_IPV6_SRC;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
                }

                if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
                        RTE_DIM(ipv6_mask->hdr.dst_addr))) {
                    input_set |= IAVF_INSET_IPV6_DST;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
                }

                rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
                        sizeof(ipv6_spec->hdr));
            }

            filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
            break;

        case RTE_FLOW_ITEM_TYPE_UDP:
            udp_spec = item->spec;
            udp_mask = item->mask;

            hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

            VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

            if (udp_spec && udp_mask) {
                if (udp_mask->hdr.dgram_len ||
                    udp_mask->hdr.dgram_cksum) {
                    rte_flow_error_set(error, EINVAL,
                            RTE_FLOW_ERROR_TYPE_ITEM, item,
                            "Invalid UDP mask");
                    return -rte_errno;
                }

                if (udp_mask->hdr.src_port == UINT16_MAX) {
                    input_set |= IAVF_INSET_UDP_SRC_PORT;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
                }

                if (udp_mask->hdr.dst_port == UINT16_MAX) {
                    input_set |= IAVF_INSET_UDP_DST_PORT;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
                }

                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                    rte_memcpy(hdr->buffer,
                            &udp_spec->hdr,
                            sizeof(udp_spec->hdr));
                else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                    rte_memcpy(hdr->buffer,
                            &udp_spec->hdr,
                            sizeof(udp_spec->hdr));
            }

            filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
            break;

        case RTE_FLOW_ITEM_TYPE_TCP:
            tcp_spec = item->spec;
            tcp_mask = item->mask;

            hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

            VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

            if (tcp_spec && tcp_mask) {
                if (tcp_mask->hdr.sent_seq ||
                    tcp_mask->hdr.recv_ack ||
                    tcp_mask->hdr.data_off ||
                    tcp_mask->hdr.tcp_flags ||
                    tcp_mask->hdr.rx_win ||
                    tcp_mask->hdr.cksum ||
                    tcp_mask->hdr.tcp_urp) {
                    rte_flow_error_set(error, EINVAL,
                            RTE_FLOW_ERROR_TYPE_ITEM, item,
                            "Invalid TCP mask");
                    return -rte_errno;
                }

                if (tcp_mask->hdr.src_port == UINT16_MAX) {
                    input_set |= IAVF_INSET_TCP_SRC_PORT;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
                }

                if (tcp_mask->hdr.dst_port == UINT16_MAX) {
                    input_set |= IAVF_INSET_TCP_DST_PORT;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
                }

                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                    rte_memcpy(hdr->buffer,
                            &tcp_spec->hdr,
                            sizeof(tcp_spec->hdr));
                else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                    rte_memcpy(hdr->buffer,
                            &tcp_spec->hdr,
                            sizeof(tcp_spec->hdr));
            }

            filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
            break;

        case RTE_FLOW_ITEM_TYPE_SCTP:
            sctp_spec = item->spec;
            sctp_mask = item->mask;

            hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

            VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

            if (sctp_spec && sctp_mask) {
                if (sctp_mask->hdr.cksum) {
                    rte_flow_error_set(error, EINVAL,
                            RTE_FLOW_ERROR_TYPE_ITEM, item,
                            "Invalid SCTP mask");
                    return -rte_errno;
                }

                if (sctp_mask->hdr.src_port == UINT16_MAX) {
                    input_set |= IAVF_INSET_SCTP_SRC_PORT;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
                }

                if (sctp_mask->hdr.dst_port == UINT16_MAX) {
                    input_set |= IAVF_INSET_SCTP_DST_PORT;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
                }

                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                    rte_memcpy(hdr->buffer,
                            &sctp_spec->hdr,
                            sizeof(sctp_spec->hdr));
                else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                    rte_memcpy(hdr->buffer,
                            &sctp_spec->hdr,
                            sizeof(sctp_spec->hdr));
            }

            filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
            break;

        case RTE_FLOW_ITEM_TYPE_GTPU:
            gtp_spec = item->spec;
            gtp_mask = item->mask;

            hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

            VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);

            if (gtp_spec && gtp_mask) {
                if (gtp_mask->v_pt_rsv_flags ||
                    gtp_mask->msg_type ||
                    gtp_mask->msg_len) {
                    rte_flow_error_set(error, EINVAL,
                            RTE_FLOW_ERROR_TYPE_ITEM,
                            item, "Invalid GTP mask");
                    return -rte_errno;
                }

                if (gtp_mask->teid == UINT32_MAX) {
                    input_set |= IAVF_INSET_GTPU_TEID;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
                }

                rte_memcpy(hdr->buffer,
                        gtp_spec, sizeof(*gtp_spec));
            }

            filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
            break;

        case RTE_FLOW_ITEM_TYPE_GTP_PSC:
            gtp_psc_spec = item->spec;
            gtp_psc_mask = item->mask;

            hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

            VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);

            if (gtp_psc_spec && gtp_psc_mask) {
                if (gtp_psc_mask->qfi == UINT8_MAX) {
                    input_set |= IAVF_INSET_GTPU_QFI;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
                }

                rte_memcpy(hdr->buffer, gtp_psc_spec,
                        sizeof(*gtp_psc_spec));
            }

            filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
            break;

        case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
            l2tpv3oip_spec = item->spec;
            l2tpv3oip_mask = item->mask;

            hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

            VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);

            if (l2tpv3oip_spec && l2tpv3oip_mask) {
                if (l2tpv3oip_mask->session_id == UINT32_MAX) {
                    input_set |= IAVF_L2TPV3OIP_SESSION_ID;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
                }

                rte_memcpy(hdr->buffer, l2tpv3oip_spec,
                        sizeof(*l2tpv3oip_spec));
            }

            filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
            break;

        case RTE_FLOW_ITEM_TYPE_ESP:
            esp_spec = item->spec;
            esp_mask = item->mask;

            hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

            VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

            if (esp_spec && esp_mask) {
                if (esp_mask->hdr.spi == UINT32_MAX) {
                    input_set |= IAVF_INSET_ESP_SPI;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
                }

                rte_memcpy(hdr->buffer, &esp_spec->hdr,
                        sizeof(esp_spec->hdr));
            }

            filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
            break;

        case RTE_FLOW_ITEM_TYPE_AH:
            ah_spec = item->spec;
            ah_mask = item->mask;

            hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

            VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

            if (ah_spec && ah_mask) {
                if (ah_mask->spi == UINT32_MAX) {
                    input_set |= IAVF_INSET_AH_SPI;
                    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
                }

                rte_memcpy(hdr->buffer, ah_spec,
                        sizeof(*ah_spec));
            }

            filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
            break;

        case RTE_FLOW_ITEM_TYPE_VOID:
            break;

        default:
            rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM, item,
                    "Invalid pattern item.");
            return -rte_errno;
        }
    }

    if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM, item,
                "Protocol header layers exceed the maximum value");
        return -rte_errno;
    }

    filter->input_set = input_set;

    return 0;
}
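
/*
 * Top-level parse callback: match the pattern against the supported
 * table, fill vf->fdir.conf from the items and actions, and check that
 * the parsed input set stays within the mask registered for the matched
 * pattern.
 */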
static int
iavf_fdir_parse(struct iavf_adapter *ad,
        struct iavf_pattern_match_item *array,
        uint32_t array_len,
        const struct rte_flow_item pattern[],
        const struct rte_flow_action actions[],
        void **meta,
        struct rte_flow_error *error)
{
    struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
    struct iavf_fdir_conf *filter = &vf->fdir.conf;
    struct iavf_pattern_match_item *item = NULL;
    uint64_t input_set;
    int ret;

    memset(filter, 0, sizeof(*filter));

    item = iavf_search_pattern_match_item(pattern, array, array_len, error);
    if (!item)
        return -rte_errno;

    ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
    if (ret)
        return ret;

    input_set = filter->input_set;
    if (!input_set || input_set & ~item->input_set_mask) {
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
                "Invalid input set");
        return -rte_errno;
    }

    ret = iavf_fdir_parse_action(ad, actions, error, filter);
    if (ret)
        return ret;

    if (meta)
        *meta = filter;

    return 0;
}
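
/* FDIR rules act as flow distributors, so run at the distributor stage. */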
static struct iavf_flow_parser iavf_fdir_parser = {
    .engine = &iavf_fdir_engine,
    .array = iavf_fdir_pattern,
    .array_len = RTE_DIM(iavf_fdir_pattern),
    .parse_pattern_action = iavf_fdir_parse,
    .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
};
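
/* Register the FDIR engine with the flow framework at startup. */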
RTE_INIT(iavf_fdir_engine_register)
{
    iavf_register_flow_engine(&iavf_fdir_engine);
}