/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "virtchnl.h"

#define IAVF_FDIR_MAX_QREGION_SIZE 128
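
/* The IPv6 Traffic Class field occupies bits 20..27 of the 32-bit
 * vtc_flow word (version:4 | TC:8 | flow label:20), hence the 20-bit
 * offset used to build the TC mask below.
 */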

#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

#define IAVF_FDIR_INSET_ETH (\
	IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
	IAVF_INSET_IPV4_TTL)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
	IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
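
/* Each entry below maps a supported rte_flow pattern to the input-set
 * bits the FDIR engine can match on; iavf_fdir_parse() rejects any
 * spec/mask combination that selects fields outside this mask.
 */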

static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
	{iavf_pattern_ethertype,	IAVF_FDIR_INSET_ETH,		IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4,		IAVF_FDIR_INSET_ETH_IPV4,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp,	IAVF_FDIR_INSET_ETH_IPV4_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_tcp,	IAVF_FDIR_INSET_ETH_IPV4_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_sctp,	IAVF_FDIR_INSET_ETH_IPV4_SCTP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6,		IAVF_FDIR_INSET_ETH_IPV6,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp,	IAVF_FDIR_INSET_ETH_IPV6_UDP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_tcp,	IAVF_FDIR_INSET_ETH_IPV6_TCP,	IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_sctp,	IAVF_FDIR_INSET_ETH_IPV6_SCTP,	IAVF_INSET_NONE},
};

static struct iavf_flow_parser iavf_fdir_parser;
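
/* Engine init: FDIR on the VF is usable only when the PF has granted
 * the VIRTCHNL_VF_OFFLOAD_FDIR_PF capability during virtchnl resource
 * negotiation; otherwise the parser is not registered.
 */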
static int
iavf_fdir_init(struct iavf_adapter *ad)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct iavf_flow_parser *parser;

	if (!vf->vf_res)
		return -EINVAL;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
		parser = &iavf_fdir_parser;
	else
		return -ENOTSUP;

	return iavf_register_parser(parser, ad);
}

static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
	iavf_unregister_parser(&iavf_fdir_parser, ad);
}

static int
iavf_fdir_create(struct iavf_adapter *ad,
		struct rte_flow *flow,
		void *meta,
		struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter = meta;
	struct iavf_fdir_conf *rule;
	int ret;

	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
	if (!rule) {
		rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to allocate memory for fdir rule");
		return -rte_errno;
	}

	ret = iavf_fdir_add(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to add filter rule.");
		goto free_entry;
	}

	rte_memcpy(rule, filter, sizeof(*rule));
	flow->rule = rule;

	return 0;

free_entry:
	rte_free(rule);
	return -rte_errno;
}

static int
iavf_fdir_destroy(struct iavf_adapter *ad,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter;
	int ret;

	filter = (struct iavf_fdir_conf *)flow->rule;

	ret = iavf_fdir_del(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to delete filter rule.");
		return -rte_errno;
	}

	flow->rule = NULL;
	rte_free(filter);

	return 0;
}

static int
iavf_fdir_validation(struct iavf_adapter *ad,
		__rte_unused struct rte_flow *flow,
		void *meta,
		struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter = meta;
	int ret;

	ret = iavf_fdir_check(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to validate filter rule.");
		return -rte_errno;
	}

	return 0;
}

static struct iavf_flow_engine iavf_fdir_engine = {
	.init = iavf_fdir_init,
	.uninit = iavf_fdir_uninit,
	.create = iavf_fdir_create,
	.destroy = iavf_fdir_destroy,
	.validation = iavf_fdir_validation,
	.type = IAVF_FLOW_ENGINE_FDIR,
};
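
/* An rte_flow RSS action whose queue list is contiguous and a power of
 * two is translated into a VIRTCHNL queue-region action; the helper
 * below enforces those constraints before encoding the region.
 */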
static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
			struct rte_flow_error *error,
			const struct rte_flow_action *act,
			struct virtchnl_filter_action *filter_action)
{
	const struct rte_flow_action_rss *rss = act->conf;
	uint32_t i;

	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"Invalid action.");
		return -rte_errno;
	}

	if (rss->queue_num <= 1) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"Queue region size can't be 0 or 1.");
		return -rte_errno;
	}

	/* check if queue index for queue region is continuous */
	for (i = 0; i < rss->queue_num - 1; i++) {
		if (rss->queue[i + 1] != rss->queue[i] + 1) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, act,
					"Discontinuous queue region");
			return -rte_errno;
		}
	}

	if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"Invalid queue region indexes.");
		return -rte_errno;
	}

	if (!(rte_is_power_of_2(rss->queue_num) &&
	      rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"The region size should be any of the following values: "
				"1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
				"of queues does not exceed the VSI allocation.");
		return -rte_errno;
	}

	/* region is encoded as the log2 of the number of queues */
	filter_action->act_conf.queue.index = rss->queue[0];
	filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

	return 0;
}
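
/* Worked example: an RSS action listing queues 4..11 (8 contiguous
 * queues) passes the checks above and is encoded as queue.index = 4
 * and queue.region = rte_fls_u32(8) - 1 = 3, i.e. log2 of the region
 * size.
 */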

static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error,
			struct iavf_fdir_conf *filter)
{
	const struct rte_flow_action_queue *act_q;
	uint32_t dest_num = 0;
	int ret;

	int number = 0;
	struct virtchnl_filter_action *filter_action;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_DROP;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			act_q = actions->conf;
			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_QUEUE;
			filter_action->act_conf.queue.index = act_q->index;

			if (filter_action->act_conf.queue.index >=
				ad->eth_dev->data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					actions, "Invalid queue for FDIR.");
				return -rte_errno;
			}

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_Q_REGION;

			ret = iavf_fdir_parse_action_qregion(ad,
						error, actions, filter_action);
			if (ret)
				return ret;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Invalid action.");
			return -rte_errno;
		}
	}

	if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, actions,
			"Action numbers exceed the maximum value");
		return -rte_errno;
	}

	if (dest_num == 0 || dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, actions,
			"Unsupported action combination");
		return -rte_errno;
	}

	return 0;
}

static int
iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
			const struct rte_flow_item pattern[],
			struct rte_flow_error *error,
			struct iavf_fdir_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	uint64_t input_set = IAVF_INSET_NONE;

	enum rte_flow_item_type next_type;
	uint16_t ether_type;

	int layer = 0;
	struct virtchnl_proto_hdr *hdr;

	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Range not supported");
			return -rte_errno;
		}

		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;
			next_type = (item + 1)->type;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);

			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
				(!eth_spec || !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "NULL eth spec/mask.");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				if (!rte_is_zero_ether_addr(&eth_mask->src) ||
				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM, item,
						"Invalid MAC_addr mask.");
					return -rte_errno;
				}
			}

			if (eth_spec && eth_mask && eth_mask->type) {
				if (eth_mask->type != RTE_BE16(0xffff)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Invalid type mask.");
					return -rte_errno;
				}

				ether_type = rte_be_to_cpu_16(eth_spec->type);
				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
					ether_type == RTE_ETHER_TYPE_IPV6) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Unsupported ether_type.");
					return -rte_errno;
				}

				input_set |= IAVF_INSET_ETHERTYPE;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);

				rte_memcpy(hdr->buffer,
					eth_spec, sizeof(*eth_spec));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

			if (ipv4_spec && ipv4_mask) {
				if (ipv4_mask->hdr.version_ihl ||
					ipv4_mask->hdr.total_length ||
					ipv4_mask->hdr.packet_id ||
					ipv4_mask->hdr.fragment_offset ||
					ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Invalid IPv4 mask.");
					return -rte_errno;
				}

				if (ipv4_mask->hdr.type_of_service ==
								UINT8_MAX) {
					input_set |= IAVF_INSET_IPV4_TOS;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
				}
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
					input_set |= IAVF_INSET_IPV4_PROTO;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
				}
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
					input_set |= IAVF_INSET_IPV4_TTL;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
				}
				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
					input_set |= IAVF_INSET_IPV4_SRC;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
				}
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
					input_set |= IAVF_INSET_IPV4_DST;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
				}

				rte_memcpy(hdr->buffer,
					&ipv4_spec->hdr,
					sizeof(ipv4_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

			if (ipv6_spec && ipv6_mask) {
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Invalid IPv6 mask");
					return -rte_errno;
				}

				if ((ipv6_mask->hdr.vtc_flow &
					rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
					== rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
					input_set |= IAVF_INSET_IPV6_TC;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
				}
				if (ipv6_mask->hdr.proto == UINT8_MAX) {
					input_set |= IAVF_INSET_IPV6_NEXT_HDR;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
				}
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
					input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
				}
				if (!memcmp(ipv6_mask->hdr.src_addr,
					ipv6_addr_mask,
					RTE_DIM(ipv6_mask->hdr.src_addr))) {
					input_set |= IAVF_INSET_IPV6_SRC;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
				}
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					ipv6_addr_mask,
					RTE_DIM(ipv6_mask->hdr.dst_addr))) {
					input_set |= IAVF_INSET_IPV6_DST;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
				}

				rte_memcpy(hdr->buffer,
					&ipv6_spec->hdr,
					sizeof(ipv6_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

			if (udp_spec && udp_mask) {
				if (udp_mask->hdr.dgram_len ||
					udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM, item,
						"Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_UDP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
				}
				if (udp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_UDP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
					rte_memcpy(hdr->buffer,
						&udp_spec->hdr,
						sizeof(udp_spec->hdr));
				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						&udp_spec->hdr,
						sizeof(udp_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

			if (tcp_spec && tcp_mask) {
				if (tcp_mask->hdr.sent_seq ||
					tcp_mask->hdr.recv_ack ||
					tcp_mask->hdr.data_off ||
					tcp_mask->hdr.tcp_flags ||
					tcp_mask->hdr.rx_win ||
					tcp_mask->hdr.cksum ||
					tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM, item,
						"Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_TCP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
				}
				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_TCP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
					rte_memcpy(hdr->buffer,
						&tcp_spec->hdr,
						sizeof(tcp_spec->hdr));
				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						&tcp_spec->hdr,
						sizeof(tcp_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

			if (sctp_spec && sctp_mask) {
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM, item,
						"Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_SCTP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
				}
				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_SCTP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
					rte_memcpy(hdr->buffer,
						&sctp_spec->hdr,
						sizeof(sctp_spec->hdr));
				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						&sctp_spec->hdr,
						sizeof(sctp_spec->hdr));
			}

			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_VOID:
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Invalid pattern item.");
			return -rte_errno;
		}
	}

	if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Protocol header layers exceed the maximum value");
		return -rte_errno;
	}

	filter->input_set = input_set;

	return 0;
}
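
/* Top-level parse entry: look the pattern up in iavf_fdir_pattern[],
 * fill the virtchnl rule from the pattern and actions, and verify that
 * the collected input set is a subset of what the matched entry allows.
 */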
static int
iavf_fdir_parse(struct iavf_adapter *ad,
		struct iavf_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta,
		struct rte_flow_error *error)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct iavf_fdir_conf *filter = &vf->fdir.conf;
	struct iavf_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));

	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
	if (ret)
		goto error;

	input_set = filter->input_set;
	if (!input_set || input_set & ~item->input_set_mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
				"Invalid input set");
		ret = -rte_errno;
		goto error;
	}

	ret = iavf_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		goto error;

	if (meta)
		*meta = filter;

error:
	rte_free(item);
	return ret;
}

static struct iavf_flow_parser iavf_fdir_parser = {
	.engine = &iavf_fdir_engine,
	.array = iavf_fdir_pattern,
	.array_len = RTE_DIM(iavf_fdir_pattern),
	.parse_pattern_action = iavf_fdir_parse,
	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
};
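
/* RTE_INIT runs at constructor time, so the FDIR engine is registered
 * with the generic flow framework before any adapter initialization.
 */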
RTE_INIT(iavf_fdir_engine_register)
{
	iavf_register_flow_engine(&iavf_fdir_engine);
}