/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "iavf_rxtx.h"

#define IAVF_FDIR_MAX_QREGION_SIZE 128

#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

#define IAVF_GTPU_EH_DWLINK 0
#define IAVF_GTPU_EH_UPLINK 1
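
/* Input-set bitmasks: for each supported pattern, the header fields that
 * a flow rule may match on.
 */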
#define IAVF_FDIR_INSET_ETH (\
	IAVF_INSET_ETHERTYPE)

#define IAVF_FDIR_INSET_ETH_IPV4 (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_ID)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
	IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT (\
	IAVF_INSET_IPV6_ID)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV4 (\
	IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
	IAVF_INSET_TUN_IPV4_PROTO | IAVF_INSET_TUN_IPV4_TOS | \
	IAVF_INSET_TUN_IPV4_TTL)

#define IAVF_FDIR_INSET_GTPU_IPV4_UDP (\
	IAVF_FDIR_INSET_GTPU_IPV4 | \
	IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV4_TCP (\
	IAVF_FDIR_INSET_GTPU_IPV4 | \
	IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_IPV6_GTPU (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV6 (\
	IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
	IAVF_INSET_TUN_IPV6_NEXT_HDR | IAVF_INSET_TUN_IPV6_TC | \
	IAVF_INSET_TUN_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_GTPU_IPV6_UDP (\
	IAVF_FDIR_INSET_GTPU_IPV6 | \
	IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV6_TCP (\
	IAVF_FDIR_INSET_GTPU_IPV6 | \
	IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_L2TPV3OIP (\
	IAVF_L2TPV3OIP_SESSION_ID)

#define IAVF_FDIR_INSET_IPV4_ESP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_ESP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
	IAVF_INSET_AH_SPI)

#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_PFCP (\
	IAVF_INSET_PFCP_S_FIELD)

#define IAVF_FDIR_INSET_ECPRI (\
	IAVF_ECPRI_PC_RTC_ID)
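
/* Table of supported patterns, each paired with the input-set mask that
 * bounds which fields a rule using that pattern may specify.
 */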
static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
	{iavf_pattern_ethertype, IAVF_FDIR_INSET_ETH, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4, IAVF_FDIR_INSET_ETH_IPV4, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp, IAVF_FDIR_INSET_ETH_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_tcp, IAVF_FDIR_INSET_ETH_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_sctp, IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6, IAVF_FDIR_INSET_ETH_IPV6, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_frag_ext, IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp, IAVF_FDIR_INSET_ETH_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_tcp, IAVF_FDIR_INSET_ETH_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_sctp, IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu, IAVF_FDIR_INSET_IPV4_GTPU, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv4, IAVF_FDIR_INSET_GTPU_IPV4, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv6, IAVF_FDIR_INSET_GTPU_IPV6, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh, IAVF_FDIR_INSET_IPV4_GTPU_EH, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4, IAVF_FDIR_INSET_GTPU_IPV4, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6, IAVF_FDIR_INSET_GTPU_IPV6, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gtpu, IAVF_FDIR_INSET_IPV6_GTPU, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gtpu_eh, IAVF_FDIR_INSET_IPV6_GTPU_EH, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_l2tpv3, IAVF_FDIR_INSET_L2TPV3OIP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_l2tpv3, IAVF_FDIR_INSET_L2TPV3OIP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_esp, IAVF_FDIR_INSET_IPV4_ESP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_esp, IAVF_FDIR_INSET_IPV6_ESP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_esp, IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_esp, IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_pfcp, IAVF_FDIR_INSET_PFCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_pfcp, IAVF_FDIR_INSET_PFCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ecpri, IAVF_FDIR_INSET_ECPRI, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_ecpri, IAVF_FDIR_INSET_ECPRI, IAVF_INSET_NONE},
};

static struct iavf_flow_parser iavf_fdir_parser;
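
/* Register the FDIR parser only when the PF advertises flow director
 * offload (VIRTCHNL_VF_OFFLOAD_FDIR_PF) for this VF.
 */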
static int
iavf_fdir_init(struct iavf_adapter *ad)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct iavf_flow_parser *parser;

	if (!vf->vf_res)
		return -EINVAL;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
		parser = &iavf_fdir_parser;
	else
		return -ENOTSUP;

	return iavf_register_parser(parser, ad);
}
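
/* Detach the FDIR parser from the generic flow framework. */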
static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
	iavf_unregister_parser(&iavf_fdir_parser, ad);
}
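
/* Program a parsed FDIR rule into hardware over virtchnl and attach a
 * private copy of the configuration to the rte_flow handle.
 */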
static int
iavf_fdir_create(struct iavf_adapter *ad,
		 struct rte_flow *flow,
		 void *meta,
		 struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter = meta;
	struct iavf_fdir_conf *rule;
	int ret;

	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
	if (!rule) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory for fdir rule");
		return -rte_errno;
	}

	ret = iavf_fdir_add(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to add filter rule.");
		goto free_entry;
	}

	if (filter->mark_flag == 1)
		iavf_fdir_rx_proc_enable(ad, 1);

	rte_memcpy(rule, filter, sizeof(*rule));
	flow->rule = rule;

	return 0;

free_entry:
	rte_free(rule);
	return -rte_errno;
}
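
/* Remove an FDIR rule from hardware and free the copy held by the
 * rte_flow handle.
 */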
static int
iavf_fdir_destroy(struct iavf_adapter *ad,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter;
	int ret;

	filter = (struct iavf_fdir_conf *)flow->rule;

	ret = iavf_fdir_del(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to delete filter rule.");
		return -rte_errno;
	}

	if (filter->mark_flag == 1)
		iavf_fdir_rx_proc_enable(ad, 0);

	flow->rule = NULL;
	rte_free(filter);

	return 0;
}
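
/* Ask the PF to validate a candidate rule without programming it. */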
static int
iavf_fdir_validation(struct iavf_adapter *ad,
		     __rte_unused struct rte_flow *flow,
		     void *meta,
		     struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter = meta;
	int ret;

	ret = iavf_fdir_check(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to validate filter rule.");
		return -rte_errno;
	}

	return 0;
}
static struct iavf_flow_engine iavf_fdir_engine = {
	.init = iavf_fdir_init,
	.uninit = iavf_fdir_uninit,
	.create = iavf_fdir_create,
	.destroy = iavf_fdir_destroy,
	.validation = iavf_fdir_validation,
	.type = IAVF_FLOW_ENGINE_FDIR,
};
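
/* Convert an RSS action into a virtchnl queue region: the queues must be
 * contiguous, in range, and a power-of-2 count no larger than the
 * PF-advertised maximum. The region is encoded as start index + log2(size).
 */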
static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
			       struct rte_flow_error *error,
			       const struct rte_flow_action *act,
			       struct virtchnl_filter_action *filter_action)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	const struct rte_flow_action_rss *rss = act->conf;
	uint32_t i;

	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		return -rte_errno;
	}

	if (rss->queue_num <= 1) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Queue region size can't be 0 or 1.");
		return -rte_errno;
	}

	/* check if queue indexes for the queue region are continuous */
	for (i = 0; i < rss->queue_num - 1; i++) {
		if (rss->queue[i + 1] != rss->queue[i] + 1) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Discontinuous queue region");
			return -rte_errno;
		}
	}

	if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid queue region indexes.");
		return -rte_errno;
	}

	if (!(rte_is_power_of_2(rss->queue_num) &&
	      rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "The region size should be any of the following values: "
				   "1, 2, 4, 8, 16, 32, 64, 128, as long as the total number "
				   "of queues does not exceed the VSI allocation.");
		return -rte_errno;
	}

	if (rss->queue_num > vf->max_rss_qregion) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "The region size cannot be larger than the supported max RSS queue region");
		return -rte_errno;
	}

	filter_action->act_conf.queue.index = rss->queue[0];
	filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

	return 0;
}
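
/* Walk the action list and build the virtchnl action set. Exactly one
 * destination action (passthru/drop/queue/RSS region) and at most one mark
 * action are accepted; a mark-only rule gets an implicit passthru.
 */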
static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
		       const struct rte_flow_action actions[],
		       struct rte_flow_error *error,
		       struct iavf_fdir_conf *filter)
{
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	uint32_t dest_num = 0;
	uint32_t mark_num = 0;
	int ret;

	int number = 0;
	struct virtchnl_filter_action *filter_action;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_DROP;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			act_q = actions->conf;
			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_QUEUE;
			filter_action->act_conf.queue.index = act_q->index;

			if (filter_action->act_conf.queue.index >=
			    ad->eth_dev->data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions, "Invalid queue for FDIR.");
				return -rte_errno;
			}

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_Q_REGION;

			ret = iavf_fdir_parse_action_qregion(ad,
							     error, actions, filter_action);
			if (ret)
				return ret;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			mark_num++;

			filter->mark_flag = 1;
			mark_spec = actions->conf;
			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];

			filter_action->type = VIRTCHNL_ACTION_MARK;
			filter_action->act_conf.mark_id = mark_spec->id;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Invalid action.");
			return -rte_errno;
		}
	}

	if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Action numbers exceed the maximum value");
		return -rte_errno;
	}

	if (dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Unsupported action combination");
		return -rte_errno;
	}

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many mark actions");
		return -rte_errno;
	}

	if (dest_num + mark_num == 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Empty action");
		return -rte_errno;
	}

	/* A mark-only rule is treated as mark + passthru. */
	if (dest_num == 0) {
		filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
		filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
		filter->add_fltr.rule_cfg.action_set.count = ++number;
	}

	return 0;
}
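
/* If a pattern carries no explicit input set, fall back to matching the
 * L3 next-protocol field (6 for TCP, 17 for UDP) for bare IP + TCP/UDP
 * patterns instead of rejecting the rule.
 */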
static bool
iavf_fdir_refine_input_set(const uint64_t input_set,
			   const uint64_t input_set_mask,
			   struct iavf_fdir_conf *filter)
{
	struct virtchnl_proto_hdr *hdr, *hdr_last;
	struct rte_flow_item_ipv4 ipv4_spec;
	struct rte_flow_item_ipv6 ipv6_spec;
	int last_layer;
	uint8_t proto_id;

	if (input_set & ~input_set_mask)
		return false;
	else if (input_set)
		return true;

	last_layer = filter->add_fltr.rule_cfg.proto_hdrs.count - 1;
	/* A TCP/UDP pattern has at least eth/IP/L4, so the last layer
	 * index is never less than 2.
	 */
	if (last_layer < 2)
		return false;

	hdr_last = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer];
	if (hdr_last->type == VIRTCHNL_PROTO_HDR_TCP)
		proto_id = 6;
	else if (hdr_last->type == VIRTCHNL_PROTO_HDR_UDP)
		proto_id = 17;
	else
		return false;

	hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer - 1];
	switch (hdr->type) {
	case VIRTCHNL_PROTO_HDR_IPV4:
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
		memset(&ipv4_spec, 0, sizeof(ipv4_spec));
		ipv4_spec.hdr.next_proto_id = proto_id;
		rte_memcpy(hdr->buffer, &ipv4_spec.hdr,
			   sizeof(ipv4_spec.hdr));
		return true;
	case VIRTCHNL_PROTO_HDR_IPV6:
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
		memset(&ipv6_spec, 0, sizeof(ipv6_spec));
		ipv6_spec.hdr.proto = proto_id;
		rte_memcpy(hdr->buffer, &ipv6_spec.hdr,
			   sizeof(ipv6_spec.hdr));
		return true;
	default:
		return false;
	}
}
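
/* Insert a dummy IPv4 fragment header at 'layer', shifting the existing
 * protocol headers up by one slot.
 */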
static void
iavf_fdir_add_fragment_hdr(struct virtchnl_proto_hdrs *hdrs, int layer)
{
	struct virtchnl_proto_hdr *hdr1;
	struct virtchnl_proto_hdr *hdr2;
	int i;

	if (layer < 0 || layer > hdrs->count)
		return;

	/* shift headers up by one layer */
	for (i = hdrs->count; i >= layer; i--) {
		hdr1 = &hdrs->proto_hdr[i];
		hdr2 = &hdrs->proto_hdr[i - 1];
		*hdr1 = *hdr2;
	}

	/* add a dummy fragment header */
	hdr1 = &hdrs->proto_hdr[layer];
	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, IPV4_FRAG);
	hdrs->count = ++layer;
}
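
/* Translate an rte_flow item pattern into virtchnl protocol headers and
 * accumulate the input set (the fields the rule actually matches).
 */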
static int
iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
			const struct rte_flow_item pattern[],
			const uint64_t input_set_mask,
			struct rte_flow_error *error,
			struct iavf_fdir_conf *filter)
{
	struct virtchnl_proto_hdrs *hdrs =
		&filter->add_fltr.rule_cfg.proto_hdrs;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec;
	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_last;
	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
	const struct rte_flow_item_esp *esp_spec, *esp_mask;
	const struct rte_flow_item_ah *ah_spec, *ah_mask;
	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
	const struct rte_flow_item_ecpri *ecpri_spec, *ecpri_mask;
	const struct rte_flow_item *item = pattern;
	struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
	struct rte_ecpri_common_hdr ecpri_common;
	uint64_t input_set = IAVF_INSET_NONE;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type next_type;
	uint8_t tun_inner = 0;
	uint16_t ether_type;
	int layer = 0;

	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_type = item->type;

		if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
				    item_type ==
				    RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Range matching is not supported");
			return -rte_errno;
		}

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;
			next_type = (item + 1)->type;

			hdr1 = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);

			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
			    (!eth_spec || !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "NULL eth spec/mask.");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				if (!rte_is_zero_ether_addr(&eth_mask->src) ||
				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM, item,
							   "Invalid MAC_addr mask.");
					return -rte_errno;
				}
			}

			if (eth_spec && eth_mask && eth_mask->type) {
				if (eth_mask->type != RTE_BE16(0xffff)) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item, "Invalid type mask.");
					return -rte_errno;
				}

				ether_type = rte_be_to_cpu_16(eth_spec->type);
				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
				    ether_type == RTE_ETHER_TYPE_IPV6) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Unsupported ether_type.");
					return -rte_errno;
				}

				input_set |= IAVF_INSET_ETHERTYPE;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
								 ETHERTYPE);

				rte_memcpy(hdr1->buffer, eth_spec,
					   sizeof(struct rte_ether_hdr));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_last = item->last;
			ipv4_mask = item->mask;
			next_type = (item + 1)->type;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

			if (!(ipv4_spec && ipv4_mask)) {
				hdrs->count = ++layer;
				break;
			}

			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv4 mask.");
				return -rte_errno;
			}

			if (ipv4_last &&
			    (ipv4_last->hdr.version_ihl ||
			     ipv4_last->hdr.type_of_service ||
			     ipv4_last->hdr.time_to_live ||
			     ipv4_last->hdr.total_length ||
			     ipv4_last->hdr.next_proto_id ||
			     ipv4_last->hdr.hdr_checksum ||
			     ipv4_last->hdr.src_addr ||
			     ipv4_last->hdr.dst_addr)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv4 last.");
				return -rte_errno;
			}

			if (ipv4_mask->hdr.type_of_service ==
			    UINT8_MAX) {
				input_set |= IAVF_INSET_IPV4_TOS;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
								 DSCP);
			}

			if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
				input_set |= IAVF_INSET_IPV4_PROTO;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
								 PROT);
			}

			if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
				input_set |= IAVF_INSET_IPV4_TTL;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
								 TTL);
			}

			if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
				input_set |= IAVF_INSET_IPV4_SRC;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
								 SRC);
			}

			if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
				input_set |= IAVF_INSET_IPV4_DST;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
								 DST);
			}

			if (tun_inner) {
				input_set &= ~IAVF_PROT_IPV4_OUTER;
				input_set |= IAVF_PROT_IPV4_INNER;
			}

			rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
				   sizeof(ipv4_spec->hdr));

			hdrs->count = ++layer;

			/* fragment IPv4: only "any packet id" is supported,
			 * i.e. spec is 0, last is 0xffff, mask is 0xffff.
			 */
			if (ipv4_last && ipv4_spec->hdr.packet_id == 0 &&
			    ipv4_last->hdr.packet_id == UINT16_MAX &&
			    ipv4_mask->hdr.packet_id == UINT16_MAX &&
			    ipv4_mask->hdr.fragment_offset == UINT16_MAX) {
				/* all IPv4 fragment packets share the same
				 * ethertype; if the spec asks for any valid
				 * packet id, match on the ethertype instead.
				 */
				input_set |= IAVF_INSET_ETHERTYPE;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
								 ETHERTYPE);

				/* add a dummy header for IPv4 fragment */
				iavf_fdir_add_fragment_hdr(hdrs, layer);
			} else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv4 mask.");
				return -rte_errno;
			}

			break;

		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

			if (!(ipv6_spec && ipv6_mask)) {
				hdrs->count = ++layer;
				break;
			}

			if (ipv6_mask->hdr.payload_len) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv6 mask");
				return -rte_errno;
			}

			if ((ipv6_mask->hdr.vtc_flow &
			     rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
			    == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
				input_set |= IAVF_INSET_IPV6_TC;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
								 TC);
			}

			if (ipv6_mask->hdr.proto == UINT8_MAX) {
				input_set |= IAVF_INSET_IPV6_NEXT_HDR;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
								 PROT);
			}

			if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
				input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
								 HOP_LIMIT);
			}

			if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
				    RTE_DIM(ipv6_mask->hdr.src_addr))) {
				input_set |= IAVF_INSET_IPV6_SRC;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
								 SRC);
			}
			if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
				    RTE_DIM(ipv6_mask->hdr.dst_addr))) {
				input_set |= IAVF_INSET_IPV6_DST;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
								 DST);
			}

			if (tun_inner) {
				input_set &= ~IAVF_PROT_IPV6_OUTER;
				input_set |= IAVF_PROT_IPV6_INNER;
			}

			rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
				   sizeof(ipv6_spec->hdr));

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
			ipv6_frag_spec = item->spec;
			ipv6_frag_last = item->last;
			ipv6_frag_mask = item->mask;
			next_type = (item + 1)->type;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6_EH_FRAG);

			if (!(ipv6_frag_spec && ipv6_frag_mask)) {
				hdrs->count = ++layer;
				break;
			}

			/* fragment IPv6: only "any packet id" is supported,
			 * i.e. spec is 0, last is 0xffffffff, mask is
			 * 0xffffffff.
			 */
			if (ipv6_frag_last && ipv6_frag_spec->hdr.id == 0 &&
			    ipv6_frag_last->hdr.id == UINT32_MAX &&
			    ipv6_frag_mask->hdr.id == UINT32_MAX &&
			    ipv6_frag_mask->hdr.frag_data == UINT16_MAX) {
				/* all IPv6 fragment packets share the same
				 * ethertype; if the spec asks for any valid
				 * packet id, match on the ethertype instead.
				 */
				input_set |= IAVF_INSET_ETHERTYPE;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
								 ETHERTYPE);

				rte_memcpy(hdr->buffer, &ipv6_frag_spec->hdr,
					   sizeof(ipv6_frag_spec->hdr));
			} else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv6 mask.");
				return -rte_errno;
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

			if (udp_spec && udp_mask) {
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM, item,
							   "Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_UDP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
				}
				if (udp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_UDP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
				}

				if (tun_inner) {
					input_set &= ~IAVF_PROT_UDP_OUTER;
					input_set |= IAVF_PROT_UDP_INNER;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
					rte_memcpy(hdr->buffer,
						   &udp_spec->hdr,
						   sizeof(udp_spec->hdr));
				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						   &udp_spec->hdr,
						   sizeof(udp_spec->hdr));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

			if (tcp_spec && tcp_mask) {
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM, item,
							   "Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_TCP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
				}
				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_TCP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
				}

				if (tun_inner) {
					input_set &= ~IAVF_PROT_TCP_OUTER;
					input_set |= IAVF_PROT_TCP_INNER;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
					rte_memcpy(hdr->buffer,
						   &tcp_spec->hdr,
						   sizeof(tcp_spec->hdr));
				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						   &tcp_spec->hdr,
						   sizeof(tcp_spec->hdr));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

			if (sctp_spec && sctp_mask) {
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM, item,
							   "Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_SCTP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
				}
				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_SCTP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
					rte_memcpy(hdr->buffer,
						   &sctp_spec->hdr,
						   sizeof(sctp_spec->hdr));
				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						   &sctp_spec->hdr,
						   sizeof(sctp_spec->hdr));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_GTPU:
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);

			if (gtp_spec && gtp_mask) {
				if (gtp_mask->v_pt_rsv_flags ||
				    gtp_mask->msg_type ||
				    gtp_mask->msg_len) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item, "Invalid GTP mask");
					return -rte_errno;
				}

				if (gtp_mask->teid == UINT32_MAX) {
					input_set |= IAVF_INSET_GTPU_TEID;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
				}

				rte_memcpy(hdr->buffer,
					   gtp_spec, sizeof(*gtp_spec));
			}

			/* subsequent L3/L4 items describe the inner packet */
			tun_inner = 1;

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			/* guard on the mask as well to avoid dereferencing a
			 * NULL mask when only a spec is given
			 */
			if (!gtp_psc_spec || !gtp_psc_mask)
				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
			else if (gtp_psc_mask->qfi && !gtp_psc_mask->pdu_type)
				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
			else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_UPLINK)
				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP);
			else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_DWLINK)
				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN);

			if (gtp_psc_spec && gtp_psc_mask) {
				if (gtp_psc_mask->qfi == UINT8_MAX) {
					input_set |= IAVF_INSET_GTPU_QFI;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
				}

				rte_memcpy(hdr->buffer, gtp_psc_spec,
					   sizeof(*gtp_psc_spec));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
			l2tpv3oip_spec = item->spec;
			l2tpv3oip_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);

			if (l2tpv3oip_spec && l2tpv3oip_mask) {
				if (l2tpv3oip_mask->session_id == UINT32_MAX) {
					input_set |= IAVF_L2TPV3OIP_SESSION_ID;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
				}

				rte_memcpy(hdr->buffer, l2tpv3oip_spec,
					   sizeof(*l2tpv3oip_spec));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_ESP:
			esp_spec = item->spec;
			esp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

			if (esp_spec && esp_mask) {
				if (esp_mask->hdr.spi == UINT32_MAX) {
					input_set |= IAVF_INSET_ESP_SPI;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
				}

				rte_memcpy(hdr->buffer, &esp_spec->hdr,
					   sizeof(esp_spec->hdr));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_AH:
			ah_spec = item->spec;
			ah_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

			if (ah_spec && ah_mask) {
				if (ah_mask->spi == UINT32_MAX) {
					input_set |= IAVF_INSET_AH_SPI;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
				}

				rte_memcpy(hdr->buffer, ah_spec,
					   sizeof(*ah_spec));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_PFCP:
			pfcp_spec = item->spec;
			pfcp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);

			if (pfcp_spec && pfcp_mask) {
				if (pfcp_mask->s_field == UINT8_MAX) {
					input_set |= IAVF_INSET_PFCP_S_FIELD;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
				}

				rte_memcpy(hdr->buffer, pfcp_spec,
					   sizeof(*pfcp_spec));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_ECPRI:
			ecpri_spec = item->spec;
			ecpri_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ECPRI);

			if (ecpri_spec && ecpri_mask) {
				/* read the common header only once a spec is
				 * known to be present
				 */
				ecpri_common.u32 =
					rte_be_to_cpu_32(ecpri_spec->hdr.common.u32);

				if (ecpri_common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA &&
				    ecpri_mask->hdr.type0.pc_id == UINT16_MAX) {
					input_set |= IAVF_ECPRI_PC_RTC_ID;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ECPRI,
									 PC_RTC_ID);
				}

				rte_memcpy(hdr->buffer, ecpri_spec,
					   sizeof(*ecpri_spec));
			}

			hdrs->count = ++layer;
			break;

		case RTE_FLOW_ITEM_TYPE_VOID:
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Invalid pattern item.");
			return -rte_errno;
		}
	}

	if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Protocol header layers exceed the maximum value");
		return -rte_errno;
	}

	if (!iavf_fdir_refine_input_set(input_set,
					input_set_mask | IAVF_INSET_ETHERTYPE,
					filter)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
				   "Invalid input set");
		return -rte_errno;
	}

	filter->input_set = input_set;

	return 0;
}
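
/* Top-level FDIR parse entry: match the pattern against the supported
 * table, then fill vf->fdir.conf from the pattern and action lists.
 */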
static int
iavf_fdir_parse(struct iavf_adapter *ad,
		struct iavf_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta,
		struct rte_flow_error *error)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct iavf_fdir_conf *filter = &vf->fdir.conf;
	struct iavf_pattern_match_item *item = NULL;
	int ret;

	memset(filter, 0, sizeof(*filter));

	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = iavf_fdir_parse_pattern(ad, pattern, item->input_set_mask,
				      error, filter);
	if (ret)
		goto error;

	ret = iavf_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		goto error;

	if (meta)
		*meta = filter;

error:
	rte_free(item);
	return ret;
}
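
/* FDIR parser bound to the distributor stage of the flow framework. */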
static struct iavf_flow_parser iavf_fdir_parser = {
	.engine = &iavf_fdir_engine,
	.array = iavf_fdir_pattern,
	.array_len = RTE_DIM(iavf_fdir_pattern),
	.parse_pattern_action = iavf_fdir_parse,
	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(iavf_fdir_engine_register)
{
	iavf_register_flow_engine(&iavf_fdir_engine);
}