/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2020 Intel Corporation
 */

#include "rte_malloc.h"
#include "igc_filter.h"
/*******************************************************************************
 * All Supported Rule Types
 *
 * Notes:
 * `para` or `(para)`, the parameter must be set
 * `[para]`, the parameter is optional
 * `([para1][para2]...)`, all parameters are optional, but at least one of
 *                        them must be set
 * `para1 | para2 | ...`, only one of the parameters can be set
 *
 * ether-type filter
 * pattern: ETH(type)/END
 * action: QUEUE/END
 *
 * n-tuple filter
 * pattern: [ETH/]([IPv4(protocol)|IPv6(protocol)/][UDP(dst_port)|
 *          TCP([dst_port],[flags])|SCTP(dst_port)/])END
 * action: QUEUE/END
 * attribute: [priority(0-7)]
 *
 * SYN filter
 * pattern: [ETH/][IPv4|IPv6/]TCP(flags=SYN)/END
 * action: QUEUE/END
 * attribute: [priority(0,1)]
 *
 * RSS filter
 * pattern: END
 * action: RSS/END
 ******************************************************************************/
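/*
 * Illustrative only: assuming the port is bound to testpmd as port 0, rules
 * of the above types could be expressed roughly as follows (testpmd flow
 * syntax; the type, port and queue values are arbitrary examples):
 *
 *   ether-type: flow create 0 ingress pattern eth type is 0x8906 / end
 *               actions queue index 1 / end
 *   n-tuple:    flow create 0 priority 3 ingress pattern ipv4 proto is 17 /
 *               udp dst is 4789 / end actions queue index 2 / end
 *   TCP SYN:    flow create 0 ingress pattern tcp flags spec 0x02
 *               flags mask 0x02 / end actions queue index 3 / end
 *   RSS:        flow create 0 ingress pattern end
 *               actions rss queues 0 1 end / end
 */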
/* Structure to store all filters */
struct igc_all_filter {
    struct igc_ethertype_filter ethertype;
    struct igc_ntuple_filter ntuple;
    struct igc_syn_filter syn;
    struct igc_rss_filter rss;
    uint32_t mask;      /* see IGC_FILTER_MASK_* definition */
};
#define IGC_FILTER_MASK_ETHER      (1u << IGC_FILTER_TYPE_ETHERTYPE)
#define IGC_FILTER_MASK_NTUPLE     (1u << IGC_FILTER_TYPE_NTUPLE)
#define IGC_FILTER_MASK_TCP_SYN    (1u << IGC_FILTER_TYPE_SYN)
#define IGC_FILTER_MASK_RSS        (1u << IGC_FILTER_TYPE_HASH)
#define IGC_FILTER_MASK_ALL        (IGC_FILTER_MASK_ETHER |   \
                                    IGC_FILTER_MASK_NTUPLE |  \
                                    IGC_FILTER_MASK_TCP_SYN | \
                                    IGC_FILTER_MASK_RSS)
#define IGC_SET_FILTER_MASK(_filter, _mask_bits) \
                                    ((_filter)->mask &= (_mask_bits))

#define IGC_IS_ALL_BITS_SET(_val)  ((_val) == (typeof(_val))~0)
#define IGC_NOT_ALL_BITS_SET(_val) ((_val) != (typeof(_val))~0)
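/*
 * Illustrative walk-through of the mask narrowing used below: parsing starts
 * with filter->mask = IGC_FILTER_MASK_ALL and every parser intersects the
 * mask with the filter types it can still satisfy.  For the rule
 * "pattern ETH/TCP(flags=SYN)/END, action QUEUE", a possible sequence is:
 *
 *   initial              mask = ETHER | NTUPLE | TCP_SYN | RSS
 *   ETH (no spec/mask)   mask &= NTUPLE | TCP_SYN          -> NTUPLE | TCP_SYN
 *   TCP flags == SYN     mask &= TCP_SYN                   -> TCP_SYN
 *   action QUEUE         mask &= ETHER | NTUPLE | TCP_SYN  -> TCP_SYN
 *
 * Exactly one bit remains, so the rule is accepted as a SYN filter.
 */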
/* Parse rule attribute */
static int
igc_parse_attribute(const struct rte_flow_attr *attr,
    struct igc_all_filter *filter, struct rte_flow_error *error)
{
    if (attr->group)
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                "Groups are not supported");

    if (attr->egress)
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                "Egress rules are not supported");

    if (attr->transfer)
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
                "Transfer rules are not supported");

    if (!attr->ingress)
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                "A rule must apply to ingress traffic");

    if (attr->priority == 0)
        return 0;

    /* only the n-tuple and SYN filters have a priority level */
    IGC_SET_FILTER_MASK(filter,
        IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);

    if (IGC_IS_ALL_BITS_SET(attr->priority)) {
        /* only the SYN filter matches this value */
        IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_TCP_SYN);
        filter->syn.hig_pri = 1;
        return 0;
    }

    if (attr->priority > IGC_NTUPLE_MAX_PRI)
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
                "Priority value is invalid.");

    if (attr->priority > 1) {
        /* only the n-tuple filter matches this value */
        IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
        filter->ntuple.tuple_info.priority = (uint8_t)attr->priority;
        return 0;
    }

    /* priority is 1: both the n-tuple and SYN filters remain candidates */
    filter->ntuple.tuple_info.priority = (uint8_t)attr->priority;
    filter->syn.hig_pri = (uint8_t)attr->priority;

    return 0;
}
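/*
 * Summary of how the priority attribute narrows the candidate filters,
 * derived from the checks above (IGC_NTUPLE_MAX_PRI corresponds to the 0-7
 * range given in the header comment):
 *
 *   priority == 0               any filter, no priority recorded
 *   priority == 1               n-tuple (priority 1) or SYN (high priority)
 *   2 <= priority <= 7          n-tuple filter only
 *   priority == UINT32_MAX      SYN filter with high priority
 *   otherwise                   rejected as invalid
 */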
/* Function type for pattern item parsers */
typedef int (*igc_pattern_parse)(const struct rte_flow_item *,
        struct igc_all_filter *, struct rte_flow_error *);

static int igc_parse_pattern_void(__rte_unused const struct rte_flow_item *item,
        __rte_unused struct igc_all_filter *filter,
        __rte_unused struct rte_flow_error *error);
static int igc_parse_pattern_ether(const struct rte_flow_item *item,
        struct igc_all_filter *filter, struct rte_flow_error *error);
static int igc_parse_pattern_ip(const struct rte_flow_item *item,
        struct igc_all_filter *filter, struct rte_flow_error *error);
static int igc_parse_pattern_ipv6(const struct rte_flow_item *item,
        struct igc_all_filter *filter, struct rte_flow_error *error);
static int igc_parse_pattern_udp(const struct rte_flow_item *item,
        struct igc_all_filter *filter, struct rte_flow_error *error);
static int igc_parse_pattern_tcp(const struct rte_flow_item *item,
        struct igc_all_filter *filter, struct rte_flow_error *error);
static igc_pattern_parse pattern_parse_list[] = {
    [RTE_FLOW_ITEM_TYPE_VOID] = igc_parse_pattern_void,
    [RTE_FLOW_ITEM_TYPE_ETH] = igc_parse_pattern_ether,
    [RTE_FLOW_ITEM_TYPE_IPV4] = igc_parse_pattern_ip,
    [RTE_FLOW_ITEM_TYPE_IPV6] = igc_parse_pattern_ipv6,
    [RTE_FLOW_ITEM_TYPE_UDP] = igc_parse_pattern_udp,
    [RTE_FLOW_ITEM_TYPE_TCP] = igc_parse_pattern_tcp,
};
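/*
 * Example dispatch: for the pattern "ETH / IPV4 / TCP", the loop below calls
 * igc_parse_pattern_ether(), igc_parse_pattern_ip() and
 * igc_parse_pattern_tcp() in that order; any item type without an entry in
 * this table (a NULL slot or an index beyond the array) is rejected.
 */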
/* Parse rule patterns */
static int
igc_parse_patterns(const struct rte_flow_item patterns[],
    struct igc_all_filter *filter, struct rte_flow_error *error)
{
    const struct rte_flow_item *item = patterns;

    if (item == NULL) {
        /* an empty pattern matches the RSS filter only */
        IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_RSS);
        return 0;
    }

    for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
        int ret;

        if (item->type >= RTE_DIM(pattern_parse_list))
            return rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM, item,
                    "Not supported");

        if (item->last)
            return rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
                    "Ranges are not supported");

        /* spec and mask must be provided together or not at all */
        if (!!item->spec ^ !!item->mask)
            return rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM, item,
                    "Spec and mask must be paired");

        /* get the parser for this pattern type */
        igc_pattern_parse parse_func =
                pattern_parse_list[item->type];
        if (!parse_func)
            return rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM, item,
                    "Not supported");

        /* call the pattern type parser */
        ret = parse_func(item, filter, error);
        if (ret)
            return ret;

        /* no filter type matches the pattern so far */
        if (filter->mask == 0)
            return rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM, item,
                    "Not supported");
    }

    return 0;
}
static int igc_parse_action_queue(struct rte_eth_dev *dev,
        const struct rte_flow_action *act,
        struct igc_all_filter *filter, struct rte_flow_error *error);
static int igc_parse_action_rss(struct rte_eth_dev *dev,
        const struct rte_flow_action *act,
        struct igc_all_filter *filter, struct rte_flow_error *error);
/* Parse flow actions */
static int
igc_parse_actions(struct rte_eth_dev *dev,
        const struct rte_flow_action actions[],
        struct igc_all_filter *filter,
        struct rte_flow_error *error)
{
    const struct rte_flow_action *act = actions;
    int ret;

    if (act == NULL)
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION_NUM, act,
                "NULL pointer");

    for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
            ret = igc_parse_action_queue(dev, act, filter, error);
            if (ret)
                return ret;
            break;
        case RTE_FLOW_ACTION_TYPE_RSS:
            ret = igc_parse_action_rss(dev, act, filter, error);
            if (ret)
                return ret;
            break;
        case RTE_FLOW_ACTION_TYPE_VOID:
            break;
        default:
            return rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION, act,
                    "Not supported");
        }
    }

    /* no filter type matches the actions */
    if (filter->mask == 0)
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION, act,
                "Not supported");

    return 0;
}
/* Parse a flow rule */
static int
igc_parse_flow(struct rte_eth_dev *dev,
        const struct rte_flow_attr *attr,
        const struct rte_flow_item patterns[],
        const struct rte_flow_action actions[],
        struct rte_flow_error *error,
        struct igc_all_filter *filter)
{
    int ret;

    /* clear all filters */
    memset(filter, 0, sizeof(*filter));

    /* set default filter mask */
    filter->mask = IGC_FILTER_MASK_ALL;

    ret = igc_parse_attribute(attr, filter, error);
    if (ret)
        return ret;

    ret = igc_parse_patterns(patterns, filter, error);
    if (ret)
        return ret;

    ret = igc_parse_actions(dev, actions, filter, error);
    if (ret)
        return ret;

    /* if no filter, or more than one filter, matched this flow */
    if (filter->mask == 0 || (filter->mask & (filter->mask - 1)))
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                "Flow can't be recognized");

    return 0;
}
/* Parse pattern type of void */
static int
igc_parse_pattern_void(__rte_unused const struct rte_flow_item *item,
        __rte_unused struct igc_all_filter *filter,
        __rte_unused struct rte_flow_error *error)
{
    return 0;
}
/* Parse pattern type of ethernet header */
static int
igc_parse_pattern_ether(const struct rte_flow_item *item,
        struct igc_all_filter *filter,
        struct rte_flow_error *error)
{
    const struct rte_flow_item_eth *spec = item->spec;
    const struct rte_flow_item_eth *mask = item->mask;
    struct igc_ethertype_filter *ether;

    if (mask == NULL) {
        /* only the n-tuple and SYN filters match the pattern */
        IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE |
                IGC_FILTER_MASK_TCP_SYN);
        return 0;
    }

    /* only the ether-type filter matches the pattern */
    IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_ETHER);

    /* destination and source MAC addresses are not supported */
    if (!rte_is_zero_ether_addr(&mask->src) ||
        !rte_is_zero_ether_addr(&mask->dst))
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                "Only the ether-type field is supported");

    /* ether-type mask bits must be all 1 */
    if (IGC_NOT_ALL_BITS_SET(mask->type))
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                "Ethernet type mask bits must be all 1");

    ether = &filter->ethertype;

    /* get ether-type */
    ether->ether_type = rte_be_to_cpu_16(spec->type);

    /* ether-type must not be IPv4, IPv6 or 0 */
    if (ether->ether_type == RTE_ETHER_TYPE_IPV4 ||
        ether->ether_type == RTE_ETHER_TYPE_IPV6 ||
        ether->ether_type == 0)
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                "IPv4/IPv6/0 not supported by the ethertype filter");

    return 0;
}
/* Parse pattern type of IPv4 */
static int
igc_parse_pattern_ip(const struct rte_flow_item *item,
        struct igc_all_filter *filter,
        struct rte_flow_error *error)
{
    const struct rte_flow_item_ipv4 *spec = item->spec;
    const struct rte_flow_item_ipv4 *mask = item->mask;

    if (mask == NULL) {
        /* only the n-tuple and SYN filters match this pattern */
        IGC_SET_FILTER_MASK(filter,
            IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);
        return 0;
    }

    /* only the n-tuple filter matches this pattern */
    IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);

    /* only the protocol field is used */
    if (mask->hdr.version_ihl ||
        mask->hdr.type_of_service ||
        mask->hdr.total_length ||
        mask->hdr.packet_id ||
        mask->hdr.fragment_offset ||
        mask->hdr.time_to_live ||
        mask->hdr.hdr_checksum ||
        mask->hdr.dst_addr ||
        mask->hdr.src_addr)
        return rte_flow_error_set(error,
            EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
            "Only the IPv4 protocol field is supported");

    if (mask->hdr.next_proto_id == 0)
        return 0;

    if (IGC_NOT_ALL_BITS_SET(mask->hdr.next_proto_id))
        return rte_flow_error_set(error,
            EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
            "IPv4 protocol mask bits must be all 0 or 1");

    /* get protocol type */
    filter->ntuple.tuple_info.proto_mask = 1;
    filter->ntuple.tuple_info.proto = spec->hdr.next_proto_id;

    return 0;
}
/*
 * Check whether an IPv6 address is all zero.
 * Return 1 if it is, 0 otherwise.
 */
static int
igc_is_zero_ipv6_addr(const void *ipv6_addr)
{
    const uint64_t *ddw = ipv6_addr;
    return ddw[0] == 0 && ddw[1] == 0;
}
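/*
 * The helper above treats the 16-byte IPv6 address as two 64-bit words; it
 * only distinguishes all-zero from non-zero, so byte order does not matter
 * here.
 */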
/* Parse pattern type of IPv6 */
static int
igc_parse_pattern_ipv6(const struct rte_flow_item *item,
        struct igc_all_filter *filter,
        struct rte_flow_error *error)
{
    const struct rte_flow_item_ipv6 *spec = item->spec;
    const struct rte_flow_item_ipv6 *mask = item->mask;

    if (mask == NULL) {
        /* only the n-tuple and SYN filters match this pattern */
        IGC_SET_FILTER_MASK(filter,
            IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);
        return 0;
    }

    /* only the n-tuple filter matches this pattern */
    IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);

    /* only the protocol field is used */
    if (mask->hdr.vtc_flow ||
        mask->hdr.payload_len ||
        mask->hdr.hop_limits ||
        !igc_is_zero_ipv6_addr(mask->hdr.src_addr) ||
        !igc_is_zero_ipv6_addr(mask->hdr.dst_addr))
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM, item,
                "Only the IPv6 protocol field is supported");

    if (mask->hdr.proto == 0)
        return 0;

    if (IGC_NOT_ALL_BITS_SET(mask->hdr.proto))
        return rte_flow_error_set(error,
            EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
            "IPv6 protocol mask bits must be all 0 or 1");

    /* get protocol type */
    filter->ntuple.tuple_info.proto_mask = 1;
    filter->ntuple.tuple_info.proto = spec->hdr.proto;

    return 0;
}
/* Parse pattern type of UDP */
static int
igc_parse_pattern_udp(const struct rte_flow_item *item,
        struct igc_all_filter *filter,
        struct rte_flow_error *error)
{
    const struct rte_flow_item_udp *spec = item->spec;
    const struct rte_flow_item_udp *mask = item->mask;

    /* only the n-tuple filter matches this pattern */
    IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);

    if (mask == NULL)
        return 0;

    /* only the destination port is used */
    if (mask->hdr.dgram_len || mask->hdr.dgram_cksum || mask->hdr.src_port)
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                "UDP only supports the destination port");

    if (mask->hdr.dst_port == 0)
        return 0;

    if (IGC_NOT_ALL_BITS_SET(mask->hdr.dst_port))
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                "UDP port mask bits must be all 0 or 1");

    /* get destination port info */
    filter->ntuple.tuple_info.dst_port_mask = 1;
    filter->ntuple.tuple_info.dst_port = spec->hdr.dst_port;

    return 0;
}
/* Parse pattern type of TCP */
static int
igc_parse_pattern_tcp(const struct rte_flow_item *item,
        struct igc_all_filter *filter,
        struct rte_flow_error *error)
{
    const struct rte_flow_item_tcp *spec = item->spec;
    const struct rte_flow_item_tcp *mask = item->mask;
    struct igc_ntuple_info *tuple_info = &filter->ntuple.tuple_info;

    if (mask == NULL) {
        /* only the n-tuple filter matches this pattern */
        IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
        return 0;
    }

    /* only the n-tuple and SYN filters match this pattern */
    IGC_SET_FILTER_MASK(filter,
        IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);

    /* only the destination port and TCP flags are used */
    if (mask->hdr.sent_seq ||
        mask->hdr.recv_ack ||
        mask->hdr.data_off ||
        mask->hdr.rx_win ||
        mask->hdr.cksum ||
        mask->hdr.tcp_urp ||
        mask->hdr.src_port)
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                "TCP only supports destination port and flags");

    /* if the destination port is used */
    if (mask->hdr.dst_port) {
        /* only the n-tuple filter matches this pattern */
        IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);

        if (IGC_NOT_ALL_BITS_SET(mask->hdr.dst_port))
            return rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                    "TCP port mask bits must be all 1");

        /* get destination port info */
        tuple_info->dst_port = spec->hdr.dst_port;
        tuple_info->dst_port_mask = 1;
    }

    /* if TCP flags are used */
    if (mask->hdr.tcp_flags) {
        if (IGC_IS_ALL_BITS_SET(mask->hdr.tcp_flags)) {
            /* only the n-tuple filter matches this pattern */
            IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);

            /* get the TCP flags */
            tuple_info->tcp_flags = spec->hdr.tcp_flags;
        } else if (mask->hdr.tcp_flags == RTE_TCP_SYN_FLAG) {
            /* only the TCP SYN filter matches this pattern */
            IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_TCP_SYN);
        } else {
            /* no filter matches this pattern */
            return rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                    "TCP flags can't match");
        }
    } else {
        /* only the n-tuple filter matches this pattern */
        IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
    }

    return 0;
}
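/*
 * Examples of the flag handling above:
 *   flags mask 0xff, spec 0x02  -> n-tuple filter matching exactly SYN
 *   flags mask 0x02 (SYN only)  -> TCP SYN filter
 *   flags mask 0x12             -> rejected, no filter can match it
 */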
/* Parse action of QUEUE */
static int
igc_parse_action_queue(struct rte_eth_dev *dev,
        const struct rte_flow_action *act,
        struct igc_all_filter *filter,
        struct rte_flow_error *error)
{
    uint16_t queue_idx;

    if (act->conf == NULL)
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
                "NULL pointer");

    /* only the ether-type, n-tuple and SYN filters match the action */
    IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_ETHER |
            IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);

    /* get queue index */
    queue_idx = ((const struct rte_flow_action_queue *)act->conf)->index;

    /* check that the queue index is valid */
    if (queue_idx >= dev->data->nb_rx_queues)
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
                "Queue id is invalid");

    /* store the queue in every candidate filter */
    filter->ethertype.queue = queue_idx;
    filter->ntuple.queue = queue_idx;
    filter->syn.queue = queue_idx;

    return 0;
}
/* Parse action of RSS */
static int
igc_parse_action_rss(struct rte_eth_dev *dev,
        const struct rte_flow_action *act,
        struct igc_all_filter *filter,
        struct rte_flow_error *error)
{
    const struct rte_flow_action_rss *rss = act->conf;
    uint32_t i;

    if (act->conf == NULL)
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
                "NULL pointer");

    /* only the RSS filter matches the action */
    IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_RSS);

    /* the RSS redirection table can't be empty and can't exceed 128 entries */
    if (!rss || !rss->queue_num || rss->queue_num > IGC_RSS_RDT_SIZD)
        return rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
                "RSS redirection table size is invalid");

    /* every queue index must be below the number of Rx queues */
    for (i = 0; i < rss->queue_num; i++) {
        if (rss->queue[i] >= dev->data->nb_rx_queues)
            return rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
                    "Queue id is invalid");
    }

    /* only the default RSS hash function is supported */
    if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
        return rte_flow_error_set(error, ENOTSUP,
                RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
                "Only the default RSS hash function is supported");

    if (rss->level)
        return rte_flow_error_set(error, ENOTSUP,
                RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
                "Only 0 RSS encapsulation level is supported");

    /* check that the key length is valid */
    if (rss->key_len && rss->key_len != sizeof(filter->rss.key))
        return rte_flow_error_set(error, ENOTSUP,
                RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
                "RSS hash key must be exactly 40 bytes");

    /* store the RSS configuration in the filter */
    igc_rss_conf_set(&filter->rss, rss);

    return 0;
}
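/*
 * Example of an accepted RSS action (testpmd syntax, illustrative only):
 *   flow create 0 ingress pattern end
 *        actions rss level 0 types ipv4-tcp end queues 0 1 2 3 end / end
 * The queue list may hold at most IGC_RSS_RDT_SIZD entries, every queue id
 * must be below the number of configured Rx queues, and a key, if given,
 * must be exactly 40 bytes long.
 */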
/*
 * Allocate a rte_flow from the heap.
 * Return a pointer to the flow, or NULL on failure.
 */
static inline struct rte_flow *
igc_alloc_flow(const void *filter, enum igc_filter_type type, uint inbytes)
{
    /* allocate memory, 8-byte aligned */
    struct rte_flow *flow = rte_malloc("igc flow filter",
            sizeof(struct rte_flow) + inbytes, 8);
    if (flow == NULL) {
        PMD_DRV_LOG(ERR, "failed to allocate memory");
        return NULL;
    }

    flow->filter_type = type;

    /* copy filter data */
    memcpy(flow->filter, filter, inbytes);
    return flow;
}
/* Append a rte_flow to the list */
static void
igc_append_flow(struct igc_flow_list *list, struct rte_flow *flow)
{
    TAILQ_INSERT_TAIL(list, flow, node);
}

/*
 * Remove the flow from the list and free the flow buffer.
 * The caller must make sure the flow really exists in the list.
 */
static void
igc_remove_flow(struct igc_flow_list *list, struct rte_flow *flow)
{
    TAILQ_REMOVE(list, flow, node);
    rte_free(flow);
}

/* Check whether the flow is really in the list or not */
static bool
igc_is_flow_in_list(struct igc_flow_list *list, struct rte_flow *flow)
{
    struct rte_flow *it;

    TAILQ_FOREACH(it, list, node) {
        if (it == flow)
            return true;
    }
    return false;
}
/*
 * Create a flow rule.
 * Theoretically one rule can match more than one filter; we let it use the
 * first filter it hits, so the order of the cases below matters.
 */
static struct rte_flow *
igc_flow_create(struct rte_eth_dev *dev,
        const struct rte_flow_attr *attr,
        const struct rte_flow_item patterns[],
        const struct rte_flow_action actions[],
        struct rte_flow_error *error)
{
    struct rte_flow *flow = NULL;
    struct igc_all_filter filter;
    int ret;

    ret = igc_parse_flow(dev, attr, patterns, actions, error, &filter);
    if (ret)
        return NULL;
    ret = -ENOMEM;

    switch (filter.mask) {
    case IGC_FILTER_MASK_ETHER:
        flow = igc_alloc_flow(&filter.ethertype,
                IGC_FILTER_TYPE_ETHERTYPE,
                sizeof(filter.ethertype));
        if (flow)
            ret = igc_add_ethertype_filter(dev, &filter.ethertype);
        break;
    case IGC_FILTER_MASK_NTUPLE:
        /* check that the n-tuple filter is valid */
        if (filter.ntuple.tuple_info.dst_port_mask == 0 &&
            filter.ntuple.tuple_info.proto_mask == 0) {
            rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_NONE, NULL,
                    "Flow can't be recognized");
            return NULL;
        }

        flow = igc_alloc_flow(&filter.ntuple, IGC_FILTER_TYPE_NTUPLE,
                sizeof(filter.ntuple));
        if (flow)
            ret = igc_add_ntuple_filter(dev, &filter.ntuple);
        break;
    case IGC_FILTER_MASK_TCP_SYN:
        flow = igc_alloc_flow(&filter.syn, IGC_FILTER_TYPE_SYN,
                sizeof(filter.syn));
        if (flow)
            ret = igc_set_syn_filter(dev, &filter.syn);
        break;
    case IGC_FILTER_MASK_RSS:
        flow = igc_alloc_flow(&filter.rss, IGC_FILTER_TYPE_HASH,
                sizeof(filter.rss));
        if (flow) {
            struct igc_rss_filter *rss =
                    (struct igc_rss_filter *)flow->filter;
            rss->conf.key = rss->key;
            rss->conf.queue = rss->queue;
            ret = igc_add_rss_filter(dev, &filter.rss);
        }
        break;
    default:
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_NONE, NULL,
                "Flow can't be recognized");
        return NULL;
    }

    if (ret) {
        /* check and free the memory */
        if (flow)
            rte_free(flow);

        rte_flow_error_set(error, -ret,
                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                "Failed to create flow.");
        return NULL;
    }

    /* append the flow to the tail of the list */
    igc_append_flow(IGC_DEV_PRIVATE_FLOW_LIST(dev), flow);
    return flow;
}
/*
 * Check whether the flow rule is supported by the device.
 * It only checks the format; it does not guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
igc_flow_validate(struct rte_eth_dev *dev,
        const struct rte_flow_attr *attr,
        const struct rte_flow_item patterns[],
        const struct rte_flow_action actions[],
        struct rte_flow_error *error)
{
    struct igc_all_filter filter;
    int ret;

    ret = igc_parse_flow(dev, attr, patterns, actions, error, &filter);
    if (ret)
        return ret;

    switch (filter.mask) {
    case IGC_FILTER_MASK_NTUPLE:
        /* check that the n-tuple filter is valid */
        if (filter.ntuple.tuple_info.dst_port_mask == 0 &&
            filter.ntuple.tuple_info.proto_mask == 0)
            return rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_NONE, NULL,
                    "Flow can't be recognized");
        break;
    default:
        break;
    }

    return 0;
}
/*
 * Disable a valid flow. The flow must not be NULL and must be
 * chained in the device flow list.
 */
static int
igc_disable_flow(struct rte_eth_dev *dev, struct rte_flow *flow)
{
    int ret = 0;

    switch (flow->filter_type) {
    case IGC_FILTER_TYPE_ETHERTYPE:
        ret = igc_del_ethertype_filter(dev,
                (struct igc_ethertype_filter *)&flow->filter);
        break;
    case IGC_FILTER_TYPE_NTUPLE:
        ret = igc_del_ntuple_filter(dev,
                (struct igc_ntuple_filter *)&flow->filter);
        break;
    case IGC_FILTER_TYPE_SYN:
        igc_clear_syn_filter(dev);
        break;
    case IGC_FILTER_TYPE_HASH:
        ret = igc_del_rss_filter(dev);
        break;
    default:
        PMD_DRV_LOG(ERR, "Filter type (%d) not supported",
                flow->filter_type);
        ret = -EINVAL;
        break;
    }

    return ret;
}
/* Destroy a flow rule */
static int
igc_flow_destroy(struct rte_eth_dev *dev,
        struct rte_flow *flow,
        struct rte_flow_error *error)
{
    struct igc_flow_list *list = IGC_DEV_PRIVATE_FLOW_LIST(dev);
    int ret;

    if (!flow) {
        PMD_DRV_LOG(ERR, "NULL flow!");
        return -EINVAL;
    }

    /* check that the flow was created by the IGC PMD */
    if (!igc_is_flow_in_list(list, flow)) {
        PMD_DRV_LOG(ERR, "Flow (%p) not found!", flow);
        return -ENOENT;
    }

    ret = igc_disable_flow(dev, flow);
    if (ret)
        rte_flow_error_set(error, -ret,
                RTE_FLOW_ERROR_TYPE_HANDLE,
                NULL, "Failed to destroy flow");

    igc_remove_flow(list, flow);
    return ret;
}
/* Initialize the device flow list header */
void
igc_flow_init(struct rte_eth_dev *dev)
{
    TAILQ_INIT(IGC_DEV_PRIVATE_FLOW_LIST(dev));
}
/* Destroy all flows in the list and free their memory */
int
igc_flow_flush(struct rte_eth_dev *dev,
        __rte_unused struct rte_flow_error *error)
{
    struct igc_flow_list *list = IGC_DEV_PRIVATE_FLOW_LIST(dev);
    struct rte_flow *flow;

    while ((flow = TAILQ_FIRST(list)) != NULL) {
        igc_disable_flow(dev, flow);
        igc_remove_flow(list, flow);
    }

    return 0;
}
const struct rte_flow_ops igc_flow_ops = {
    .validate = igc_flow_validate,
    .create = igc_flow_create,
    .destroy = igc_flow_destroy,
    .flush = igc_flow_flush,
};