4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
40 #include <rte_common.h>
41 #include <rte_interrupts.h>
42 #include <rte_byteorder.h>
44 #include <rte_debug.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
48 #include <rte_ethdev_pci.h>
49 #include <rte_memory.h>
50 #include <rte_memzone.h>
52 #include <rte_atomic.h>
53 #include <rte_malloc.h>
56 #include <rte_flow_driver.h>
58 #include "e1000_logs.h"
59 #include "base/e1000_api.h"
60 #include "e1000_ethdev.h"
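/*
 * Helpers to walk the pattern/action arrays: starting at (index), skip any
 * VOID entries and leave item/act pointing at the first non-void entry.
 */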
62 #define NEXT_ITEM_OF_PATTERN(item, pattern, index) \
64 item = (pattern) + (index); \
65 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
67 item = (pattern) + (index); \
71 #define NEXT_ITEM_OF_ACTION(act, actions, index) \
73 act = (actions) + (index); \
74 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
76 act = (actions) + (index); \
81 * Please be aware there is an assumption for all the parsers:
82 * rte_flow_item uses big endian, while rte_flow_attr and
83 * rte_flow_action use CPU order.
84 * Because the pattern is used to describe the packets,
85 * the packets normally use network (big endian) order.
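 * For example, when an application fills an IPv4/TCP spec from host-order
 * values, it is expected to convert them first, roughly (a sketch; the
 * variable names below are assumptions, not code in this driver):
 *	ipv4_spec.hdr.dst_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20));
 *	tcp_spec.hdr.dst_port = rte_cpu_to_be_16(80);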
89 * Parse the rule to see if it is an n-tuple rule,
90 * and also extract the n-tuple filter info.
92 * The first not void item can be ETH or IPV4.
93 * The second not void item must be IPV4 if the first one is ETH.
94 * The third not void item must be UDP, TCP or SCTP.
95 * The next not void item must be END.
97 * The first not void action should be QUEUE.
98 * The next not void action should be END.
102 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
103 * dst_addr 192.167.3.50 0xFFFFFFFF
104 * next_proto_id 17 0xFF
105 * UDP/TCP/ src_port 80 0xFFFF
106 * SCTP dst_port 80 0xFFFF
108 * other members in mask and spec should be set to 0x00.
109 * item->last should be NULL.
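 *
 * A minimal usage sketch matching the example above (illustrative only;
 * the variable names and values are assumptions, not part of this driver):
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *		.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *		.next_proto_id = 17 } };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = UINT32_MAX, .dst_addr = UINT32_MAX,
 *		.next_proto_id = UINT8_MAX } };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = rte_cpu_to_be_16(80),
 *		.dst_port = rte_cpu_to_be_16(80) } };
 *	struct rte_flow_item_udp udp_mask = { .hdr = {
 *		.src_port = UINT16_MAX, .dst_port = UINT16_MAX } };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};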
112 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
113 const struct rte_flow_item pattern[],
114 const struct rte_flow_action actions[],
115 struct rte_eth_ntuple_filter *filter,
116 struct rte_flow_error *error)
118 const struct rte_flow_item *item;
119 const struct rte_flow_action *act;
120 const struct rte_flow_item_ipv4 *ipv4_spec;
121 const struct rte_flow_item_ipv4 *ipv4_mask;
122 const struct rte_flow_item_tcp *tcp_spec;
123 const struct rte_flow_item_tcp *tcp_mask;
124 const struct rte_flow_item_udp *udp_spec;
125 const struct rte_flow_item_udp *udp_mask;
126 const struct rte_flow_item_sctp *sctp_spec;
127 const struct rte_flow_item_sctp *sctp_mask;
131 rte_flow_error_set(error,
132 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
133 NULL, "NULL pattern.");
138 rte_flow_error_set(error, EINVAL,
139 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
140 NULL, "NULL action.");
144 rte_flow_error_set(error, EINVAL,
145 RTE_FLOW_ERROR_TYPE_ATTR,
146 NULL, "NULL attribute.");
153 /* the first not void item can be MAC or IPv4 */
154 NEXT_ITEM_OF_PATTERN(item, pattern, index);
156 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
157 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
158 rte_flow_error_set(error, EINVAL,
159 RTE_FLOW_ERROR_TYPE_ITEM,
160 item, "Not supported by ntuple filter");
164 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
165 /* Not supported last point for range */
167 rte_flow_error_set(error,
169 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
170 item, "Not supported last point for range");
173 /* if the first item is MAC, the content should be NULL */
174 if (item->spec || item->mask) {
175 rte_flow_error_set(error, EINVAL,
176 RTE_FLOW_ERROR_TYPE_ITEM,
177 item, "Not supported by ntuple filter");
180 /* check if the next not void item is IPv4 */
182 NEXT_ITEM_OF_PATTERN(item, pattern, index);
183 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
184 rte_flow_error_set(error,
185 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
186 item, "Not supported by ntuple filter");
191 /* get the IPv4 info */
192 if (!item->spec || !item->mask) {
193 rte_flow_error_set(error, EINVAL,
194 RTE_FLOW_ERROR_TYPE_ITEM,
195 item, "Invalid ntuple mask");
198 /* Not supported last point for range */
200 rte_flow_error_set(error, EINVAL,
201 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
202 item, "Not supported last point for range");
206 ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
208 * Only support src & dst addresses and protocol;
209 * all other fields must have a zero mask.
212 if (ipv4_mask->hdr.version_ihl ||
213 ipv4_mask->hdr.type_of_service ||
214 ipv4_mask->hdr.total_length ||
215 ipv4_mask->hdr.packet_id ||
216 ipv4_mask->hdr.fragment_offset ||
217 ipv4_mask->hdr.time_to_live ||
218 ipv4_mask->hdr.hdr_checksum) {
219 rte_flow_error_set(error,
220 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
221 item, "Not supported by ntuple filter");
225 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
226 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
227 filter->proto_mask = ipv4_mask->hdr.next_proto_id;
229 ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
230 filter->dst_ip = ipv4_spec->hdr.dst_addr;
231 filter->src_ip = ipv4_spec->hdr.src_addr;
232 filter->proto = ipv4_spec->hdr.next_proto_id;
234 /* check if the next not void item is TCP or UDP or SCTP */
236 NEXT_ITEM_OF_PATTERN(item, pattern, index);
237 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
238 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
239 item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
240 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
241 rte_flow_error_set(error, EINVAL,
242 RTE_FLOW_ERROR_TYPE_ITEM,
243 item, "Not supported by ntuple filter");
247 /* Not supported last point for range */
249 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
250 rte_flow_error_set(error, EINVAL,
251 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
252 item, "Not supported last point for range");
256 /* get the TCP/UDP/SCTP info */
257 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
258 if (item->spec && item->mask) {
259 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
262 * Only support src & dst ports and tcp flags;
263 * all other fields must have a zero mask.
265 if (tcp_mask->hdr.sent_seq ||
266 tcp_mask->hdr.recv_ack ||
267 tcp_mask->hdr.data_off ||
268 tcp_mask->hdr.rx_win ||
269 tcp_mask->hdr.cksum ||
270 tcp_mask->hdr.tcp_urp) {
272 sizeof(struct rte_eth_ntuple_filter));
273 rte_flow_error_set(error, EINVAL,
274 RTE_FLOW_ERROR_TYPE_ITEM,
275 item, "Not supported by ntuple filter");
279 filter->dst_port_mask = tcp_mask->hdr.dst_port;
280 filter->src_port_mask = tcp_mask->hdr.src_port;
281 if (tcp_mask->hdr.tcp_flags == 0xFF) {
282 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
283 } else if (!tcp_mask->hdr.tcp_flags) {
284 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
287 sizeof(struct rte_eth_ntuple_filter));
288 rte_flow_error_set(error, EINVAL,
289 RTE_FLOW_ERROR_TYPE_ITEM,
290 item, "Not supported by ntuple filter");
294 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
295 filter->dst_port = tcp_spec->hdr.dst_port;
296 filter->src_port = tcp_spec->hdr.src_port;
297 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
299 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
300 if (item->spec && item->mask) {
301 udp_mask = (const struct rte_flow_item_udp *)item->mask;
304 * Only support src & dst ports;
305 * all other fields must have a zero mask.
307 if (udp_mask->hdr.dgram_len ||
308 udp_mask->hdr.dgram_cksum) {
310 sizeof(struct rte_eth_ntuple_filter));
311 rte_flow_error_set(error, EINVAL,
312 RTE_FLOW_ERROR_TYPE_ITEM,
313 item, "Not supported by ntuple filter");
317 filter->dst_port_mask = udp_mask->hdr.dst_port;
318 filter->src_port_mask = udp_mask->hdr.src_port;
320 udp_spec = (const struct rte_flow_item_udp *)item->spec;
321 filter->dst_port = udp_spec->hdr.dst_port;
322 filter->src_port = udp_spec->hdr.src_port;
325 if (item->spec && item->mask) {
326 sctp_mask = (const struct rte_flow_item_sctp *)
330 * Only support src & dst ports;
331 * all other fields must have a zero mask.
333 if (sctp_mask->hdr.tag ||
334 sctp_mask->hdr.cksum) {
336 sizeof(struct rte_eth_ntuple_filter));
337 rte_flow_error_set(error, EINVAL,
338 RTE_FLOW_ERROR_TYPE_ITEM,
339 item, "Not supported by ntuple filter");
343 filter->dst_port_mask = sctp_mask->hdr.dst_port;
344 filter->src_port_mask = sctp_mask->hdr.src_port;
346 sctp_spec = (const struct rte_flow_item_sctp *)
348 filter->dst_port = sctp_spec->hdr.dst_port;
349 filter->src_port = sctp_spec->hdr.src_port;
352 /* check if the next not void item is END */
354 NEXT_ITEM_OF_PATTERN(item, pattern, index);
355 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
356 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
357 rte_flow_error_set(error, EINVAL,
358 RTE_FLOW_ERROR_TYPE_ITEM,
359 item, "Not supported by ntuple filter");
367 * n-tuple only supports forwarding,
368 * check if the first not void action is QUEUE.
370 NEXT_ITEM_OF_ACTION(act, actions, index);
371 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
372 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
373 rte_flow_error_set(error, EINVAL,
374 RTE_FLOW_ERROR_TYPE_ACTION,
375 act, "Not supported action.");
379 ((const struct rte_flow_action_queue *)act->conf)->index;
381 /* check if the next not void item is END */
383 NEXT_ITEM_OF_ACTION(act, actions, index);
384 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
385 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
386 rte_flow_error_set(error, EINVAL,
387 RTE_FLOW_ERROR_TYPE_ACTION,
388 act, "Not supported action.");
393 /* must be input direction */
394 if (!attr->ingress) {
395 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
396 rte_flow_error_set(error, EINVAL,
397 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
398 attr, "Only ingress is supported.");
404 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
405 rte_flow_error_set(error, EINVAL,
406 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
407 attr, "Egress is not supported.");
411 if (attr->priority > 0xFFFF) {
412 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
413 rte_flow_error_set(error, EINVAL,
414 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
415 attr, "Invalid priority.");
418 filter->priority = (uint16_t)attr->priority;
423 /* a specific function for igb because the flags are specific */
425 igb_parse_ntuple_filter(struct rte_eth_dev *dev,
426 const struct rte_flow_attr *attr,
427 const struct rte_flow_item pattern[],
428 const struct rte_flow_action actions[],
429 struct rte_eth_ntuple_filter *filter,
430 struct rte_flow_error *error)
432 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
435 MAC_TYPE_FILTER_SUP(hw->mac.type);
437 ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
442 /* Igb doesn't support many priorities. */
443 if (filter->priority > E1000_2TUPLE_MAX_PRI) {
444 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
445 rte_flow_error_set(error, EINVAL,
446 RTE_FLOW_ERROR_TYPE_ITEM,
447 NULL, "Priority not supported by ntuple filter");
451 if (hw->mac.type == e1000_82576) {
452 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
453 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
454 rte_flow_error_set(error, EINVAL,
455 RTE_FLOW_ERROR_TYPE_ITEM,
456 NULL, "queue number not "
457 "supported by ntuple filter");
460 filter->flags |= RTE_5TUPLE_FLAGS;
462 if (filter->src_ip_mask || filter->dst_ip_mask ||
463 filter->src_port_mask) {
464 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
465 rte_flow_error_set(error, EINVAL,
466 RTE_FLOW_ERROR_TYPE_ITEM,
467 NULL, "only 2-tuple is "
468 "supported by this filter");
471 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
472 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
473 rte_flow_error_set(error, EINVAL,
474 RTE_FLOW_ERROR_TYPE_ITEM,
475 NULL, "queue number not "
476 "supported by ntuple filter");
479 filter->flags |= RTE_2TUPLE_FLAGS;
486 * Parse the rule to see if it is an ethertype rule,
487 * and also extract the ethertype filter info.
489 * The first not void item must be ETH.
490 * The next not void item must be END.
492 * The first not void action should be QUEUE.
493 * The next not void action should be END.
496 * ETH type 0x0807 0xFFFF
498 * other members in mask and spec should be set to 0x00.
499 * item->last should be NULL.
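 *
 * A usage sketch matching the example above (illustrative only; names
 * are assumptions, not part of this driver):
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807) };
 *	struct rte_flow_item_eth eth_mask = { .type = UINT16_MAX };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};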
502 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
503 const struct rte_flow_item *pattern,
504 const struct rte_flow_action *actions,
505 struct rte_eth_ethertype_filter *filter,
506 struct rte_flow_error *error)
508 const struct rte_flow_item *item;
509 const struct rte_flow_action *act;
510 const struct rte_flow_item_eth *eth_spec;
511 const struct rte_flow_item_eth *eth_mask;
512 const struct rte_flow_action_queue *act_q;
516 rte_flow_error_set(error, EINVAL,
517 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
518 NULL, "NULL pattern.");
523 rte_flow_error_set(error, EINVAL,
524 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
525 NULL, "NULL action.");
530 rte_flow_error_set(error, EINVAL,
531 RTE_FLOW_ERROR_TYPE_ATTR,
532 NULL, "NULL attribute.");
539 /* The first non-void item should be MAC. */
540 NEXT_ITEM_OF_PATTERN(item, pattern, index);
541 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
542 rte_flow_error_set(error, EINVAL,
543 RTE_FLOW_ERROR_TYPE_ITEM,
544 item, "Not supported by ethertype filter");
548 /* Not supported last point for range */
550 rte_flow_error_set(error, EINVAL,
551 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
552 item, "Not supported last point for range");
556 /* Get the MAC info. */
557 if (!item->spec || !item->mask) {
558 rte_flow_error_set(error, EINVAL,
559 RTE_FLOW_ERROR_TYPE_ITEM,
560 item, "Not supported by ethertype filter");
564 eth_spec = (const struct rte_flow_item_eth *)item->spec;
565 eth_mask = (const struct rte_flow_item_eth *)item->mask;
567 /* Mask bits of source MAC address must be full of 0.
568 * Mask bits of destination MAC address must be full
 * of 1 or full of 0.
 */
571 if (!is_zero_ether_addr(ð_mask->src) ||
572 (!is_zero_ether_addr(ð_mask->dst) &&
573 !is_broadcast_ether_addr(ð_mask->dst))) {
574 rte_flow_error_set(error, EINVAL,
575 RTE_FLOW_ERROR_TYPE_ITEM,
576 item, "Invalid ether address mask");
580 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
581 rte_flow_error_set(error, EINVAL,
582 RTE_FLOW_ERROR_TYPE_ITEM,
583 item, "Invalid ethertype mask");
587 /* If mask bits of destination MAC address
588 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
590 if (is_broadcast_ether_addr(ð_mask->dst)) {
591 filter->mac_addr = eth_spec->dst;
592 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
594 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
596 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
598 /* Check if the next non-void item is END. */
600 NEXT_ITEM_OF_PATTERN(item, pattern, index);
601 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
602 rte_flow_error_set(error, EINVAL,
603 RTE_FLOW_ERROR_TYPE_ITEM,
604 item, "Not supported by ethertype filter.");
611 /* Check if the first non-void action is QUEUE or DROP. */
612 NEXT_ITEM_OF_ACTION(act, actions, index);
613 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
614 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
615 rte_flow_error_set(error, EINVAL,
616 RTE_FLOW_ERROR_TYPE_ACTION,
617 act, "Not supported action.");
621 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
622 act_q = (const struct rte_flow_action_queue *)act->conf;
623 filter->queue = act_q->index;
625 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
628 /* Check if the next non-void item is END */
630 NEXT_ITEM_OF_ACTION(act, actions, index);
631 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
632 rte_flow_error_set(error, EINVAL,
633 RTE_FLOW_ERROR_TYPE_ACTION,
634 act, "Not supported action.");
639 /* Must be input direction */
640 if (!attr->ingress) {
641 rte_flow_error_set(error, EINVAL,
642 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
643 attr, "Only ingress is supported.");
649 rte_flow_error_set(error, EINVAL,
650 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
651 attr, "Egress is not supported.");
656 if (attr->priority) {
657 rte_flow_error_set(error, EINVAL,
658 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
659 attr, "Priority is not supported.");
665 rte_flow_error_set(error, EINVAL,
666 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
667 attr, "Group is not supported.");
675 igb_parse_ethertype_filter(struct rte_eth_dev *dev,
676 const struct rte_flow_attr *attr,
677 const struct rte_flow_item pattern[],
678 const struct rte_flow_action actions[],
679 struct rte_eth_ethertype_filter *filter,
680 struct rte_flow_error *error)
682 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
685 MAC_TYPE_FILTER_SUP(hw->mac.type);
687 ret = cons_parse_ethertype_filter(attr, pattern,
688 actions, filter, error);
693 if (hw->mac.type == e1000_82576) {
694 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
695 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
696 rte_flow_error_set(error, EINVAL,
697 RTE_FLOW_ERROR_TYPE_ITEM,
698 NULL, "queue number not supported "
699 "by ethertype filter");
703 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
704 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
705 rte_flow_error_set(error, EINVAL,
706 RTE_FLOW_ERROR_TYPE_ITEM,
707 NULL, "queue number not supported "
708 "by ethertype filter");
713 if (filter->ether_type == ETHER_TYPE_IPv4 ||
714 filter->ether_type == ETHER_TYPE_IPv6) {
715 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
716 rte_flow_error_set(error, EINVAL,
717 RTE_FLOW_ERROR_TYPE_ITEM,
718 NULL, "IPv4/IPv6 not supported by ethertype filter");
722 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
723 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
724 rte_flow_error_set(error, EINVAL,
725 RTE_FLOW_ERROR_TYPE_ITEM,
726 NULL, "mac compare is unsupported");
730 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
731 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
732 rte_flow_error_set(error, EINVAL,
733 RTE_FLOW_ERROR_TYPE_ITEM,
734 NULL, "drop option is unsupported");
742 * Parse the rule to see if it is a TCP SYN rule,
743 * and also extract the TCP SYN filter info.
745 * The first not void item must be ETH.
746 * The second not void item must be IPV4 or IPV6.
747 * The third not void item must be TCP.
748 * The next not void item must be END.
750 * The first not void action should be QUEUE.
751 * The next not void action should be END.
755 * IPV4/IPV6 NULL NULL
756 * TCP tcp_flags 0x02 0x02
758 * other members in mask and spec should be set to 0x00.
759 * item->last should be NULL.
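 *
 * A usage sketch matching the example above (illustrative only; names
 * are assumptions, not part of this driver):
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .tcp_flags = 0x02 /* SYN */ } };
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .tcp_flags = 0x02 } };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};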
762 cons_parse_syn_filter(const struct rte_flow_attr *attr,
763 const struct rte_flow_item pattern[],
764 const struct rte_flow_action actions[],
765 struct rte_eth_syn_filter *filter,
766 struct rte_flow_error *error)
768 const struct rte_flow_item *item;
769 const struct rte_flow_action *act;
770 const struct rte_flow_item_tcp *tcp_spec;
771 const struct rte_flow_item_tcp *tcp_mask;
772 const struct rte_flow_action_queue *act_q;
776 rte_flow_error_set(error, EINVAL,
777 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
778 NULL, "NULL pattern.");
783 rte_flow_error_set(error, EINVAL,
784 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
785 NULL, "NULL action.");
790 rte_flow_error_set(error, EINVAL,
791 RTE_FLOW_ERROR_TYPE_ATTR,
792 NULL, "NULL attribute.");
799 /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
800 NEXT_ITEM_OF_PATTERN(item, pattern, index);
801 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
802 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
803 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
804 item->type != RTE_FLOW_ITEM_TYPE_TCP) {
805 rte_flow_error_set(error, EINVAL,
806 RTE_FLOW_ERROR_TYPE_ITEM,
807 item, "Not supported by syn filter");
810 /* Not supported last point for range */
812 rte_flow_error_set(error, EINVAL,
813 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
814 item, "Not supported last point for range");
819 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
820 /* if the item is MAC, the content should be NULL */
821 if (item->spec || item->mask) {
822 rte_flow_error_set(error, EINVAL,
823 RTE_FLOW_ERROR_TYPE_ITEM,
824 item, "Invalid SYN address mask");
828 /* check if the next not void item is IPv4 or IPv6 */
830 NEXT_ITEM_OF_PATTERN(item, pattern, index);
831 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
832 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
833 rte_flow_error_set(error, EINVAL,
834 RTE_FLOW_ERROR_TYPE_ITEM,
835 item, "Not supported by syn filter");
841 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
842 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
843 /* if the item is IP, the content should be NULL */
844 if (item->spec || item->mask) {
845 rte_flow_error_set(error, EINVAL,
846 RTE_FLOW_ERROR_TYPE_ITEM,
847 item, "Invalid SYN mask");
851 /* check if the next not void item is TCP */
853 NEXT_ITEM_OF_PATTERN(item, pattern, index);
854 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
855 rte_flow_error_set(error, EINVAL,
856 RTE_FLOW_ERROR_TYPE_ITEM,
857 item, "Not supported by syn filter");
862 /* Get the TCP info. Only support SYN. */
863 if (!item->spec || !item->mask) {
864 rte_flow_error_set(error, EINVAL,
865 RTE_FLOW_ERROR_TYPE_ITEM,
866 item, "Invalid SYN mask");
869 /* Not supported last point for range */
871 rte_flow_error_set(error, EINVAL,
872 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
873 item, "Not supported last point for range");
877 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
878 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
879 if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
880 tcp_mask->hdr.src_port ||
881 tcp_mask->hdr.dst_port ||
882 tcp_mask->hdr.sent_seq ||
883 tcp_mask->hdr.recv_ack ||
884 tcp_mask->hdr.data_off ||
885 tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
886 tcp_mask->hdr.rx_win ||
887 tcp_mask->hdr.cksum ||
888 tcp_mask->hdr.tcp_urp) {
889 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
890 rte_flow_error_set(error, EINVAL,
891 RTE_FLOW_ERROR_TYPE_ITEM,
892 item, "Not supported by syn filter");
896 /* check if the next not void item is END */
898 NEXT_ITEM_OF_PATTERN(item, pattern, index);
899 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
900 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
901 rte_flow_error_set(error, EINVAL,
902 RTE_FLOW_ERROR_TYPE_ITEM,
903 item, "Not supported by syn filter");
910 /* check if the first not void action is QUEUE. */
911 NEXT_ITEM_OF_ACTION(act, actions, index);
912 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
913 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
914 rte_flow_error_set(error, EINVAL,
915 RTE_FLOW_ERROR_TYPE_ACTION,
916 act, "Not supported action.");
920 act_q = (const struct rte_flow_action_queue *)act->conf;
921 filter->queue = act_q->index;
923 /* check if the next not void item is END */
925 NEXT_ITEM_OF_ACTION(act, actions, index);
926 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
927 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
928 rte_flow_error_set(error, EINVAL,
929 RTE_FLOW_ERROR_TYPE_ACTION,
930 act, "Not supported action.");
935 /* must be input direction */
936 if (!attr->ingress) {
937 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
938 rte_flow_error_set(error, EINVAL,
939 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
940 attr, "Only ingress is supported.");
946 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
947 rte_flow_error_set(error, EINVAL,
948 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
949 attr, "Egress is not supported.");
953 /* Support 2 priorities, the lowest or highest. */
954 if (!attr->priority) {
956 } else if (attr->priority == (uint32_t)~0U) {
959 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
960 rte_flow_error_set(error, EINVAL,
961 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
962 attr, "Priority is not supported.");
970 igb_parse_syn_filter(struct rte_eth_dev *dev,
971 const struct rte_flow_attr *attr,
972 const struct rte_flow_item pattern[],
973 const struct rte_flow_action actions[],
974 struct rte_eth_syn_filter *filter,
975 struct rte_flow_error *error)
977 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
980 MAC_TYPE_FILTER_SUP(hw->mac.type);
982 ret = cons_parse_syn_filter(attr, pattern,
983 actions, filter, error);
985 if (hw->mac.type == e1000_82576) {
986 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
987 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
988 rte_flow_error_set(error, EINVAL,
989 RTE_FLOW_ERROR_TYPE_ITEM,
990 NULL, "queue number not "
991 "supported by syn filter");
995 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
996 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
997 rte_flow_error_set(error, EINVAL,
998 RTE_FLOW_ERROR_TYPE_ITEM,
999 NULL, "queue number not "
1000 "supported by syn filter");
1012 * Parse the rule to see if it is a flex byte rule,
1013 * and also extract the flex byte filter info.
1015 * The first not void item must be RAW.
1016 * The second not void item can be RAW or END.
1017 * The third not void item can be RAW or END.
1018 * The last not void item must be END.
1020 * The first not void action should be QUEUE.
1021 * The next not void action should be END.
1024 * RAW relative 0 0x1
1025 * offset 0 0xFFFFFFFF
1026 * pattern {0x08, 0x06} {0xFF, 0xFF}
1027 * RAW relative 1 0x1
1028 * offset 100 0xFFFFFFFF
1029 * pattern {0x11, 0x22, 0x33} {0xFF, 0xFF, 0xFF}
1031 * other members in mask and spec should be set to 0x00.
1032 * item->last should be NULL.
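 *
 * A rough usage sketch for the first RAW item above (illustrative only;
 * it assumes the rte_flow_item_raw layout of this DPDK release, where
 * pattern[] is a flexible array member, so spec and mask are built in
 * dynamically sized buffers):
 *	struct rte_flow_item_raw *raw_spec =
 *		calloc(1, sizeof(*raw_spec) + 2);
 *	struct rte_flow_item_raw *raw_mask =
 *		calloc(1, sizeof(*raw_mask) + 2);
 *	raw_spec->relative = 0;
 *	raw_spec->offset = 0;
 *	raw_spec->length = 2;
 *	raw_spec->pattern[0] = 0x08;
 *	raw_spec->pattern[1] = 0x06;
 *	raw_mask->relative = 1;
 *	raw_mask->offset = 0xFFFFFFFF;
 *	raw_mask->length = UINT16_MAX;
 *	raw_mask->pattern[0] = 0xFF;
 *	raw_mask->pattern[1] = 0xFF;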
1035 cons_parse_flex_filter(const struct rte_flow_attr *attr,
1036 const struct rte_flow_item pattern[],
1037 const struct rte_flow_action actions[],
1038 struct rte_eth_flex_filter *filter,
1039 struct rte_flow_error *error)
1041 const struct rte_flow_item *item;
1042 const struct rte_flow_action *act;
1043 const struct rte_flow_item_raw *raw_spec;
1044 const struct rte_flow_item_raw *raw_mask;
1045 const struct rte_flow_action_queue *act_q;
1046 uint32_t index, i, offset, total_offset = 0;
1050 rte_flow_error_set(error, EINVAL,
1051 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1052 NULL, "NULL pattern.");
1057 rte_flow_error_set(error, EINVAL,
1058 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1059 NULL, "NULL action.");
1064 rte_flow_error_set(error, EINVAL,
1065 RTE_FLOW_ERROR_TYPE_ATTR,
1066 NULL, "NULL attribute.");
1075 /* the first not void item should be RAW */
1076 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1077 if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1078 rte_flow_error_set(error, EINVAL,
1079 RTE_FLOW_ERROR_TYPE_ITEM,
1080 item, "Not supported by flex filter");
1083 /* Not supported last point for range */
1085 rte_flow_error_set(error, EINVAL,
1086 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1087 item, "Not supported last point for range");
1091 raw_spec = (const struct rte_flow_item_raw *)item->spec;
1092 raw_mask = (const struct rte_flow_item_raw *)item->mask;
1094 if (!raw_mask->length ||
1095 !raw_mask->relative) {
1096 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1097 rte_flow_error_set(error, EINVAL,
1098 RTE_FLOW_ERROR_TYPE_ITEM,
1099 item, "Not supported by flex filter");
1103 if (raw_mask->offset)
1104 offset = raw_spec->offset;
1108 for (index = 0; index < raw_spec->length; index++) {
1109 if (raw_mask->pattern[index] != 0xFF) {
1110 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1111 rte_flow_error_set(error, EINVAL,
1112 RTE_FLOW_ERROR_TYPE_ITEM,
1113 item, "Not supported by flex filter");
1118 if ((raw_spec->length + offset + total_offset) >
1119 RTE_FLEX_FILTER_MAXLEN) {
1120 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1121 rte_flow_error_set(error, EINVAL,
1122 RTE_FLOW_ERROR_TYPE_ITEM,
1123 item, "Not supported by flex filter");
1127 if (raw_spec->relative == 0) {
1128 for (index = 0; index < raw_spec->length; index++)
1129 filter->bytes[index] = raw_spec->pattern[index];
1130 index = offset / CHAR_BIT;
1132 for (index = 0; index < raw_spec->length; index++)
1133 filter->bytes[total_offset + index] =
1134 raw_spec->pattern[index];
1135 index = (total_offset + offset) / CHAR_BIT;
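	/*
	 * Set one mask bit (most-significant bit first within each mask
	 * byte) for every pattern byte copied above, starting at the bit
	 * position that corresponds to 'offset'.
	 */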
1140 for (shift = offset % CHAR_BIT; shift < CHAR_BIT; shift++) {
1141 filter->mask[index] |= (0x80 >> shift);
1143 if (i == raw_spec->length)
1145 if (shift == (CHAR_BIT - 1)) {
1151 total_offset += offset + raw_spec->length;
1153 /* check if the next not void item is RAW */
1155 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1156 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1157 item->type != RTE_FLOW_ITEM_TYPE_END) {
1158 rte_flow_error_set(error, EINVAL,
1159 RTE_FLOW_ERROR_TYPE_ITEM,
1160 item, "Not supported by flex filter");
1164 /* go back to parser */
1165 if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1166 /* if the item is RAW, the content should be parsed */
1170 filter->len = RTE_ALIGN(total_offset, 8);
1175 /* check if the first not void action is QUEUE. */
1176 NEXT_ITEM_OF_ACTION(act, actions, index);
1177 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1178 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1179 rte_flow_error_set(error, EINVAL,
1180 RTE_FLOW_ERROR_TYPE_ACTION,
1181 act, "Not supported action.");
1185 act_q = (const struct rte_flow_action_queue *)act->conf;
1186 filter->queue = act_q->index;
1188 /* check if the next not void item is END */
1190 NEXT_ITEM_OF_ACTION(act, actions, index);
1191 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1192 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1193 rte_flow_error_set(error, EINVAL,
1194 RTE_FLOW_ERROR_TYPE_ACTION,
1195 act, "Not supported action.");
1200 /* must be input direction */
1201 if (!attr->ingress) {
1202 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1203 rte_flow_error_set(error, EINVAL,
1204 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1205 attr, "Only ingress is supported.");
1211 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1212 rte_flow_error_set(error, EINVAL,
1213 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1214 attr, "Egress is not supported.");
1218 if (attr->priority > 0xFFFF) {
1219 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1220 rte_flow_error_set(error, EINVAL,
1221 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1222 attr, "Invalid priority.");
1226 filter->priority = (uint16_t)attr->priority;
1232 igb_parse_flex_filter(struct rte_eth_dev *dev,
1233 const struct rte_flow_attr *attr,
1234 const struct rte_flow_item pattern[],
1235 const struct rte_flow_action actions[],
1236 struct rte_eth_flex_filter *filter,
1237 struct rte_flow_error *error)
1239 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1242 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
1244 ret = cons_parse_flex_filter(attr, pattern,
1245 actions, filter, error);
1247 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
1248 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1249 rte_flow_error_set(error, EINVAL,
1250 RTE_FLOW_ERROR_TYPE_ITEM,
1251 NULL, "queue number not supported by flex filter");
1255 if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
1256 filter->len % sizeof(uint64_t) != 0) {
1257 PMD_DRV_LOG(ERR, "filter's length is out of range");
1261 if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
1262 PMD_DRV_LOG(ERR, "filter's priority is out of range");
1273 * Create a flow rule.
1274 * Theoretically one rule can match more than one filter.
1275 * We will let it use the first filter it hits.
1276 * So, the sequence matters.
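 *
 * This is reached through the generic rte_flow API, e.g. (sketch;
 * port_id, attr, pattern and actions as defined by the caller):
 *	struct rte_flow_error err;
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);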
1278 static struct rte_flow *
1279 igb_flow_create(struct rte_eth_dev *dev,
1280 const struct rte_flow_attr *attr,
1281 const struct rte_flow_item pattern[],
1282 const struct rte_flow_action actions[],
1283 struct rte_flow_error *error)
1286 struct rte_eth_ntuple_filter ntuple_filter;
1287 struct rte_eth_ethertype_filter ethertype_filter;
1288 struct rte_eth_syn_filter syn_filter;
1289 struct rte_eth_flex_filter flex_filter;
1290 struct rte_flow *flow = NULL;
1291 struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1292 struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1293 struct igb_eth_syn_filter_ele *syn_filter_ptr;
1294 struct igb_flex_filter_ele *flex_filter_ptr;
1295 struct igb_flow_mem *igb_flow_mem_ptr;
1297 flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
1299 PMD_DRV_LOG(ERR, "failed to allocate memory");
1300 return (struct rte_flow *)flow;
1302 igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
1303 sizeof(struct igb_flow_mem), 0);
1304 if (!igb_flow_mem_ptr) {
1305 PMD_DRV_LOG(ERR, "failed to allocate memory");
1309 igb_flow_mem_ptr->flow = flow;
1310 igb_flow_mem_ptr->dev = dev;
1311 TAILQ_INSERT_TAIL(&igb_flow_list,
1312 igb_flow_mem_ptr, entries);
1314 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1315 ret = igb_parse_ntuple_filter(dev, attr, pattern,
1316 actions, &ntuple_filter, error);
1318 ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
1320 ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
1321 sizeof(struct igb_ntuple_filter_ele), 0);
1322 (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
1324 sizeof(struct rte_eth_ntuple_filter));
1325 TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
1326 ntuple_filter_ptr, entries);
1327 flow->rule = ntuple_filter_ptr;
1328 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
1334 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1335 ret = igb_parse_ethertype_filter(dev, attr, pattern,
1336 actions, ðertype_filter, error);
1338 ret = igb_add_del_ethertype_filter(dev,
1339 ðertype_filter, TRUE);
1341 ethertype_filter_ptr = rte_zmalloc(
1342 "igb_ethertype_filter",
1343 sizeof(struct igb_ethertype_filter_ele), 0);
1344 (void)rte_memcpy(ðertype_filter_ptr->filter_info,
1346 sizeof(struct rte_eth_ethertype_filter));
1347 TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
1348 ethertype_filter_ptr, entries);
1349 flow->rule = ethertype_filter_ptr;
1350 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
1356 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1357 ret = igb_parse_syn_filter(dev, attr, pattern,
1358 actions, &syn_filter, error);
1360 ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
1362 syn_filter_ptr = rte_zmalloc("igb_syn_filter",
1363 sizeof(struct igb_eth_syn_filter_ele), 0);
1364 (void)rte_memcpy(&syn_filter_ptr->filter_info,
1366 sizeof(struct rte_eth_syn_filter));
1367 TAILQ_INSERT_TAIL(&igb_filter_syn_list,
1370 flow->rule = syn_filter_ptr;
1371 flow->filter_type = RTE_ETH_FILTER_SYN;
1377 memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
1378 ret = igb_parse_flex_filter(dev, attr, pattern,
1379 actions, &flex_filter, error);
1381 ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
1383 flex_filter_ptr = rte_zmalloc("igb_flex_filter",
1384 sizeof(struct igb_flex_filter_ele), 0);
1385 (void)rte_memcpy(&flex_filter_ptr->filter_info,
1387 sizeof(struct rte_eth_flex_filter));
1388 TAILQ_INSERT_TAIL(&igb_filter_flex_list,
1389 flex_filter_ptr, entries);
1390 flow->rule = flex_filter_ptr;
1391 flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
1397 TAILQ_REMOVE(&igb_flow_list,
1398 igb_flow_mem_ptr, entries);
1399 rte_flow_error_set(error, -ret,
1400 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1401 "Failed to create flow.");
1402 rte_free(igb_flow_mem_ptr);
1408 * Check if the flow rule is supported by igb.
1409 * It only checks the format. It doesn't guarantee that the rule can be
1410 * programmed into the HW, because there may not be enough room for the rule.
1413 igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
1414 const struct rte_flow_attr *attr,
1415 const struct rte_flow_item pattern[],
1416 const struct rte_flow_action actions[],
1417 struct rte_flow_error *error)
1419 struct rte_eth_ntuple_filter ntuple_filter;
1420 struct rte_eth_ethertype_filter ethertype_filter;
1421 struct rte_eth_syn_filter syn_filter;
1422 struct rte_eth_flex_filter flex_filter;
1425 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1426 ret = igb_parse_ntuple_filter(dev, attr, pattern,
1427 actions, &ntuple_filter, error);
1431 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1432 ret = igb_parse_ethertype_filter(dev, attr, pattern,
1433 actions, ðertype_filter, error);
1437 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1438 ret = igb_parse_syn_filter(dev, attr, pattern,
1439 actions, &syn_filter, error);
1443 memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
1444 ret = igb_parse_flex_filter(dev, attr, pattern,
1445 actions, &flex_filter, error);
1450 /* Destroy a flow rule on igb. */
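/*
 * Reached through the generic rte_flow API, e.g. (sketch):
 *	rte_flow_destroy(port_id, flow, &error);
 */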
1452 igb_flow_destroy(struct rte_eth_dev *dev,
1453 struct rte_flow *flow,
1454 struct rte_flow_error *error)
1457 struct rte_flow *pmd_flow = flow;
1458 enum rte_filter_type filter_type = pmd_flow->filter_type;
1459 struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1460 struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1461 struct igb_eth_syn_filter_ele *syn_filter_ptr;
1462 struct igb_flex_filter_ele *flex_filter_ptr;
1463 struct igb_flow_mem *igb_flow_mem_ptr;
1465 switch (filter_type) {
1466 case RTE_ETH_FILTER_NTUPLE:
1467 ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
1469 ret = igb_add_del_ntuple_filter(dev,
1470 &ntuple_filter_ptr->filter_info, FALSE);
1472 TAILQ_REMOVE(&igb_filter_ntuple_list,
1473 ntuple_filter_ptr, entries);
1474 rte_free(ntuple_filter_ptr);
1477 case RTE_ETH_FILTER_ETHERTYPE:
1478 ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
1480 ret = igb_add_del_ethertype_filter(dev,
1481 ðertype_filter_ptr->filter_info, FALSE);
1483 TAILQ_REMOVE(&igb_filter_ethertype_list,
1484 ethertype_filter_ptr, entries);
1485 rte_free(ethertype_filter_ptr);
1488 case RTE_ETH_FILTER_SYN:
1489 syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
1491 ret = eth_igb_syn_filter_set(dev,
1492 &syn_filter_ptr->filter_info, FALSE);
1494 TAILQ_REMOVE(&igb_filter_syn_list,
1495 syn_filter_ptr, entries);
1496 rte_free(syn_filter_ptr);
1499 case RTE_ETH_FILTER_FLEXIBLE:
1500 flex_filter_ptr = (struct igb_flex_filter_ele *)
1502 ret = eth_igb_add_del_flex_filter(dev,
1503 &flex_filter_ptr->filter_info, FALSE);
1505 TAILQ_REMOVE(&igb_filter_flex_list,
1506 flex_filter_ptr, entries);
1507 rte_free(flex_filter_ptr);
1511 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
1518 rte_flow_error_set(error, EINVAL,
1519 RTE_FLOW_ERROR_TYPE_HANDLE,
1520 NULL, "Failed to destroy flow");
1524 TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
1525 if (igb_flow_mem_ptr->flow == pmd_flow) {
1526 TAILQ_REMOVE(&igb_flow_list,
1527 igb_flow_mem_ptr, entries);
1528 rte_free(igb_flow_mem_ptr);
1536 const struct rte_flow_ops igb_flow_ops = {