/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)		\
	do {							\
		item = (pattern) + (index);			\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {	\
			(index)++;				\
			item = (pattern) + (index);		\
		}						\
	} while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)		\
	do {							\
		act = (actions) + (index);			\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			(index)++;				\
			act = (actions) + (index);		\
		}						\
	} while (0)

#define IGB_FLEX_RAW_NUM	12
/**
 * Please be aware there is an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */
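
/*
 * Illustrative sketch (compiled out, not part of the driver): an application
 * converts pattern item fields to network order explicitly, while attribute
 * and action fields stay in CPU order. The values below are hypothetical.
 */
#if 0
static void
example_byte_order(struct rte_flow_item_tcp *tcp_spec,
		   struct rte_flow_attr *attr)
{
	/* pattern items describe packet contents: big endian */
	tcp_spec->hdr.dst_port = rte_cpu_to_be_16(80);
	/* attributes and actions: CPU order, no conversion */
	attr->priority = 1;
}
#endif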
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
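
/*
 * Illustrative sketch (compiled out, not part of the driver): one way an
 * application could phrase the n-tuple rule documented above through the
 * public rte_flow API. The port and queue values are hypothetical, and the
 * IPv4() macro is assumed from rte_ip.h.
 */
#if 0
static int
example_ntuple_rule(uint8_t port_id, uint16_t queue_id)
{
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
			.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
			.next_proto_id = 17, /* UDP */
		},
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = {
			.src_addr = UINT32_MAX,
			.dst_addr = UINT32_MAX,
			.next_proto_id = UINT8_MAX,
		},
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(80),
			.dst_port = rte_cpu_to_be_16(80),
		},
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = {
			.src_port = UINT16_MAX,
			.dst_port = UINT16_MAX,
		},
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, &err)
			? 0 : -rte_errno;
}
#endif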
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses and protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP or SCTP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* get the TCP/UDP/SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		if (item->spec && item->mask) {
			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

			/**
			 * Only support src & dst ports and tcp flags,
			 * others should be masked.
			 */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = tcp_mask->hdr.dst_port;
			filter->src_port_mask = tcp_mask->hdr.src_port;
			if (tcp_mask->hdr.tcp_flags == 0xFF) {
				filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
			} else if (!tcp_mask->hdr.tcp_flags) {
				filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
			} else {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			filter->dst_port = tcp_spec->hdr.dst_port;
			filter->src_port = tcp_spec->hdr.src_port;
			filter->tcp_flags = tcp_spec->hdr.tcp_flags;
		}
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		if (item->spec && item->mask) {
			udp_mask = (const struct rte_flow_item_udp *)item->mask;

			/**
			 * Only support src & dst ports,
			 * others should be masked.
			 */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = udp_mask->hdr.dst_port;
			filter->src_port_mask = udp_mask->hdr.src_port;

			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			filter->dst_port = udp_spec->hdr.dst_port;
			filter->src_port = udp_spec->hdr.src_port;
		}
	} else {
		if (item->spec && item->mask) {
			sctp_mask = (const struct rte_flow_item_sctp *)
					item->mask;

			/**
			 * Only support src & dst ports,
			 * others should be masked.
			 */
			if (sctp_mask->hdr.tag ||
			    sctp_mask->hdr.cksum) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = sctp_mask->hdr.dst_port;
			filter->src_port_mask = sctp_mask->hdr.src_port;

			sctp_spec = (const struct rte_flow_item_sctp *)
					item->spec;
			filter->dst_port = sctp_spec->hdr.dst_port;
			filter->src_port = sctp_spec->hdr.src_port;
		}
	}
	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;

	return 0;
}
/* a specific function for igb because the flags are specific */
static int
igb_parse_ntuple_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;

	/* Igb doesn't support many priorities. */
	if (filter->priority > E1000_2TUPLE_MAX_PRI) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (hw->mac.type == e1000_82576) {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
					"supported by ntuple filter");
			return -rte_errno;
		}
		filter->flags |= RTE_5TUPLE_FLAGS;
	} else {
		if (filter->src_ip_mask || filter->dst_ip_mask ||
			filter->src_port_mask) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "only two tuple are "
					"supported by this filter");
			return -rte_errno;
		}
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
					"supported by ntuple filter");
			return -rte_errno;
		}
		filter->flags |= RTE_2TUPLE_FLAGS;
	}

	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
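
/*
 * Illustrative sketch (compiled out, not part of the driver): an application
 * view of the ethertype rule documented above. The ether type must be fully
 * masked, and 0x0800/0x86DD are rejected by igb_parse_ethertype_filter()
 * below. The port and queue values are hypothetical.
 */
#if 0
static int
example_ethertype_rule(uint8_t port_id, uint16_t queue_id)
{
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.type = rte_cpu_to_be_16(0x0807),
	};
	struct rte_flow_item_eth eth_mask = {
		.type = UINT16_MAX, /* ether type must be fully masked */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, &err)
			? 0 : -rte_errno;
}
#endif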
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item *pattern,
			const struct rte_flow_action *actions,
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* Parse pattern */
	index = 0;

	/* The first non-void item should be MAC. */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */
	index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
static int
igb_parse_ethertype_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);
	if (ret)
		return ret;

	if (hw->mac.type == e1000_82576) {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
			memset(filter, 0, sizeof(
					struct rte_eth_ethertype_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not supported "
					"by ethertype filter");
			return -rte_errno;
		}
	} else {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
			memset(filter, 0, sizeof(
					struct rte_eth_ethertype_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not supported "
					"by ethertype filter");
			return -rte_errno;
		}
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
		filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
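
/*
 * Illustrative sketch (compiled out, not part of the driver): one way an
 * application could phrase the TCP SYN rule documented above. The port and
 * queue values are hypothetical; 0x02 is the TCP SYN flag bit, and priority
 * 0 selects the low-priority variant accepted by the parser below.
 */
#if 0
static int
example_syn_rule(uint8_t port_id, uint16_t queue_id)
{
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_tcp tcp_spec = { .hdr = { .tcp_flags = 0x02 } };
	struct rte_flow_item_tcp tcp_mask = { .hdr = { .tcp_flags = 0xFF } };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* no spec/mask */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* no spec/mask */
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, &err)
			? 0 : -rte_errno;
}
#endif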
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}
	/* not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}
	/* not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
static int
igb_parse_syn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);

	if (hw->mac.type == e1000_82576) {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
			memset(filter, 0, sizeof(struct rte_eth_syn_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
					"supported by syn filter");
			return -rte_errno;
		}
	} else {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
			memset(filter, 0, sizeof(struct rte_eth_syn_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
					"supported by syn filter");
			return -rte_errno;
		}
	}

	if (ret)
		return ret;

	return 0;
}
/**
 * Parse the rule to see if it is a flex byte rule.
 * And get the flex byte filter info BTW.
 * pattern:
 * The first not void item must be RAW.
 * The second not void item can be RAW or END.
 * The third not void item can be RAW or END.
 * The last not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * RAW		relative	0	0x1
 *		offset	0		0xFFFFFFFF
 *		pattern	{0x08, 0x06}	{0xFF, 0xFF}
 * RAW		relative	1	0x1
 *		offset	100		0xFFFFFFFF
 *		pattern	{0x11, 0x22, 0x33}	{0xFF, 0xFF, 0xFF}
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
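
/*
 * Illustrative sketch (compiled out, not part of the driver): struct
 * rte_flow_item_raw ends in a flexible "pattern[]" array, so the spec and
 * mask of the RAW items documented above are usually built in heap buffers.
 * This helper and its names are hypothetical and assume <stdlib.h> and
 * <string.h>. A spec built this way pairs with a mask whose pattern bytes
 * are all 0xFF, as the parser below requires.
 */
#if 0
static struct rte_flow_item_raw *
example_make_raw(uint32_t relative, int32_t offset,
		 const uint8_t *bytes, uint16_t len)
{
	struct rte_flow_item_raw *raw;

	raw = calloc(1, sizeof(*raw) + len);
	if (raw == NULL)
		return NULL;
	raw->relative = relative;	/* 0: absolute, 1: after last match */
	raw->offset = offset;		/* byte offset to start matching at */
	raw->length = len;		/* number of pattern bytes */
	memcpy(raw->pattern, bytes, len);
	return raw;
}
#endif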
static int
cons_parse_flex_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_flex_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_raw *raw_spec;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index, i, offset, total_offset;
	uint32_t max_offset = 0;
	int32_t shift, j, raw_index = 0;
	int32_t relative[IGB_FLEX_RAW_NUM] = {0};
	int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

item_loop:

	/* the first not void item should be RAW */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by flex filter");
		return -rte_errno;
	}
	/* not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	raw_spec = (const struct rte_flow_item_raw *)item->spec;
	raw_mask = (const struct rte_flow_item_raw *)item->mask;

	if (!raw_mask->length ||
	    !raw_mask->relative) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by flex filter");
		return -rte_errno;
	}

	if (raw_mask->offset)
		offset = raw_spec->offset;
	else
		offset = 0;

	for (j = 0; j < raw_spec->length; j++) {
		if (raw_mask->pattern[j] != 0xFF) {
			memset(filter, 0, sizeof(struct rte_eth_flex_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by flex filter");
			return -rte_errno;
		}
	}

	total_offset = 0;

	if (raw_spec->relative) {
		for (j = raw_index; j > 0; j--) {
			total_offset += raw_offset[j - 1];
			if (!relative[j - 1])
				break;
		}
		if (total_offset + raw_spec->length + offset > max_offset)
			max_offset = total_offset + raw_spec->length + offset;
	} else {
		if (raw_spec->length + offset > max_offset)
			max_offset = raw_spec->length + offset;
	}

	if ((raw_spec->length + offset + total_offset) >
			RTE_FLEX_FILTER_MAXLEN) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by flex filter");
		return -rte_errno;
	}

	if (raw_spec->relative == 0) {
		for (j = 0; j < raw_spec->length; j++)
			filter->bytes[offset + j] =
				raw_spec->pattern[j];
		j = offset / CHAR_BIT;
		shift = offset % CHAR_BIT;
	} else {
		for (j = 0; j < raw_spec->length; j++)
			filter->bytes[total_offset + offset + j] =
				raw_spec->pattern[j];
		j = (total_offset + offset) / CHAR_BIT;
		shift = (total_offset + offset) % CHAR_BIT;
	}

	i = 0;

	for ( ; shift < CHAR_BIT; shift++) {
		filter->mask[j] |= (0x80 >> shift);
		i++;
		if (i == raw_spec->length)
			break;
		if (shift == (CHAR_BIT - 1)) {
			j++;
			shift = -1;
		}
	}

	relative[raw_index] = raw_spec->relative;
	raw_offset[raw_index] = offset + raw_spec->length;
	raw_index++;

	/* check if the next not void item is RAW */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by flex filter");
		return -rte_errno;
	}

	/* go back to parser */
	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
		/* if the item is RAW, the content should be parsed */
		goto item_loop;
	}

	filter->len = RTE_ALIGN(max_offset, 8);

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}

	filter->priority = (uint16_t)attr->priority;

	return 0;
}
static int
igb_parse_flex_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_flex_filter *filter,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_flex_filter(attr, pattern,
					actions, filter, error);

	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue number not supported by flex filter");
		return -rte_errno;
	}

	if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
	    filter->len % sizeof(uint64_t) != 0) {
		PMD_DRV_LOG(ERR, "filter's length is out of range");
		return -EINVAL;
	}

	if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
		PMD_DRV_LOG(ERR, "filter's priority is out of range");
		return -EINVAL;
	}

	if (ret)
		return ret;

	return 0;
}
/**
 * Create a flow rule.
 * Theoretically one rule can match more than one kind of filter.
 * We will let it use the filter which it hits first.
 * So, the sequence matters.
 */
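
/*
 * Illustrative sketch (compiled out, not part of the driver): the typical
 * application-side sequence for the ops implemented below. Validation only
 * checks the rule format, so a create may still fail for lack of hardware
 * resources. All names are hypothetical.
 */
#if 0
static struct rte_flow *
example_install_rule(uint8_t port_id,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item pattern[],
		     const struct rte_flow_action actions[])
{
	struct rte_flow_error err;

	/* cheap format check first; no hardware state is touched */
	if (rte_flow_validate(port_id, attr, pattern, actions, &err) != 0)
		return NULL;

	/* may still fail, e.g. when the filter table is full */
	return rte_flow_create(port_id, attr, pattern, actions, &err);
}
#endif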
static struct rte_flow *
igb_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_flex_filter flex_filter;
	struct rte_flow *flow = NULL;
	struct igb_ntuple_filter_ele *ntuple_filter_ptr;
	struct igb_ethertype_filter_ele *ethertype_filter_ptr;
	struct igb_eth_syn_filter_ele *syn_filter_ptr;
	struct igb_flex_filter_ele *flex_filter_ptr;
	struct igb_flow_mem *igb_flow_mem_ptr;

	flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return (struct rte_flow *)flow;
	}
	igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
			sizeof(struct igb_flow_mem), 0);
	if (!igb_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	igb_flow_mem_ptr->flow = flow;
	igb_flow_mem_ptr->dev = dev;
	TAILQ_INSERT_TAIL(&igb_flow_list,
				igb_flow_mem_ptr, entries);

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = igb_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);
	if (!ret) {
		ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
				sizeof(struct igb_ntuple_filter_ele), 0);
			rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = igb_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret) {
		ret = igb_add_del_ethertype_filter(dev,
				&ethertype_filter, TRUE);
		if (!ret) {
			ethertype_filter_ptr = rte_zmalloc(
				"igb_ethertype_filter",
				sizeof(struct igb_ethertype_filter_ele), 0);
			rte_memcpy(&ethertype_filter_ptr->filter_info,
				&ethertype_filter,
				sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
				ethertype_filter_ptr, entries);
			flow->rule = ethertype_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
			return flow;
		}
		goto out;
	}

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = igb_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret) {
		ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("igb_syn_filter",
				sizeof(struct igb_eth_syn_filter_ele), 0);
			rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&igb_filter_syn_list,
				syn_filter_ptr,
				entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}

	memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
	ret = igb_parse_flex_filter(dev, attr, pattern,
					actions, &flex_filter, error);
	if (!ret) {
		ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
		if (!ret) {
			flex_filter_ptr = rte_zmalloc("igb_flex_filter",
				sizeof(struct igb_flex_filter_ele), 0);
			rte_memcpy(&flex_filter_ptr->filter_info,
				&flex_filter,
				sizeof(struct rte_eth_flex_filter));
			TAILQ_INSERT_TAIL(&igb_filter_flex_list,
				flex_filter_ptr, entries);
			flow->rule = flex_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
			return flow;
		}
	}

out:
	TAILQ_REMOVE(&igb_flow_list,
		igb_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(igb_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}
/**
 * Check if the flow rule is supported by igb.
 * It only checks the format. It doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for the rule.
 */
static int
igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_flex_filter flex_filter;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = igb_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = igb_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = igb_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
	ret = igb_parse_flex_filter(dev, attr, pattern,
				actions, &flex_filter, error);

	return ret;
}
/* Destroy a flow rule on igb. */
static int
igb_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct igb_ntuple_filter_ele *ntuple_filter_ptr;
	struct igb_ethertype_filter_ele *ethertype_filter_ptr;
	struct igb_eth_syn_filter_ele *syn_filter_ptr;
	struct igb_flex_filter_ele *flex_filter_ptr;
	struct igb_flow_mem *igb_flow_mem_ptr;

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
					pmd_flow->rule;
		ret = igb_add_del_ntuple_filter(dev,
				&ntuple_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_ntuple_list,
				ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
					pmd_flow->rule;
		ret = igb_add_del_ethertype_filter(dev,
				&ethertype_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_ethertype_list,
				ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
				pmd_flow->rule;
		ret = eth_igb_syn_filter_set(dev,
				&syn_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_syn_list,
				syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FLEXIBLE:
		flex_filter_ptr = (struct igb_flex_filter_ele *)
				pmd_flow->rule;
		ret = eth_igb_add_del_flex_filter(dev,
				&flex_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_flex_list,
				flex_filter_ptr, entries);
			rte_free(flex_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
		if (igb_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&igb_flow_list,
				igb_flow_mem_ptr, entries);
			rte_free(igb_flow_mem_ptr);
		}
	}
	rte_free(flow);

	return ret;
}
/* remove all the n-tuple filters */
static void
igb_clear_all_ntuple_filter(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter *p_5tuple;
	struct e1000_2tuple_filter *p_2tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
		igb_delete_5tuple_filter_82576(dev, p_5tuple);

	while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list)))
		igb_delete_2tuple_filter(dev, p_2tuple);
}
/* remove all the ether type filters */
static void
igb_clear_all_ethertype_filter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i;

	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_mask & (1 << i)) {
			(void)igb_ethertype_filter_remove(filter_info,
							    (uint8_t)i);
			E1000_WRITE_REG(hw, E1000_ETQF(i), 0);
			E1000_WRITE_FLUSH(hw);
		}
	}
}
/* remove the SYN filter */
static void
igb_clear_syn_filter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) {
		filter_info->syn_info = 0;
		E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
		E1000_WRITE_FLUSH(hw);
	}
}
/* remove all the flex filters */
static void
igb_clear_all_flex_filter(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_flex_filter *flex_filter;

	while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list)))
		igb_remove_flex_filter(dev, flex_filter);
}
void
igb_filterlist_flush(struct rte_eth_dev *dev)
{
	struct igb_ntuple_filter_ele *ntuple_filter_ptr;
	struct igb_ethertype_filter_ele *ethertype_filter_ptr;
	struct igb_eth_syn_filter_ele *syn_filter_ptr;
	struct igb_flex_filter_ele *flex_filter_ptr;
	struct igb_flow_mem *igb_flow_mem_ptr;
	enum rte_filter_type filter_type;
	struct rte_flow *pmd_flow;

	TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
		if (igb_flow_mem_ptr->dev == dev) {
			pmd_flow = igb_flow_mem_ptr->flow;
			filter_type = pmd_flow->filter_type;

			switch (filter_type) {
			case RTE_ETH_FILTER_NTUPLE:
				ntuple_filter_ptr =
					(struct igb_ntuple_filter_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_ntuple_list,
						ntuple_filter_ptr, entries);
				rte_free(ntuple_filter_ptr);
				break;
			case RTE_ETH_FILTER_ETHERTYPE:
				ethertype_filter_ptr =
					(struct igb_ethertype_filter_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_ethertype_list,
						ethertype_filter_ptr, entries);
				rte_free(ethertype_filter_ptr);
				break;
			case RTE_ETH_FILTER_SYN:
				syn_filter_ptr =
					(struct igb_eth_syn_filter_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_syn_list,
						syn_filter_ptr, entries);
				rte_free(syn_filter_ptr);
				break;
			case RTE_ETH_FILTER_FLEXIBLE:
				flex_filter_ptr =
					(struct igb_flex_filter_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_flex_list,
						flex_filter_ptr, entries);
				rte_free(flex_filter_ptr);
				break;
			default:
				PMD_DRV_LOG(WARNING, "Filter type "
					"(%d) not supported", filter_type);
				break;
			}
			TAILQ_REMOVE(&igb_flow_list,
				 igb_flow_mem_ptr,
				 entries);
			rte_free(igb_flow_mem_ptr->flow);
			rte_free(igb_flow_mem_ptr);
		}
	}
}
/* Destroy all flow rules associated with a port on igb. */
static int
igb_flow_flush(struct rte_eth_dev *dev,
		__rte_unused struct rte_flow_error *error)
{
	igb_clear_all_ntuple_filter(dev);
	igb_clear_all_ethertype_filter(dev);
	igb_clear_syn_filter(dev);
	igb_clear_all_flex_filter(dev);
	igb_filterlist_flush(dev);

	return 0;
}

const struct rte_flow_ops igb_flow_ops = {
	.validate = igb_flow_validate,
	.create = igb_flow_create,
	.destroy = igb_flow_destroy,
	.flush = igb_flow_flush,
};